diff options
Diffstat (limited to 'arch/sparc/kernel')
90 files changed, 43306 insertions, 0 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 6558eea5f0bc..46439465c3b2 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -38,6 +38,19 @@ obj-$(CONFIG_SPARC32) += muldiv.o | |||
38 | obj-y += prom_$(BITS).o | 38 | obj-y += prom_$(BITS).o |
39 | obj-y += of_device_$(BITS).o | 39 | obj-y += of_device_$(BITS).o |
40 | 40 | ||
41 | obj-$(CONFIG_SPARC64) += reboot.o | ||
42 | obj-$(CONFIG_SPARC64) += sysfs.o | ||
43 | obj-$(CONFIG_SPARC64) += iommu.o | ||
44 | obj-$(CONFIG_SPARC64) += central.o | ||
45 | obj-$(CONFIG_SPARC64) += starfire.o | ||
46 | obj-$(CONFIG_SPARC64) += power.o | ||
47 | obj-$(CONFIG_SPARC64) += sbus.o | ||
48 | obj-$(CONFIG_SPARC64) += ebus.o | ||
49 | obj-$(CONFIG_SPARC64) += visemul.o | ||
50 | obj-$(CONFIG_SPARC64) += hvapi.o | ||
51 | obj-$(CONFIG_SPARC64) += sstate.o | ||
52 | obj-$(CONFIG_SPARC64) += mdesc.o | ||
53 | |||
41 | # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation | 54 | # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation |
42 | obj-$(CONFIG_SPARC32) += devres.o | 55 | obj-$(CONFIG_SPARC32) += devres.o |
43 | devres-y := ../../../kernel/irq/devres.o | 56 | devres-y := ../../../kernel/irq/devres.o |
@@ -48,6 +61,7 @@ obj-$(CONFIG_SPARC32_PCI) += pcic.o | |||
48 | 61 | ||
49 | obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o | 62 | obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o |
50 | obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o | 63 | obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o |
64 | obj-$(CONFIG_SPARC64_SMP) += hvtramp.o | ||
51 | 65 | ||
52 | obj-y += auxio_$(BITS).o | 66 | obj-y += auxio_$(BITS).o |
53 | obj-$(CONFIG_SUN_PM) += apc.o pmc.o | 67 | obj-$(CONFIG_SUN_PM) += apc.o pmc.o |
@@ -56,3 +70,28 @@ obj-$(CONFIG_MODULES) += module_$(BITS).o | |||
56 | obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o | 70 | obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o |
57 | obj-$(CONFIG_SPARC_LED) += led.o | 71 | obj-$(CONFIG_SPARC_LED) += led.o |
58 | obj-$(CONFIG_KGDB) += kgdb_$(BITS).o | 72 | obj-$(CONFIG_KGDB) += kgdb_$(BITS).o |
73 | |||
74 | |||
75 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | ||
76 | CFLAGS_REMOVE_ftrace.o := -pg | ||
77 | |||
78 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
79 | # sparc64 PCI | ||
80 | obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o | ||
81 | obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o | ||
82 | obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o | ||
83 | obj-$(CONFIG_PCI_MSI) += pci_msi.o | ||
84 | |||
85 | obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o | ||
86 | |||
87 | # sparc64 cpufreq | ||
88 | obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o | ||
89 | obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o | ||
90 | obj-$(CONFIG_US3_MC) += chmc.o | ||
91 | |||
92 | obj-$(CONFIG_KPROBES) += kprobes.o | ||
93 | obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o | ||
94 | |||
95 | obj-$(CONFIG_AUDIT) += audit.o | ||
96 | audit--$(CONFIG_AUDIT) := compat_audit.o | ||
97 | obj-$(CONFIG_COMPAT) += $(audit--y) | ||
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c new file mode 100644 index 000000000000..8fff0ac63d56 --- /dev/null +++ b/arch/sparc/kernel/audit.c | |||
@@ -0,0 +1,83 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/audit.h> | ||
4 | #include <asm/unistd.h> | ||
5 | |||
6 | static unsigned dir_class[] = { | ||
7 | #include <asm-generic/audit_dir_write.h> | ||
8 | ~0U | ||
9 | }; | ||
10 | |||
11 | static unsigned read_class[] = { | ||
12 | #include <asm-generic/audit_read.h> | ||
13 | ~0U | ||
14 | }; | ||
15 | |||
16 | static unsigned write_class[] = { | ||
17 | #include <asm-generic/audit_write.h> | ||
18 | ~0U | ||
19 | }; | ||
20 | |||
21 | static unsigned chattr_class[] = { | ||
22 | #include <asm-generic/audit_change_attr.h> | ||
23 | ~0U | ||
24 | }; | ||
25 | |||
26 | static unsigned signal_class[] = { | ||
27 | #include <asm-generic/audit_signal.h> | ||
28 | ~0U | ||
29 | }; | ||
30 | |||
31 | int audit_classify_arch(int arch) | ||
32 | { | ||
33 | #ifdef CONFIG_COMPAT | ||
34 | if (arch == AUDIT_ARCH_SPARC) | ||
35 | return 1; | ||
36 | #endif | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | int audit_classify_syscall(int abi, unsigned syscall) | ||
41 | { | ||
42 | #ifdef CONFIG_COMPAT | ||
43 | extern int sparc32_classify_syscall(unsigned); | ||
44 | if (abi == AUDIT_ARCH_SPARC) | ||
45 | return sparc32_classify_syscall(syscall); | ||
46 | #endif | ||
47 | switch(syscall) { | ||
48 | case __NR_open: | ||
49 | return 2; | ||
50 | case __NR_openat: | ||
51 | return 3; | ||
52 | case __NR_socketcall: | ||
53 | return 4; | ||
54 | case __NR_execve: | ||
55 | return 5; | ||
56 | default: | ||
57 | return 0; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static int __init audit_classes_init(void) | ||
62 | { | ||
63 | #ifdef CONFIG_COMPAT | ||
64 | extern __u32 sparc32_dir_class[]; | ||
65 | extern __u32 sparc32_write_class[]; | ||
66 | extern __u32 sparc32_read_class[]; | ||
67 | extern __u32 sparc32_chattr_class[]; | ||
68 | extern __u32 sparc32_signal_class[]; | ||
69 | audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class); | ||
70 | audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class); | ||
71 | audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class); | ||
72 | audit_register_class(AUDIT_CLASS_CHATTR_32, sparc32_chattr_class); | ||
73 | audit_register_class(AUDIT_CLASS_SIGNAL_32, sparc32_signal_class); | ||
74 | #endif | ||
75 | audit_register_class(AUDIT_CLASS_WRITE, write_class); | ||
76 | audit_register_class(AUDIT_CLASS_READ, read_class); | ||
77 | audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); | ||
78 | audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); | ||
79 | audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | __initcall(audit_classes_init); | ||
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c new file mode 100644 index 000000000000..8b67347d4221 --- /dev/null +++ b/arch/sparc/kernel/auxio_64.c | |||
@@ -0,0 +1,149 @@ | |||
1 | /* auxio.c: Probing for the Sparc AUXIO register at boot time. | ||
2 | * | ||
3 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
4 | * | ||
5 | * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net) | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/ioport.h> | ||
12 | #include <linux/of_device.h> | ||
13 | |||
14 | #include <asm/prom.h> | ||
15 | #include <asm/io.h> | ||
16 | #include <asm/auxio.h> | ||
17 | |||
18 | void __iomem *auxio_register = NULL; | ||
19 | EXPORT_SYMBOL(auxio_register); | ||
20 | |||
21 | enum auxio_type { | ||
22 | AUXIO_TYPE_NODEV, | ||
23 | AUXIO_TYPE_SBUS, | ||
24 | AUXIO_TYPE_EBUS | ||
25 | }; | ||
26 | |||
27 | static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV; | ||
28 | static DEFINE_SPINLOCK(auxio_lock); | ||
29 | |||
30 | static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus) | ||
31 | { | ||
32 | if (auxio_register) { | ||
33 | unsigned long flags; | ||
34 | u8 regval, newval; | ||
35 | |||
36 | spin_lock_irqsave(&auxio_lock, flags); | ||
37 | |||
38 | regval = (ebus ? | ||
39 | (u8) readl(auxio_register) : | ||
40 | sbus_readb(auxio_register)); | ||
41 | newval = regval | bits_on; | ||
42 | newval &= ~bits_off; | ||
43 | if (!ebus) | ||
44 | newval &= ~AUXIO_AUX1_MASK; | ||
45 | if (ebus) | ||
46 | writel((u32) newval, auxio_register); | ||
47 | else | ||
48 | sbus_writeb(newval, auxio_register); | ||
49 | |||
50 | spin_unlock_irqrestore(&auxio_lock, flags); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static void __auxio_set_bit(u8 bit, int on, int ebus) | ||
55 | { | ||
56 | u8 bits_on = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED); | ||
57 | u8 bits_off = 0; | ||
58 | |||
59 | if (!on) { | ||
60 | u8 tmp = bits_off; | ||
61 | bits_off = bits_on; | ||
62 | bits_on = tmp; | ||
63 | } | ||
64 | __auxio_rmw(bits_on, bits_off, ebus); | ||
65 | } | ||
66 | |||
67 | void auxio_set_led(int on) | ||
68 | { | ||
69 | int ebus = auxio_devtype == AUXIO_TYPE_EBUS; | ||
70 | u8 bit; | ||
71 | |||
72 | bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED); | ||
73 | __auxio_set_bit(bit, on, ebus); | ||
74 | } | ||
75 | |||
76 | static void __auxio_sbus_set_lte(int on) | ||
77 | { | ||
78 | __auxio_set_bit(AUXIO_AUX1_LTE, on, 0); | ||
79 | } | ||
80 | |||
81 | void auxio_set_lte(int on) | ||
82 | { | ||
83 | switch(auxio_devtype) { | ||
84 | case AUXIO_TYPE_SBUS: | ||
85 | __auxio_sbus_set_lte(on); | ||
86 | break; | ||
87 | case AUXIO_TYPE_EBUS: | ||
88 | /* FALL-THROUGH */ | ||
89 | default: | ||
90 | break; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | static struct of_device_id __initdata auxio_match[] = { | ||
95 | { | ||
96 | .name = "auxio", | ||
97 | }, | ||
98 | {}, | ||
99 | }; | ||
100 | |||
101 | MODULE_DEVICE_TABLE(of, auxio_match); | ||
102 | |||
103 | static int __devinit auxio_probe(struct of_device *dev, const struct of_device_id *match) | ||
104 | { | ||
105 | struct device_node *dp = dev->node; | ||
106 | unsigned long size; | ||
107 | |||
108 | if (!strcmp(dp->parent->name, "ebus")) { | ||
109 | auxio_devtype = AUXIO_TYPE_EBUS; | ||
110 | size = sizeof(u32); | ||
111 | } else if (!strcmp(dp->parent->name, "sbus")) { | ||
112 | auxio_devtype = AUXIO_TYPE_SBUS; | ||
113 | size = 1; | ||
114 | } else { | ||
115 | printk("auxio: Unknown parent bus type [%s]\n", | ||
116 | dp->parent->name); | ||
117 | return -ENODEV; | ||
118 | } | ||
119 | auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); | ||
120 | if (!auxio_register) | ||
121 | return -ENODEV; | ||
122 | |||
123 | printk(KERN_INFO "AUXIO: Found device at %s\n", | ||
124 | dp->full_name); | ||
125 | |||
126 | if (auxio_devtype == AUXIO_TYPE_EBUS) | ||
127 | auxio_set_led(AUXIO_LED_ON); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static struct of_platform_driver auxio_driver = { | ||
133 | .match_table = auxio_match, | ||
134 | .probe = auxio_probe, | ||
135 | .driver = { | ||
136 | .name = "auxio", | ||
137 | }, | ||
138 | }; | ||
139 | |||
140 | static int __init auxio_init(void) | ||
141 | { | ||
142 | return of_register_driver(&auxio_driver, &of_platform_bus_type); | ||
143 | } | ||
144 | |||
145 | /* Must be after subsys_initcall() so that busses are probed. Must | ||
146 | * be before device_initcall() because things like the floppy driver | ||
147 | * need to use the AUXIO register. | ||
148 | */ | ||
149 | fs_initcall(auxio_init); | ||
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c new file mode 100644 index 000000000000..05f1c916db06 --- /dev/null +++ b/arch/sparc/kernel/central.c | |||
@@ -0,0 +1,268 @@ | |||
1 | /* central.c: Central FHC driver for Sunfire/Starfire/Wildfire. | ||
2 | * | ||
3 | * Copyright (C) 1997, 1999, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/string.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/of_device.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | |||
13 | #include <asm/fhc.h> | ||
14 | #include <asm/upa.h> | ||
15 | |||
16 | struct clock_board { | ||
17 | void __iomem *clock_freq_regs; | ||
18 | void __iomem *clock_regs; | ||
19 | void __iomem *clock_ver_reg; | ||
20 | int num_slots; | ||
21 | struct resource leds_resource; | ||
22 | struct platform_device leds_pdev; | ||
23 | }; | ||
24 | |||
25 | struct fhc { | ||
26 | void __iomem *pregs; | ||
27 | bool central; | ||
28 | bool jtag_master; | ||
29 | int board_num; | ||
30 | struct resource leds_resource; | ||
31 | struct platform_device leds_pdev; | ||
32 | }; | ||
33 | |||
34 | static int __devinit clock_board_calc_nslots(struct clock_board *p) | ||
35 | { | ||
36 | u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0; | ||
37 | |||
38 | switch (reg) { | ||
39 | case 0x40: | ||
40 | return 16; | ||
41 | |||
42 | case 0xc0: | ||
43 | return 8; | ||
44 | |||
45 | case 0x80: | ||
46 | reg = 0; | ||
47 | if (p->clock_ver_reg) | ||
48 | reg = upa_readb(p->clock_ver_reg); | ||
49 | if (reg) { | ||
50 | if (reg & 0x80) | ||
51 | return 4; | ||
52 | else | ||
53 | return 5; | ||
54 | } | ||
55 | /* Fallthrough */ | ||
56 | default: | ||
57 | return 4; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static int __devinit clock_board_probe(struct of_device *op, | ||
62 | const struct of_device_id *match) | ||
63 | { | ||
64 | struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
65 | int err = -ENOMEM; | ||
66 | |||
67 | if (!p) { | ||
68 | printk(KERN_ERR "clock_board: Cannot allocate struct clock_board\n"); | ||
69 | goto out; | ||
70 | } | ||
71 | |||
72 | p->clock_freq_regs = of_ioremap(&op->resource[0], 0, | ||
73 | resource_size(&op->resource[0]), | ||
74 | "clock_board_freq"); | ||
75 | if (!p->clock_freq_regs) { | ||
76 | printk(KERN_ERR "clock_board: Cannot map clock_freq_regs\n"); | ||
77 | goto out_free; | ||
78 | } | ||
79 | |||
80 | p->clock_regs = of_ioremap(&op->resource[1], 0, | ||
81 | resource_size(&op->resource[1]), | ||
82 | "clock_board_regs"); | ||
83 | if (!p->clock_regs) { | ||
84 | printk(KERN_ERR "clock_board: Cannot map clock_regs\n"); | ||
85 | goto out_unmap_clock_freq_regs; | ||
86 | } | ||
87 | |||
88 | if (op->resource[2].flags) { | ||
89 | p->clock_ver_reg = of_ioremap(&op->resource[2], 0, | ||
90 | resource_size(&op->resource[2]), | ||
91 | "clock_ver_reg"); | ||
92 | if (!p->clock_ver_reg) { | ||
93 | printk(KERN_ERR "clock_board: Cannot map clock_ver_reg\n"); | ||
94 | goto out_unmap_clock_regs; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | p->num_slots = clock_board_calc_nslots(p); | ||
99 | |||
100 | p->leds_resource.start = (unsigned long) | ||
101 | (p->clock_regs + CLOCK_CTRL); | ||
102 | p->leds_resource.end = p->leds_resource.end; | ||
103 | p->leds_resource.name = "leds"; | ||
104 | |||
105 | p->leds_pdev.name = "sunfire-clockboard-leds"; | ||
106 | p->leds_pdev.resource = &p->leds_resource; | ||
107 | p->leds_pdev.num_resources = 1; | ||
108 | p->leds_pdev.dev.parent = &op->dev; | ||
109 | |||
110 | err = platform_device_register(&p->leds_pdev); | ||
111 | if (err) { | ||
112 | printk(KERN_ERR "clock_board: Could not register LEDS " | ||
113 | "platform device\n"); | ||
114 | goto out_unmap_clock_ver_reg; | ||
115 | } | ||
116 | |||
117 | printk(KERN_INFO "clock_board: Detected %d slot Enterprise system.\n", | ||
118 | p->num_slots); | ||
119 | |||
120 | err = 0; | ||
121 | out: | ||
122 | return err; | ||
123 | |||
124 | out_unmap_clock_ver_reg: | ||
125 | if (p->clock_ver_reg) | ||
126 | of_iounmap(&op->resource[2], p->clock_ver_reg, | ||
127 | resource_size(&op->resource[2])); | ||
128 | |||
129 | out_unmap_clock_regs: | ||
130 | of_iounmap(&op->resource[1], p->clock_regs, | ||
131 | resource_size(&op->resource[1])); | ||
132 | |||
133 | out_unmap_clock_freq_regs: | ||
134 | of_iounmap(&op->resource[0], p->clock_freq_regs, | ||
135 | resource_size(&op->resource[0])); | ||
136 | |||
137 | out_free: | ||
138 | kfree(p); | ||
139 | goto out; | ||
140 | } | ||
141 | |||
142 | static struct of_device_id __initdata clock_board_match[] = { | ||
143 | { | ||
144 | .name = "clock-board", | ||
145 | }, | ||
146 | {}, | ||
147 | }; | ||
148 | |||
149 | static struct of_platform_driver clock_board_driver = { | ||
150 | .match_table = clock_board_match, | ||
151 | .probe = clock_board_probe, | ||
152 | .driver = { | ||
153 | .name = "clock_board", | ||
154 | }, | ||
155 | }; | ||
156 | |||
157 | static int __devinit fhc_probe(struct of_device *op, | ||
158 | const struct of_device_id *match) | ||
159 | { | ||
160 | struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
161 | int err = -ENOMEM; | ||
162 | u32 reg; | ||
163 | |||
164 | if (!p) { | ||
165 | printk(KERN_ERR "fhc: Cannot allocate struct fhc\n"); | ||
166 | goto out; | ||
167 | } | ||
168 | |||
169 | if (!strcmp(op->node->parent->name, "central")) | ||
170 | p->central = true; | ||
171 | |||
172 | p->pregs = of_ioremap(&op->resource[0], 0, | ||
173 | resource_size(&op->resource[0]), | ||
174 | "fhc_pregs"); | ||
175 | if (!p->pregs) { | ||
176 | printk(KERN_ERR "fhc: Cannot map pregs\n"); | ||
177 | goto out_free; | ||
178 | } | ||
179 | |||
180 | if (p->central) { | ||
181 | reg = upa_readl(p->pregs + FHC_PREGS_BSR); | ||
182 | p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e); | ||
183 | } else { | ||
184 | p->board_num = of_getintprop_default(op->node, "board#", -1); | ||
185 | if (p->board_num == -1) { | ||
186 | printk(KERN_ERR "fhc: No board# property\n"); | ||
187 | goto out_unmap_pregs; | ||
188 | } | ||
189 | if (upa_readl(p->pregs + FHC_PREGS_JCTRL) & FHC_JTAG_CTRL_MENAB) | ||
190 | p->jtag_master = true; | ||
191 | } | ||
192 | |||
193 | if (!p->central) { | ||
194 | p->leds_resource.start = (unsigned long) | ||
195 | (p->pregs + FHC_PREGS_CTRL); | ||
196 | p->leds_resource.end = p->leds_resource.end; | ||
197 | p->leds_resource.name = "leds"; | ||
198 | |||
199 | p->leds_pdev.name = "sunfire-fhc-leds"; | ||
200 | p->leds_pdev.resource = &p->leds_resource; | ||
201 | p->leds_pdev.num_resources = 1; | ||
202 | p->leds_pdev.dev.parent = &op->dev; | ||
203 | |||
204 | err = platform_device_register(&p->leds_pdev); | ||
205 | if (err) { | ||
206 | printk(KERN_ERR "fhc: Could not register LEDS " | ||
207 | "platform device\n"); | ||
208 | goto out_unmap_pregs; | ||
209 | } | ||
210 | } | ||
211 | reg = upa_readl(p->pregs + FHC_PREGS_CTRL); | ||
212 | |||
213 | if (!p->central) | ||
214 | reg |= FHC_CONTROL_IXIST; | ||
215 | |||
216 | reg &= ~(FHC_CONTROL_AOFF | | ||
217 | FHC_CONTROL_BOFF | | ||
218 | FHC_CONTROL_SLINE); | ||
219 | |||
220 | upa_writel(reg, p->pregs + FHC_PREGS_CTRL); | ||
221 | upa_readl(p->pregs + FHC_PREGS_CTRL); | ||
222 | |||
223 | reg = upa_readl(p->pregs + FHC_PREGS_ID); | ||
224 | printk(KERN_INFO "fhc: Board #%d, Version[%x] PartID[%x] Manuf[%x] %s\n", | ||
225 | p->board_num, | ||
226 | (reg & FHC_ID_VERS) >> 28, | ||
227 | (reg & FHC_ID_PARTID) >> 12, | ||
228 | (reg & FHC_ID_MANUF) >> 1, | ||
229 | (p->jtag_master ? | ||
230 | "(JTAG Master)" : | ||
231 | (p->central ? "(Central)" : ""))); | ||
232 | |||
233 | err = 0; | ||
234 | |||
235 | out: | ||
236 | return err; | ||
237 | |||
238 | out_unmap_pregs: | ||
239 | of_iounmap(&op->resource[0], p->pregs, resource_size(&op->resource[0])); | ||
240 | |||
241 | out_free: | ||
242 | kfree(p); | ||
243 | goto out; | ||
244 | } | ||
245 | |||
246 | static struct of_device_id __initdata fhc_match[] = { | ||
247 | { | ||
248 | .name = "fhc", | ||
249 | }, | ||
250 | {}, | ||
251 | }; | ||
252 | |||
253 | static struct of_platform_driver fhc_driver = { | ||
254 | .match_table = fhc_match, | ||
255 | .probe = fhc_probe, | ||
256 | .driver = { | ||
257 | .name = "fhc", | ||
258 | }, | ||
259 | }; | ||
260 | |||
261 | static int __init sunfire_init(void) | ||
262 | { | ||
263 | (void) of_register_driver(&fhc_driver, &of_platform_bus_type); | ||
264 | (void) of_register_driver(&clock_board_driver, &of_platform_bus_type); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | subsys_initcall(sunfire_init); | ||
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S new file mode 100644 index 000000000000..4ee1ad420862 --- /dev/null +++ b/arch/sparc/kernel/cherrs.S | |||
@@ -0,0 +1,579 @@ | |||
1 | /* These get patched into the trap table at boot time | ||
2 | * once we know we have a cheetah processor. | ||
3 | */ | ||
4 | .globl cheetah_fecc_trap_vector | ||
5 | .type cheetah_fecc_trap_vector,#function | ||
6 | cheetah_fecc_trap_vector: | ||
7 | membar #Sync | ||
8 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 | ||
9 | andn %g1, DCU_DC | DCU_IC, %g1 | ||
10 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG | ||
11 | membar #Sync | ||
12 | sethi %hi(cheetah_fast_ecc), %g2 | ||
13 | jmpl %g2 + %lo(cheetah_fast_ecc), %g0 | ||
14 | mov 0, %g1 | ||
15 | .size cheetah_fecc_trap_vector,.-cheetah_fecc_trap_vector | ||
16 | |||
17 | .globl cheetah_fecc_trap_vector_tl1 | ||
18 | .type cheetah_fecc_trap_vector_tl1,#function | ||
19 | cheetah_fecc_trap_vector_tl1: | ||
20 | membar #Sync | ||
21 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 | ||
22 | andn %g1, DCU_DC | DCU_IC, %g1 | ||
23 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG | ||
24 | membar #Sync | ||
25 | sethi %hi(cheetah_fast_ecc), %g2 | ||
26 | jmpl %g2 + %lo(cheetah_fast_ecc), %g0 | ||
27 | mov 1, %g1 | ||
28 | .size cheetah_fecc_trap_vector_tl1,.-cheetah_fecc_trap_vector_tl1 | ||
29 | |||
30 | .globl cheetah_cee_trap_vector | ||
31 | .type cheetah_cee_trap_vector,#function | ||
32 | cheetah_cee_trap_vector: | ||
33 | membar #Sync | ||
34 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 | ||
35 | andn %g1, DCU_IC, %g1 | ||
36 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG | ||
37 | membar #Sync | ||
38 | sethi %hi(cheetah_cee), %g2 | ||
39 | jmpl %g2 + %lo(cheetah_cee), %g0 | ||
40 | mov 0, %g1 | ||
41 | .size cheetah_cee_trap_vector,.-cheetah_cee_trap_vector | ||
42 | |||
43 | .globl cheetah_cee_trap_vector_tl1 | ||
44 | .type cheetah_cee_trap_vector_tl1,#function | ||
45 | cheetah_cee_trap_vector_tl1: | ||
46 | membar #Sync | ||
47 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 | ||
48 | andn %g1, DCU_IC, %g1 | ||
49 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG | ||
50 | membar #Sync | ||
51 | sethi %hi(cheetah_cee), %g2 | ||
52 | jmpl %g2 + %lo(cheetah_cee), %g0 | ||
53 | mov 1, %g1 | ||
54 | .size cheetah_cee_trap_vector_tl1,.-cheetah_cee_trap_vector_tl1 | ||
55 | |||
56 | .globl cheetah_deferred_trap_vector | ||
57 | .type cheetah_deferred_trap_vector,#function | ||
58 | cheetah_deferred_trap_vector: | ||
59 | membar #Sync | ||
60 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; | ||
61 | andn %g1, DCU_DC | DCU_IC, %g1; | ||
62 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG; | ||
63 | membar #Sync; | ||
64 | sethi %hi(cheetah_deferred_trap), %g2 | ||
65 | jmpl %g2 + %lo(cheetah_deferred_trap), %g0 | ||
66 | mov 0, %g1 | ||
67 | .size cheetah_deferred_trap_vector,.-cheetah_deferred_trap_vector | ||
68 | |||
69 | .globl cheetah_deferred_trap_vector_tl1 | ||
70 | .type cheetah_deferred_trap_vector_tl1,#function | ||
71 | cheetah_deferred_trap_vector_tl1: | ||
72 | membar #Sync; | ||
73 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; | ||
74 | andn %g1, DCU_DC | DCU_IC, %g1; | ||
75 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG; | ||
76 | membar #Sync; | ||
77 | sethi %hi(cheetah_deferred_trap), %g2 | ||
78 | jmpl %g2 + %lo(cheetah_deferred_trap), %g0 | ||
79 | mov 1, %g1 | ||
80 | .size cheetah_deferred_trap_vector_tl1,.-cheetah_deferred_trap_vector_tl1 | ||
81 | |||
82 | /* Cheetah+ specific traps. These are for the new I/D cache parity | ||
83 | * error traps. The first argument to cheetah_plus_parity_handler | ||
84 | * is encoded as follows: | ||
85 | * | ||
86 | * Bit0: 0=dcache,1=icache | ||
87 | * Bit1: 0=recoverable,1=unrecoverable | ||
88 | */ | ||
89 | .globl cheetah_plus_dcpe_trap_vector | ||
90 | .type cheetah_plus_dcpe_trap_vector,#function | ||
91 | cheetah_plus_dcpe_trap_vector: | ||
92 | membar #Sync | ||
93 | sethi %hi(do_cheetah_plus_data_parity), %g7 | ||
94 | jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0 | ||
95 | nop | ||
96 | nop | ||
97 | nop | ||
98 | nop | ||
99 | nop | ||
100 | .size cheetah_plus_dcpe_trap_vector,.-cheetah_plus_dcpe_trap_vector | ||
101 | |||
102 | .type do_cheetah_plus_data_parity,#function | ||
103 | do_cheetah_plus_data_parity: | ||
104 | rdpr %pil, %g2 | ||
105 | wrpr %g0, PIL_NORMAL_MAX, %pil | ||
106 | ba,pt %xcc, etrap_irq | ||
107 | rd %pc, %g7 | ||
108 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
109 | call trace_hardirqs_off | ||
110 | nop | ||
111 | #endif | ||
112 | mov 0x0, %o0 | ||
113 | call cheetah_plus_parity_error | ||
114 | add %sp, PTREGS_OFF, %o1 | ||
115 | ba,a,pt %xcc, rtrap_irq | ||
116 | .size do_cheetah_plus_data_parity,.-do_cheetah_plus_data_parity | ||
117 | |||
118 | .globl cheetah_plus_dcpe_trap_vector_tl1 | ||
119 | .type cheetah_plus_dcpe_trap_vector_tl1,#function | ||
120 | cheetah_plus_dcpe_trap_vector_tl1: | ||
121 | membar #Sync | ||
122 | wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate | ||
123 | sethi %hi(do_dcpe_tl1), %g3 | ||
124 | jmpl %g3 + %lo(do_dcpe_tl1), %g0 | ||
125 | nop | ||
126 | nop | ||
127 | nop | ||
128 | nop | ||
129 | .size cheetah_plus_dcpe_trap_vector_tl1,.-cheetah_plus_dcpe_trap_vector_tl1 | ||
130 | |||
131 | .globl cheetah_plus_icpe_trap_vector | ||
132 | .type cheetah_plus_icpe_trap_vector,#function | ||
133 | cheetah_plus_icpe_trap_vector: | ||
134 | membar #Sync | ||
135 | sethi %hi(do_cheetah_plus_insn_parity), %g7 | ||
136 | jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0 | ||
137 | nop | ||
138 | nop | ||
139 | nop | ||
140 | nop | ||
141 | nop | ||
142 | .size cheetah_plus_icpe_trap_vector,.-cheetah_plus_icpe_trap_vector | ||
143 | |||
144 | .type do_cheetah_plus_insn_parity,#function | ||
145 | do_cheetah_plus_insn_parity: | ||
146 | rdpr %pil, %g2 | ||
147 | wrpr %g0, PIL_NORMAL_MAX, %pil | ||
148 | ba,pt %xcc, etrap_irq | ||
149 | rd %pc, %g7 | ||
150 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
151 | call trace_hardirqs_off | ||
152 | nop | ||
153 | #endif | ||
154 | mov 0x1, %o0 | ||
155 | call cheetah_plus_parity_error | ||
156 | add %sp, PTREGS_OFF, %o1 | ||
157 | ba,a,pt %xcc, rtrap_irq | ||
158 | .size do_cheetah_plus_insn_parity,.-do_cheetah_plus_insn_parity | ||
159 | |||
160 | .globl cheetah_plus_icpe_trap_vector_tl1 | ||
161 | .type cheetah_plus_icpe_trap_vector_tl1,#function | ||
162 | cheetah_plus_icpe_trap_vector_tl1: | ||
163 | membar #Sync | ||
164 | wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate | ||
165 | sethi %hi(do_icpe_tl1), %g3 | ||
166 | jmpl %g3 + %lo(do_icpe_tl1), %g0 | ||
167 | nop | ||
168 | nop | ||
169 | nop | ||
170 | nop | ||
171 | .size cheetah_plus_icpe_trap_vector_tl1,.-cheetah_plus_icpe_trap_vector_tl1 | ||
172 | |||
173 | /* If we take one of these traps when tl >= 1, then we | ||
174 | * jump to interrupt globals. If some trap level above us | ||
175 | * was also using interrupt globals, we cannot recover. | ||
176 | * We may use all interrupt global registers except %g6. | ||
177 | */ | ||
178 | .globl do_dcpe_tl1 | ||
179 | .type do_dcpe_tl1,#function | ||
180 | do_dcpe_tl1: | ||
181 | rdpr %tl, %g1 ! Save original trap level | ||
182 | mov 1, %g2 ! Setup TSTATE checking loop | ||
183 | sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit | ||
184 | 1: wrpr %g2, %tl ! Set trap level to check | ||
185 | rdpr %tstate, %g4 ! Read TSTATE for this level | ||
186 | andcc %g4, %g3, %g0 ! Interrupt globals in use? | ||
187 | bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable | ||
188 | wrpr %g1, %tl ! Restore original trap level | ||
189 | add %g2, 1, %g2 ! Next trap level | ||
190 | cmp %g2, %g1 ! Hit them all yet? | ||
191 | ble,pt %icc, 1b ! Not yet | ||
192 | nop | ||
193 | wrpr %g1, %tl ! Restore original trap level | ||
194 | do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | ||
195 | sethi %hi(dcache_parity_tl1_occurred), %g2 | ||
196 | lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1 | ||
197 | add %g1, 1, %g1 | ||
198 | stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)] | ||
199 | /* Reset D-cache parity */ | ||
200 | sethi %hi(1 << 16), %g1 ! D-cache size | ||
201 | mov (1 << 5), %g2 ! D-cache line size | ||
202 | sub %g1, %g2, %g1 ! Move down 1 cacheline | ||
203 | 1: srl %g1, 14, %g3 ! Compute UTAG | ||
204 | membar #Sync | ||
205 | stxa %g3, [%g1] ASI_DCACHE_UTAG | ||
206 | membar #Sync | ||
207 | sub %g2, 8, %g3 ! 64-bit data word within line | ||
208 | 2: membar #Sync | ||
209 | stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA | ||
210 | membar #Sync | ||
211 | subcc %g3, 8, %g3 ! Next 64-bit data word | ||
212 | bge,pt %icc, 2b | ||
213 | nop | ||
214 | subcc %g1, %g2, %g1 ! Next cacheline | ||
215 | bge,pt %icc, 1b | ||
216 | nop | ||
217 | ba,pt %xcc, dcpe_icpe_tl1_common | ||
218 | nop | ||
219 | |||
220 | do_dcpe_tl1_fatal: | ||
221 | sethi %hi(1f), %g7 | ||
222 | ba,pt %xcc, etraptl1 | ||
223 | 1: or %g7, %lo(1b), %g7 | ||
224 | mov 0x2, %o0 | ||
225 | call cheetah_plus_parity_error | ||
226 | add %sp, PTREGS_OFF, %o1 | ||
227 | ba,pt %xcc, rtrap | ||
228 | nop | ||
229 | .size do_dcpe_tl1,.-do_dcpe_tl1 | ||
230 | |||
231 | .globl do_icpe_tl1 | ||
232 | .type do_icpe_tl1,#function | ||
233 | do_icpe_tl1: | ||
234 | rdpr %tl, %g1 ! Save original trap level | ||
235 | mov 1, %g2 ! Setup TSTATE checking loop | ||
236 | sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit | ||
237 | 1: wrpr %g2, %tl ! Set trap level to check | ||
238 | rdpr %tstate, %g4 ! Read TSTATE for this level | ||
239 | andcc %g4, %g3, %g0 ! Interrupt globals in use? | ||
240 | bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable | ||
241 | wrpr %g1, %tl ! Restore original trap level | ||
242 | add %g2, 1, %g2 ! Next trap level | ||
243 | cmp %g2, %g1 ! Hit them all yet? | ||
244 | ble,pt %icc, 1b ! Not yet | ||
245 | nop | ||
246 | wrpr %g1, %tl ! Restore original trap level | ||
247 | do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | ||
248 | sethi %hi(icache_parity_tl1_occurred), %g2 | ||
249 | lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1 | ||
250 | add %g1, 1, %g1 | ||
251 | stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)] | ||
252 | /* Flush I-cache */ | ||
253 | sethi %hi(1 << 15), %g1 ! I-cache size | ||
254 | mov (1 << 5), %g2 ! I-cache line size | ||
255 | sub %g1, %g2, %g1 | ||
256 | 1: or %g1, (2 << 3), %g3 | ||
257 | stxa %g0, [%g3] ASI_IC_TAG | ||
258 | membar #Sync | ||
259 | subcc %g1, %g2, %g1 | ||
260 | bge,pt %icc, 1b | ||
261 | nop | ||
262 | ba,pt %xcc, dcpe_icpe_tl1_common | ||
263 | nop | ||
264 | |||
265 | do_icpe_tl1_fatal: | ||
266 | sethi %hi(1f), %g7 | ||
267 | ba,pt %xcc, etraptl1 | ||
268 | 1: or %g7, %lo(1b), %g7 | ||
269 | mov 0x3, %o0 | ||
270 | call cheetah_plus_parity_error | ||
271 | add %sp, PTREGS_OFF, %o1 | ||
272 | ba,pt %xcc, rtrap | ||
273 | nop | ||
274 | .size do_icpe_tl1,.-do_icpe_tl1 | ||
275 | |||
276 | .type dcpe_icpe_tl1_common,#function | ||
277 | dcpe_icpe_tl1_common: | ||
278 | /* Flush D-cache, re-enable D/I caches in DCU and finally | ||
279 | * retry the trapping instruction. | ||
280 | */ | ||
281 | sethi %hi(1 << 16), %g1 ! D-cache size | ||
282 | mov (1 << 5), %g2 ! D-cache line size | ||
283 | sub %g1, %g2, %g1 | ||
284 | 1: stxa %g0, [%g1] ASI_DCACHE_TAG | ||
285 | membar #Sync | ||
286 | subcc %g1, %g2, %g1 | ||
287 | bge,pt %icc, 1b | ||
288 | nop | ||
289 | ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 | ||
290 | or %g1, (DCU_DC | DCU_IC), %g1 | ||
291 | stxa %g1, [%g0] ASI_DCU_CONTROL_REG | ||
292 | membar #Sync | ||
293 | retry | ||
294 | .size dcpe_icpe_tl1_common,.-dcpe_icpe_tl1_common | ||
295 | |||
	/* Capture I/D/E-cache state into per-cpu error scoreboard.
	 *
	 * %g1: (TL>=0) ? 1 : 0
	 * %g2: scratch
	 * %g3: scratch
	 * %g4: AFSR
	 * %g5: AFAR
	 * %g6: unused, will have current thread ptr after etrap
	 * %g7: scratch
	 *
	 * Falls through to one of c_fast_ecc/c_cee/c_deferred based on
	 * the trap type in %tt (see label 80 at the bottom).
	 */
	.type		__cheetah_log_error,#function
__cheetah_log_error:
	/* Put "TL1" software bit into AFSR (bit 63 is unused by hw). */
	and		%g1, 0x1, %g1
	sllx		%g1, 63, %g2
	or		%g4, %g2, %g4

	/* Get log entry pointer for this cpu at this trap level.
	 * The cpu's bus agent ID indexes the log; its width depends
	 * on the bus type.
	 */
	BRANCH_IF_JALAPENO(g2,g3,50f)
	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2
	srlx		%g2, 17, %g2
	ba,pt		%xcc, 60f
	and		%g2, 0x3ff, %g2		! Safari: 10-bit agent ID

50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2
	srlx		%g2, 17, %g2
	and		%g2, 0x1f, %g2		! JBUS: 5-bit agent ID

60:	sllx		%g2, 9, %g2		! 512 bytes of log per cpu
	sethi		%hi(cheetah_error_log), %g3
	ldx		[%g3 + %lo(cheetah_error_log)], %g3
	brz,pn		%g3, 80f		! log buffer not allocated yet
	nop

	add		%g3, %g2, %g3		! this cpu's log area
	sllx		%g1, 8, %g1		! 256 bytes per trap level
	add		%g3, %g1, %g1

	/* %g1 holds pointer to the top of the logging scoreboard */
	ldx		[%g1 + 0x0], %g7
	cmp		%g7, -1			! -1 means slot is free
	bne,pn		%xcc, 80f		! slot busy, skip logging
	nop

	stx		%g4, [%g1 + 0x0]	! record AFSR
	stx		%g5, [%g1 + 0x8]	! record AFAR
	add		%g1, 0x10, %g1

	/* %g1 now points to D-cache logging area */
	set		0x3ff8, %g2		/* DC_addr mask */
	and		%g5, %g2, %g2		/* DC_addr bits of AFAR */
	srlx		%g5, 12, %g3
	or		%g3, 1, %g3		/* PHYS tag + valid */

	/* Walk all 4 D-cache ways looking for a tag match. */
10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7
	cmp		%g3, %g7		/* TAG match? */
	bne,pt		%xcc, 13f
	nop

	/* Yep, what we want, capture state. */
	stx		%g2, [%g1 + 0x20]	! matching line's index
	stx		%g7, [%g1 + 0x28]	! its tag

	/* A membar Sync is required before and after utag access. */
	membar		#Sync
	ldxa		[%g2] ASI_DCACHE_UTAG, %g7
	membar		#Sync
	stx		%g7, [%g1 + 0x30]
	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7
	stx		%g7, [%g1 + 0x38]
	clr		%g3

	/* Dump the 4 8-byte data words of the matching line. */
12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7
	stx		%g7, [%g1]
	add		%g3, (1 << 5), %g3
	cmp		%g3, (4 << 5)
	bl,pt		%xcc, 12b
	add		%g1, 0x8, %g1		! delay slot: advance log ptr

	ba,pt		%xcc, 20f
	add		%g1, 0x20, %g1

	/* No match in this way; advance to next way (16KB apart). */
13:	sethi		%hi(1 << 14), %g7
	add		%g2, %g7, %g2
	srlx		%g2, 14, %g7
	cmp		%g7, 4			! tried all 4 ways?
	bl,pt		%xcc, 10b
	nop

	add		%g1, 0x40, %g1		! no match: skip D$ data slots

	/* %g1 now points to I-cache logging area */
20:	set		0x1fe0, %g2		/* IC_addr mask */
	and		%g5, %g2, %g2		/* IC_addr bits of AFAR */
	sllx		%g2, 1, %g2		/* IC_addr[13:6]==VA[12:5] */
	srlx		%g5, (13 - 8), %g3	/* Make PTAG */
	andn		%g3, 0xff, %g3		/* Mask off undefined bits */

	/* Walk the I-cache ways looking for a PTAG match. */
21:	ldxa		[%g2] ASI_IC_TAG, %g7
	andn		%g7, 0xff, %g7
	cmp		%g3, %g7
	bne,pt		%xcc, 23f
	nop

	/* Yep, what we want, capture state.  The four successive
	 * ASI_IC_TAG reads (8 bytes apart) return the different tag
	 * components of the matching line.
	 */
	stx		%g2, [%g1 + 0x40]
	stx		%g7, [%g1 + 0x48]
	add		%g2, (1 << 3), %g2
	ldxa		[%g2] ASI_IC_TAG, %g7
	add		%g2, (1 << 3), %g2
	stx		%g7, [%g1 + 0x50]
	ldxa		[%g2] ASI_IC_TAG, %g7
	add		%g2, (1 << 3), %g2
	stx		%g7, [%g1 + 0x60]
	ldxa		[%g2] ASI_IC_TAG, %g7
	stx		%g7, [%g1 + 0x68]
	sub		%g2, (3 << 3), %g2	! back to base of line
	ldxa		[%g2] ASI_IC_STAG, %g7
	stx		%g7, [%g1 + 0x58]
	clr		%g3
	srlx		%g2, 2, %g2		! convert to instr-array index

	/* Dump the 8 instruction words of the matching line. */
22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7
	stx		%g7, [%g1]
	add		%g3, (1 << 3), %g3
	cmp		%g3, (8 << 3)
	bl,pt		%xcc, 22b
	add		%g1, 0x8, %g1		! delay slot: advance log ptr

	ba,pt		%xcc, 30f
	add		%g1, 0x30, %g1

	/* No match in this way; advance to next way (16KB apart). */
23:	sethi		%hi(1 << 14), %g7
	add		%g2, %g7, %g2
	srlx		%g2, 14, %g7
	cmp		%g7, 4
	bl,pt		%xcc, 21b
	nop

	add		%g1, 0x70, %g1		! no match: skip I$ log slots

	/* %g1 now points to E-cache logging area */
30:	andn		%g5, (32 - 1), %g2	! 32-byte align AFAR
	stx		%g2, [%g1 + 0x20]
	ldxa		[%g2] ASI_EC_TAG_DATA, %g7
	stx		%g7, [%g1 + 0x28]
	ldxa		[%g2] ASI_EC_R, %g0	! latch E$ line into staging regs
	clr		%g3

	/* Dump the 32 bytes of staged E-cache data. */
31:	ldxa		[%g3] ASI_EC_DATA, %g7
	stx		%g7, [%g1 + %g3]
	add		%g3, 0x8, %g3
	cmp		%g3, 0x20

	bl,pt		%xcc, 31b
	nop
80:
	/* Dispatch on trap type: 0x70 = fast ECC, 0x63 = correctable
	 * ECC, everything else = deferred error.  Note the cmp sits in
	 * the delay slot of the first be, so it executes either way.
	 */
	rdpr		%tt, %g2
	cmp		%g2, 0x70
	be		c_fast_ecc
	cmp		%g2, 0x63
	be		c_cee
	nop
	ba,pt		%xcc, c_deferred
	.size		__cheetah_log_error,.-__cheetah_log_error
461 | |||
	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
	 * in the trap table.  That code has done a memory barrier
	 * and has disabled both the I-cache and D-cache in the DCU
	 * control register.  The I-cache is disabled so that we may
	 * capture the corrupted cache line, and the D-cache is disabled
	 * because corrupt data may have been placed there and we don't
	 * want to reference it.
	 *
	 * %g1 is one if this trap occurred at %tl >= 1.
	 *
	 * Next, we turn off error reporting so that we don't recurse.
	 */
	.globl		cheetah_fast_ecc
	.type		cheetah_fast_ecc,#function
cheetah_fast_ecc:
	/* Mask both uncorrectable and correctable error reporting
	 * while we capture state.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR	! writing back clears logged bits
	membar		#Sync

	/* Log caches, then dispatch (via %tt) to c_fast_ecc. */
	ba,pt		%xcc, __cheetah_log_error
	nop
	.size		cheetah_fast_ecc,.-cheetah_fast_ecc
491 | |||
	/* C-level continuation of cheetah_fast_ecc: raise PIL, enter
	 * the kernel via etrap_irq and call cheetah_fecc_handler()
	 * with (pt_regs, AFSR, AFAR) — %l4/%l5 hold the AFSR/AFAR
	 * saved across etrap.
	 */
	.type		c_fast_ecc,#function
c_fast_ecc:
	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	ba,pt		%xcc, etrap_irq
	rd		%pc, %g7		! delay slot: return address for etrap
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	nop
#endif
	mov		%l4, %o1
	mov		%l5, %o2
	call		cheetah_fecc_handler
	add		%sp, PTREGS_OFF, %o0	! delay slot: arg0 = pt_regs
	ba,a,pt		%xcc, rtrap_irq
	.size		c_fast_ecc,.-c_fast_ecc
508 | |||
	/* Our caller has disabled I-cache and performed membar Sync. */
	.globl		cheetah_cee
	.type		cheetah_cee,#function
cheetah_cee:
	/* Correctable error: mask only CEEN; uncorrectable errors
	 * must still be reported.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR	! writing back clears logged bits
	membar		#Sync

	/* Log caches, then dispatch (via %tt) to c_cee. */
	ba,pt		%xcc, __cheetah_log_error
	nop
	.size		cheetah_cee,.-cheetah_cee
527 | |||
	/* C-level continuation of cheetah_cee: raise PIL, etrap and
	 * call cheetah_cee_handler() with (pt_regs, AFSR, AFAR).
	 */
	.type		c_cee,#function
c_cee:
	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	ba,pt		%xcc, etrap_irq
	rd		%pc, %g7		! delay slot: return address for etrap
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	nop
#endif
	mov		%l4, %o1
	mov		%l5, %o2
	call		cheetah_cee_handler
	add		%sp, PTREGS_OFF, %o0	! delay slot: arg0 = pt_regs
	ba,a,pt		%xcc, rtrap_irq
	.size		c_cee,.-c_cee
544 | |||
	/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
	.globl		cheetah_deferred_trap
	.type		cheetah_deferred_trap,#function
cheetah_deferred_trap:
	/* Mask both uncorrectable and correctable error reporting
	 * while we capture state.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4
	ldxa		[%g0] ASI_AFAR, %g5
	stxa		%g4, [%g0] ASI_AFSR	! writing back clears logged bits
	membar		#Sync

	/* Log caches, then dispatch (via %tt) to c_deferred. */
	ba,pt		%xcc, __cheetah_log_error
	nop
	.size		cheetah_deferred_trap,.-cheetah_deferred_trap
563 | |||
	/* C-level continuation of cheetah_deferred_trap: raise PIL,
	 * etrap and call cheetah_deferred_handler() with
	 * (pt_regs, AFSR, AFAR).
	 */
	.type		c_deferred,#function
c_deferred:
	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	ba,pt		%xcc, etrap_irq
	rd		%pc, %g7		! delay slot: return address for etrap
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	nop
#endif
	mov		%l4, %o1
	mov		%l5, %o2
	call		cheetah_deferred_handler
	add		%sp, PTREGS_OFF, %o0	! delay slot: arg0 = pt_regs
	ba,a,pt		%xcc, rtrap_irq
	.size		c_deferred,.-c_deferred
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c new file mode 100644 index 000000000000..3b9f4d6e14a9 --- /dev/null +++ b/arch/sparc/kernel/chmc.c | |||
@@ -0,0 +1,863 @@ | |||
1 | /* chmc.c: Driver for UltraSPARC-III memory controller. | ||
2 | * | ||
3 | * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <asm/spitfire.h> | ||
19 | #include <asm/chmctrl.h> | ||
20 | #include <asm/cpudata.h> | ||
21 | #include <asm/oplib.h> | ||
22 | #include <asm/prom.h> | ||
23 | #include <asm/head.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/memctrl.h> | ||
26 | |||
#define DRV_MODULE_NAME		"chmc"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"0.2"

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bus type of this machine's memory controllers (Safari vs JBUS);
 * selects which decode path get_pin_and_dimm_str() takes.
 */
static int mc_type;
#define MC_TYPE_SAFARI	1
#define MC_TYPE_JBUS	2

static dimm_printer_t us3mc_dimm_printer;

#define CHMCTRL_NDGRPS	2	/* DIMM groups per Safari controller */
#define CHMCTRL_NDIMMS	4	/* DIMMs per group */

#define CHMC_DIMMS_PER_MC	(CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
46 | |||
/* OBP memory-layout property format. */
struct chmc_obp_map {
	/* 2-bit DIMM index per 4 data-bus bits (see the Safari
	 * branch of get_pin_and_dimm_str()).
	 */
	unsigned char	dimm_map[144];
	/* Pin number for each of the 576 bus bits of a cache line. */
	unsigned char	pin_map[576];
};

#define DIMM_LABEL_SZ	8

struct chmc_obp_mem_layout {
	/* One max 8-byte string label per DIMM.  Usually
	 * this matches the label on the motherboard where
	 * that DIMM resides.
	 */
	char		dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ];

	/* If symmetric use map[0], else it is
	 * asymmetric and map[1] should be used.
	 */
	char		symmetric;

	struct chmc_obp_map map[2];
};

#define CHMCTRL_NBANKS	4

/* Decoded copy of one logical-bank decode register. */
struct chmc_bank_info {
	struct chmc	*p;		/* owning controller */
	int		bank_id;	/* globally unique bank number */

	u64		raw_reg;	/* raw decode register value */
	int		valid;		/* bank enabled? */
	int		uk;		/* upper "don't care" mask bits */
	int		um;		/* upper match bits */
	int		lk;		/* lower "don't care" mask bits */
	int		lm;		/* lower match bits */
	int		interleave;	/* interleave factor derived from lk */
	unsigned long	base;		/* decoded base physical address */
	unsigned long	size;		/* decoded bank size in bytes */
};

/* Per-controller state for a Safari (UltraSPARC-III) mem controller. */
struct chmc {
	struct list_head	list;		/* link on mctrl_list */
	int			portid;

	struct chmc_obp_mem_layout layout_prop;
	int			layout_size;

	void __iomem		*regs;

	u64			timing_control1;
	u64			timing_control2;
	u64			timing_control3;
	u64			timing_control4;
	u64			memaddr_control;

	struct chmc_bank_info	logical_banks[CHMCTRL_NBANKS];
};
104 | |||
#define JBUSMC_REGS_SIZE	8

/* Bit layout of JBUS memory-control-register-1. */
#define JB_MC_REG1_DIMM2_BANK3		0x8000000000000000UL
#define JB_MC_REG1_DIMM1_BANK1		0x4000000000000000UL
#define JB_MC_REG1_DIMM2_BANK2		0x2000000000000000UL
#define JB_MC_REG1_DIMM1_BANK0		0x1000000000000000UL
#define JB_MC_REG1_XOR			0x0000010000000000UL
#define JB_MC_REG1_ADDR_GEN_2		0x000000e000000000UL
#define JB_MC_REG1_ADDR_GEN_2_SHIFT	37
#define JB_MC_REG1_ADDR_GEN_1		0x0000001c00000000UL
#define JB_MC_REG1_ADDR_GEN_1_SHIFT	34
#define JB_MC_REG1_INTERLEAVE		0x0000000001800000UL
#define JB_MC_REG1_INTERLEAVE_SHIFT	23
#define JB_MC_REG1_DIMM2_PTYPE		0x0000000000200000UL
#define JB_MC_REG1_DIMM2_PTYPE_SHIFT	21
#define JB_MC_REG1_DIMM1_PTYPE		0x0000000000100000UL
#define JB_MC_REG1_DIMM1_PTYPE_SHIFT	20

#define PART_TYPE_X8		0
#define PART_TYPE_X4		1

#define INTERLEAVE_NONE		0
#define INTERLEAVE_SAME		1
#define INTERLEAVE_INTERNAL	2
#define INTERLEAVE_BOTH		3

#define ADDR_GEN_128MB		0
#define ADDR_GEN_256MB		1
#define ADDR_GEN_512MB		2
#define ADDR_GEN_1GB		3

#define JB_NUM_DIMM_GROUPS	2
#define JB_NUM_DIMMS_PER_GROUP	2
#define JB_NUM_DIMMS		(JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP)

/* OBP memory-layout property format for JBUS controllers. */
struct jbusmc_obp_map {
	unsigned char	dimm_map[18];	/* 1-bit DIMM index per bus bit */
	unsigned char	pin_map[144];	/* pin number per bus bit */
};

struct jbusmc_obp_mem_layout {
	/* One max 8-byte string label per DIMM.  Usually
	 * this matches the label on the motherboard where
	 * that DIMM resides.
	 */
	char		dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ];

	/* If symmetric use map[0], else it is
	 * asymmetric and map[1] should be used.
	 */
	char		symmetric;

	struct jbusmc_obp_map map;

	char		_pad;
};

/* One pair of DIMMs decoded by a JBUS controller. */
struct jbusmc_dimm_group {
	struct jbusmc		*controller;
	int			index;		/* 0 or 1 within controller */
	u64			base_addr;
	u64			size;
};

/* Per-controller state for a JBUS (UltraSPARC-IIIi) mem controller. */
struct jbusmc {
	void __iomem		*regs;
	u64			mc_reg_1;
	u32			portid;
	struct jbusmc_obp_mem_layout layout;
	int			layout_len;
	int			num_dimm_groups;
	struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS];
	struct list_head	list;
};

/* Global list of all discovered controllers (chmc or jbusmc),
 * protected by mctrl_list_lock.
 */
static DEFINE_SPINLOCK(mctrl_list_lock);
static LIST_HEAD(mctrl_list);
182 | |||
/* Register a controller on the global list under mctrl_list_lock. */
static void mc_list_add(struct list_head *list)
{
	spin_lock(&mctrl_list_lock);
	list_add(list, &mctrl_list);
	spin_unlock(&mctrl_list_lock);
}
189 | |||
/* Unregister a controller from the global list under mctrl_list_lock. */
static void mc_list_del(struct list_head *list)
{
	spin_lock(&mctrl_list_lock);
	list_del_init(list);
	spin_unlock(&mctrl_list_lock);
}
196 | |||
/* Valid syndrome codes run from SYNDROME_MIN to SYNDROME_MAX;
 * SYNDROME_MIN (-1) is the sentinel for a multi-bit error.
 */
#define SYNDROME_MIN	-1
#define SYNDROME_MAX	144
199 | |||
/* Convert a syndrome code into the bit position the error occupies
 * within one 144-bit quadword transfer on the bus.  Codes below 128
 * name data bits; the next nine, ECC check bits; the next three,
 * MTAG bits; the remainder, MTAG ECC bits — each group lands at a
 * different offset inside the quadword.
 */
static int syndrome_to_qword_code(int syndrome_code)
{
	int code = syndrome_code;

	if (code >= 128 + 9 + 3)
		code -= (128 + 9 + 3);
	else if (code >= 128 + 9)
		code -= (128 + 9 - 4);
	else if (code >= 128)
		code -= (128 - 7);
	else
		code += 16;
	return code;
}
215 | |||
/* All this magic has to do with how a cache line comes over the wire
 * on Safari and JBUS.  A 64-bit line comes over in 1 or more quadword
 * cycles, each of which transmit ECC/MTAG info as well as the actual
 * data.
 */
#define L2_LINE_SIZE		64
#define L2_LINE_ADDR_MSK	(L2_LINE_SIZE - 1)
#define QW_PER_LINE		4
#define QW_BYTES		(L2_LINE_SIZE / QW_PER_LINE)
#define QW_BITS			144	/* data + ECC + MTAG bits per quadword */
#define SAFARI_LAST_BIT		(576 - 1)
#define JBUS_LAST_BIT		(144 - 1)
228 | |||
/* Translate a syndrome code + physical address into the DIMM label
 * and pin number of the failing bit, using the OBP memory-layout
 * property (_prop points at the bus-specific layout struct).
 *
 * @syndrome_code:    single-bit error syndrome (validated by caller)
 * @paddr:            faulting physical address
 * @pin_p:            out: failing pin number
 * @dimm_str_p:       out: pointer into the layout's label table
 * @_prop:            jbusmc_obp_mem_layout or chmc_obp_mem_layout,
 *                    selected by the global mc_type
 * @base_dimm_offset: first label index of the bank/group involved
 */
static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr,
				 int *pin_p, char **dimm_str_p, void *_prop,
				 int base_dimm_offset)
{
	int qword_code = syndrome_to_qword_code(syndrome_code);
	int cache_line_offset;
	int offset_inverse;
	int dimm_map_index;
	int map_val;

	if (mc_type == MC_TYPE_JBUS) {
		struct jbusmc_obp_mem_layout *p = _prop;

		/* JBUS: one quadword per line, 1-bit DIMM index per
		 * bus bit in dimm_map (8 entries per byte, MSB first).
		 */
		cache_line_offset = qword_code;
		offset_inverse = (JBUS_LAST_BIT - cache_line_offset);
		dimm_map_index = offset_inverse / 8;
		map_val = p->map.dimm_map[dimm_map_index];
		map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1);
		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
		*pin_p = p->map.pin_map[cache_line_offset];
	} else {
		struct chmc_obp_mem_layout *p = _prop;
		struct chmc_obp_map *mp;
		int qword;

		/* Safari: four quadwords per line, 2-bit DIMM index
		 * per bus bit in dimm_map (4 entries per byte).
		 */
		if (p->symmetric)
			mp = &p->map[0];
		else
			mp = &p->map[1];

		/* Which quadword of the line the address falls in. */
		qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES;
		cache_line_offset = ((3 - qword) * QW_BITS) + qword_code;
		offset_inverse = (SAFARI_LAST_BIT - cache_line_offset);
		dimm_map_index = offset_inverse >> 2;
		map_val = mp->dimm_map[dimm_map_index];
		map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3);
		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
		*pin_p = mp->pin_map[cache_line_offset];
	}
}
271 | |||
272 | static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) | ||
273 | { | ||
274 | struct jbusmc *p; | ||
275 | |||
276 | list_for_each_entry(p, &mctrl_list, list) { | ||
277 | int i; | ||
278 | |||
279 | for (i = 0; i < p->num_dimm_groups; i++) { | ||
280 | struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; | ||
281 | |||
282 | if (phys_addr < dp->base_addr || | ||
283 | (dp->base_addr + dp->size) <= phys_addr) | ||
284 | continue; | ||
285 | |||
286 | return dp; | ||
287 | } | ||
288 | } | ||
289 | return NULL; | ||
290 | } | ||
291 | |||
292 | static int jbusmc_print_dimm(int syndrome_code, | ||
293 | unsigned long phys_addr, | ||
294 | char *buf, int buflen) | ||
295 | { | ||
296 | struct jbusmc_obp_mem_layout *prop; | ||
297 | struct jbusmc_dimm_group *dp; | ||
298 | struct jbusmc *p; | ||
299 | int first_dimm; | ||
300 | |||
301 | dp = jbusmc_find_dimm_group(phys_addr); | ||
302 | if (dp == NULL || | ||
303 | syndrome_code < SYNDROME_MIN || | ||
304 | syndrome_code > SYNDROME_MAX) { | ||
305 | buf[0] = '?'; | ||
306 | buf[1] = '?'; | ||
307 | buf[2] = '?'; | ||
308 | buf[3] = '\0'; | ||
309 | } | ||
310 | p = dp->controller; | ||
311 | prop = &p->layout; | ||
312 | |||
313 | first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; | ||
314 | |||
315 | if (syndrome_code != SYNDROME_MIN) { | ||
316 | char *dimm_str; | ||
317 | int pin; | ||
318 | |||
319 | get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, | ||
320 | &dimm_str, prop, first_dimm); | ||
321 | sprintf(buf, "%s, pin %3d", dimm_str, pin); | ||
322 | } else { | ||
323 | int dimm; | ||
324 | |||
325 | /* Multi-bit error, we just dump out all the | ||
326 | * dimm labels associated with this dimm group. | ||
327 | */ | ||
328 | for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { | ||
329 | sprintf(buf, "%s ", | ||
330 | prop->dimm_labels[first_dimm + dimm]); | ||
331 | buf += strlen(buf); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | static u64 __devinit jbusmc_dimm_group_size(u64 base, | ||
339 | const struct linux_prom64_registers *mem_regs, | ||
340 | int num_mem_regs) | ||
341 | { | ||
342 | u64 max = base + (8UL * 1024 * 1024 * 1024); | ||
343 | u64 max_seen = base; | ||
344 | int i; | ||
345 | |||
346 | for (i = 0; i < num_mem_regs; i++) { | ||
347 | const struct linux_prom64_registers *ent; | ||
348 | u64 this_base; | ||
349 | u64 this_end; | ||
350 | |||
351 | ent = &mem_regs[i]; | ||
352 | this_base = ent->phys_addr; | ||
353 | this_end = this_base + ent->reg_size; | ||
354 | if (base < this_base || base >= this_end) | ||
355 | continue; | ||
356 | if (this_end > max) | ||
357 | this_end = max; | ||
358 | if (this_end > max_seen) | ||
359 | max_seen = this_end; | ||
360 | } | ||
361 | |||
362 | return max_seen - base; | ||
363 | } | ||
364 | |||
365 | static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, | ||
366 | unsigned long index, | ||
367 | const struct linux_prom64_registers *mem_regs, | ||
368 | int num_mem_regs) | ||
369 | { | ||
370 | struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; | ||
371 | |||
372 | dp->controller = p; | ||
373 | dp->index = index; | ||
374 | |||
375 | dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); | ||
376 | dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); | ||
377 | dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); | ||
378 | } | ||
379 | |||
380 | static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, | ||
381 | const struct linux_prom64_registers *mem_regs, | ||
382 | int num_mem_regs) | ||
383 | { | ||
384 | if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { | ||
385 | jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs); | ||
386 | p->num_dimm_groups++; | ||
387 | } | ||
388 | if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { | ||
389 | jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); | ||
390 | p->num_dimm_groups++; | ||
391 | } | ||
392 | } | ||
393 | |||
/* Probe one JBUS memory controller OF device: read its portid and
 * control register 1 from the device tree, map its registers, copy
 * the memory-layout property and construct the DIMM groups, then
 * register it on the global controller list.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM on failure (goto-cleanup
 * unwinds the mapping/allocation on the error paths).
 */
static int __devinit jbusmc_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	const struct linux_prom64_registers *mem_regs;
	struct device_node *mem_node;
	int err, len, num_mem_regs;
	struct jbusmc *p;
	const u32 *prop;
	const void *ml;

	err = -ENODEV;
	/* The OBP /memory node describes the populated physical
	 * ranges; needed to size the DIMM groups.
	 */
	mem_node = of_find_node_by_path("/memory");
	if (!mem_node) {
		printk(KERN_ERR PFX "Cannot find /memory node.\n");
		goto out;
	}
	mem_regs = of_get_property(mem_node, "reg", &len);
	if (!mem_regs) {
		printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n");
		goto out;
	}
	num_mem_regs = len / sizeof(*mem_regs);

	err = -ENOMEM;
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n");
		goto out;
	}

	INIT_LIST_HEAD(&p->list);

	err = -ENODEV;
	prop = of_get_property(op->node, "portid", &len);
	if (!prop || len != 4) {
		printk(KERN_ERR PFX "Cannot find portid.\n");
		goto out_free;
	}

	p->portid = *prop;

	prop = of_get_property(op->node, "memory-control-register-1", &len);
	if (!prop || len != 8) {
		printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
		goto out_free;
	}

	/* Property is two 32-bit cells; reassemble the 64-bit value. */
	p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];

	err = -ENOMEM;
	p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
	if (!p->regs) {
		printk(KERN_ERR PFX "Cannot map jbusmc regs.\n");
		goto out_free;
	}

	err = -ENODEV;
	ml = of_get_property(op->node, "memory-layout", &p->layout_len);
	if (!ml) {
		printk(KERN_ERR PFX "Cannot get memory layout property.\n");
		goto out_iounmap;
	}
	if (p->layout_len > sizeof(p->layout)) {
		printk(KERN_ERR PFX "Unexpected memory-layout size %d\n",
		       p->layout_len);
		goto out_iounmap;
	}
	memcpy(&p->layout, ml, p->layout_len);

	jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);

	mc_list_add(&p->list);

	printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
	       op->node->full_name);

	dev_set_drvdata(&op->dev, p);

	err = 0;

out:
	return err;

out_iounmap:
	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);

out_free:
	kfree(p);
	goto out;
}
484 | |||
485 | /* Does BANK decode PHYS_ADDR? */ | ||
486 | static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) | ||
487 | { | ||
488 | unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; | ||
489 | unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; | ||
490 | |||
491 | /* Bank must be enabled to match. */ | ||
492 | if (bp->valid == 0) | ||
493 | return 0; | ||
494 | |||
495 | /* Would BANK match upper bits? */ | ||
496 | upper_bits ^= bp->um; /* What bits are different? */ | ||
497 | upper_bits = ~upper_bits; /* Invert. */ | ||
498 | upper_bits |= bp->uk; /* What bits don't matter for matching? */ | ||
499 | upper_bits = ~upper_bits; /* Invert. */ | ||
500 | |||
501 | if (upper_bits) | ||
502 | return 0; | ||
503 | |||
504 | /* Would BANK match lower bits? */ | ||
505 | lower_bits ^= bp->lm; /* What bits are different? */ | ||
506 | lower_bits = ~lower_bits; /* Invert. */ | ||
507 | lower_bits |= bp->lk; /* What bits don't matter for matching? */ | ||
508 | lower_bits = ~lower_bits; /* Invert. */ | ||
509 | |||
510 | if (lower_bits) | ||
511 | return 0; | ||
512 | |||
513 | /* I always knew you'd be the one. */ | ||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | /* Given PHYS_ADDR, search memory controller banks for a match. */ | ||
518 | static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) | ||
519 | { | ||
520 | struct chmc *p; | ||
521 | |||
522 | list_for_each_entry(p, &mctrl_list, list) { | ||
523 | int bank_no; | ||
524 | |||
525 | for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { | ||
526 | struct chmc_bank_info *bp; | ||
527 | |||
528 | bp = &p->logical_banks[bank_no]; | ||
529 | if (chmc_bank_match(bp, phys_addr)) | ||
530 | return bp; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | return NULL; | ||
535 | } | ||
536 | |||
/* This is the main purpose of this driver. */
/* Format a human-readable DIMM description for an ECC error at
 * PHYS_ADDR with SYNDROME_CODE into BUF, for Safari controllers.
 * Emits "???" when the address or syndrome cannot be decoded; a
 * single-bit error yields the exact label and pin, a multi-bit
 * error (SYNDROME_MIN) lists all labels in the bank.  Returns 0.
 * NOTE(review): buflen is unused — callers must size buf adequately.
 */
static int chmc_print_dimm(int syndrome_code,
			   unsigned long phys_addr,
			   char *buf, int buflen)
{
	struct chmc_bank_info *bp;
	struct chmc_obp_mem_layout *prop;
	int bank_in_controller, first_dimm;

	bp = chmc_find_bank(phys_addr);
	if (bp == NULL ||
	    syndrome_code < SYNDROME_MIN ||
	    syndrome_code > SYNDROME_MAX) {
		buf[0] = '?';
		buf[1] = '?';
		buf[2] = '?';
		buf[3] = '\0';
		return 0;
	}

	prop = &bp->p->layout_prop;
	/* Bank id is global; reduce to bank-within-controller, then
	 * to the first label index of its DIMM group.
	 */
	bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
	first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
	first_dimm *= CHMCTRL_NDIMMS;

	if (syndrome_code != SYNDROME_MIN) {
		char *dimm_str;
		int pin;

		get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
				     &dimm_str, prop, first_dimm);
		sprintf(buf, "%s, pin %3d", dimm_str, pin);
	} else {
		int dimm;

		/* Multi-bit error, we just dump out all the
		 * dimm labels associated with this bank.
		 */
		for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
			sprintf(buf, "%s ",
				prop->dimm_labels[first_dimm + dimm]);
			buf += strlen(buf);
		}
	}
	return 0;
}
583 | |||
/* Accessing the registers is slightly complicated.  If you want
 * to get at the memory controller which is on the same processor
 * the code is executing, you must use special ASI load/store else
 * you go through the global mapping.
 */
static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
{
	unsigned long ret, this_cpu;

	/* Preemption must stay off so the cpu cannot change between
	 * the id check and the ASI access below.
	 */
	preempt_disable();

	this_cpu = real_hard_smp_processor_id();

	if (p->portid == this_cpu) {
		/* Local controller: privileged MCU control ASI. */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* Remote controller: physical-bypass access through
		 * the globally mapped register area.
		 */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (p->regs + offset),
				       "i" (ASI_PHYS_BYPASS_EC_E));
	}

	preempt_enable();

	return ret;
}
612 | |||
#if 0 /* currently unused */
/* Write VAL to memory controller register OFFSET of controller P.
 * Mirrors chmc_read_mcreg(): the local controller is reached via
 * ASI_MCU_CTRL_REG, remote ones via the global physical mapping.
 * NOTE(review): unlike the read path, no preemption disable here —
 * verify before re-enabling this code.
 */
static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
{
	if (p->portid == smp_processor_id()) {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
					 "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* Bug fix: this is a store, so the mnemonic must be
		 * "stxa" — the original "ldxa" was a load mnemonic
		 * written with store operand order and would not
		 * assemble in this form.
		 */
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
					 "r" (p->regs + offset),
					 "i" (ASI_PHYS_BYPASS_EC_E));
	}
}
#endif
628 | |||
629 | static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) | ||
630 | { | ||
631 | struct chmc_bank_info *bp = &p->logical_banks[which_bank]; | ||
632 | |||
633 | bp->p = p; | ||
634 | bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; | ||
635 | bp->raw_reg = val; | ||
636 | bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; | ||
637 | bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; | ||
638 | bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; | ||
639 | bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; | ||
640 | bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; | ||
641 | |||
642 | bp->base = (bp->um); | ||
643 | bp->base &= ~(bp->uk); | ||
644 | bp->base <<= PA_UPPER_BITS_SHIFT; | ||
645 | |||
646 | switch(bp->lk) { | ||
647 | case 0xf: | ||
648 | default: | ||
649 | bp->interleave = 1; | ||
650 | break; | ||
651 | |||
652 | case 0xe: | ||
653 | bp->interleave = 2; | ||
654 | break; | ||
655 | |||
656 | case 0xc: | ||
657 | bp->interleave = 4; | ||
658 | break; | ||
659 | |||
660 | case 0x8: | ||
661 | bp->interleave = 8; | ||
662 | break; | ||
663 | |||
664 | case 0x0: | ||
665 | bp->interleave = 16; | ||
666 | break; | ||
667 | }; | ||
668 | |||
669 | /* UK[10] is reserved, and UK[11] is not set for the SDRAM | ||
670 | * bank size definition. | ||
671 | */ | ||
672 | bp->size = (((unsigned long)bp->uk & | ||
673 | ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; | ||
674 | bp->size /= bp->interleave; | ||
675 | } | ||
676 | |||
677 | static void chmc_fetch_decode_regs(struct chmc *p) | ||
678 | { | ||
679 | if (p->layout_size == 0) | ||
680 | return; | ||
681 | |||
682 | chmc_interpret_one_decode_reg(p, 0, | ||
683 | chmc_read_mcreg(p, CHMCTRL_DECODE1)); | ||
684 | chmc_interpret_one_decode_reg(p, 1, | ||
685 | chmc_read_mcreg(p, CHMCTRL_DECODE2)); | ||
686 | chmc_interpret_one_decode_reg(p, 2, | ||
687 | chmc_read_mcreg(p, CHMCTRL_DECODE3)); | ||
688 | chmc_interpret_one_decode_reg(p, 3, | ||
689 | chmc_read_mcreg(p, CHMCTRL_DECODE4)); | ||
690 | } | ||
691 | |||
/* Probe one Safari (UltraSPARC-III) memory controller OF device:
 * bail on Jalapeno/Serrano cpus (those use the JBUS driver), read
 * portid and the memory-layout property, map the registers, snapshot
 * the timing/address-control registers and the bank decode registers,
 * then register the controller on the global list.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM on failure.
 */
static int __devinit chmc_probe(struct of_device *op,
				const struct of_device_id *match)
{
	struct device_node *dp = op->node;
	unsigned long ver;
	const void *pval;
	int len, portid;
	struct chmc *p;
	int err;

	err = -ENODEV;
	/* Identify the cpu implementation from the %ver register. */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32UL) == __JALAPENO_ID ||
	    (ver >> 32UL) == __SERRANO_ID)
		goto out;

	portid = of_getintprop_default(dp, "portid", -1);
	if (portid == -1)
		goto out;

	pval = of_get_property(dp, "memory-layout", &len);
	if (pval && len > sizeof(p->layout_prop)) {
		printk(KERN_ERR PFX "Unexpected memory-layout property "
		       "size %d.\n", len);
		goto out;
	}

	err = -ENOMEM;
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR PFX "Could not allocate struct chmc.\n");
		goto out;
	}

	p->portid = portid;
	/* NOTE(review): if pval is NULL, len may be unset by
	 * of_get_property here; it is immediately overwritten with 0
	 * below, so only the transient assignment reads it.
	 */
	p->layout_size = len;
	if (!pval)
		p->layout_size = 0;
	else
		memcpy(&p->layout_prop, pval, len);

	p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
	if (!p->regs) {
		printk(KERN_ERR PFX "Could not map registers.\n");
		goto out_free;
	}

	/* Snapshot timing/address control only for active controllers. */
	if (p->layout_size != 0UL) {
		p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
		p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
		p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
		p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
		p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
	}

	chmc_fetch_decode_regs(p);

	mc_list_add(&p->list);

	printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
	       dp->full_name,
	       (p->layout_size ? "ACTIVE" : "INACTIVE"));

	dev_set_drvdata(&op->dev, p);

	err = 0;

out:
	return err;

out_free:
	kfree(p);
	goto out;
}
766 | |||
767 | static int __devinit us3mc_probe(struct of_device *op, | ||
768 | const struct of_device_id *match) | ||
769 | { | ||
770 | if (mc_type == MC_TYPE_SAFARI) | ||
771 | return chmc_probe(op, match); | ||
772 | else if (mc_type == MC_TYPE_JBUS) | ||
773 | return jbusmc_probe(op, match); | ||
774 | return -ENODEV; | ||
775 | } | ||
776 | |||
777 | static void __devexit chmc_destroy(struct of_device *op, struct chmc *p) | ||
778 | { | ||
779 | list_del(&p->list); | ||
780 | of_iounmap(&op->resource[0], p->regs, 0x48); | ||
781 | kfree(p); | ||
782 | } | ||
783 | |||
/* Tear down one JBUS memory-controller instance: unlink it from the
 * global controller list (under the list lock, via mc_list_del), unmap
 * its register window, and free it.
 */
static void __devexit jbusmc_destroy(struct of_device *op, struct jbusmc *p)
{
	mc_list_del(&p->list);
	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
	kfree(p);
}
790 | |||
791 | static int __devexit us3mc_remove(struct of_device *op) | ||
792 | { | ||
793 | void *p = dev_get_drvdata(&op->dev); | ||
794 | |||
795 | if (p) { | ||
796 | if (mc_type == MC_TYPE_SAFARI) | ||
797 | chmc_destroy(op, p); | ||
798 | else if (mc_type == MC_TYPE_JBUS) | ||
799 | jbusmc_destroy(op, p); | ||
800 | } | ||
801 | return 0; | ||
802 | } | ||
803 | |||
/* Match any OF node named "memory-controller"; the probe routine then
 * distinguishes Safari vs. JBUS controllers itself.
 */
static const struct of_device_id us3mc_match[] = {
	{
		.name = "memory-controller",
	},
	{},
};
MODULE_DEVICE_TABLE(of, us3mc_match);
811 | |||
/* OF platform driver tying the match table to the shared probe/remove
 * entry points for both memory-controller flavors.
 */
static struct of_platform_driver us3mc_driver = {
	.name		= "us3mc",
	.match_table	= us3mc_match,
	.probe		= us3mc_probe,
	.remove		= __devexit_p(us3mc_remove),
};
818 | |||
819 | static inline bool us3mc_platform(void) | ||
820 | { | ||
821 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
822 | return true; | ||
823 | return false; | ||
824 | } | ||
825 | |||
/* Module init: on UltraSPARC-III platforms, decide whether the memory
 * controllers sit on Safari (Cheetah/Cheetah+) or JBUS
 * (Jalapeno/Serrano), install the matching DIMM-name printer, then
 * register the OF platform driver.
 */
static int __init us3mc_init(void)
{
	unsigned long ver;
	int ret;

	if (!us3mc_platform())
		return -ENODEV;

	/* %ver bits 63:32 identify the CPU implementation. */
	__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32UL) == __JALAPENO_ID ||
	    (ver >> 32UL) == __SERRANO_ID) {
		mc_type = MC_TYPE_JBUS;
		us3mc_dimm_printer = jbusmc_print_dimm;
	} else {
		mc_type = MC_TYPE_SAFARI;
		us3mc_dimm_printer = chmc_print_dimm;
	}

	ret = register_dimm_printer(us3mc_dimm_printer);

	if (!ret) {
		ret = of_register_driver(&us3mc_driver, &of_bus_type);
		/* Keep registration balanced: undo the printer hookup
		 * if driver registration fails.
		 */
		if (ret)
			unregister_dimm_printer(us3mc_dimm_printer);
	}
	return ret;
}
853 | |||
854 | static void __exit us3mc_cleanup(void) | ||
855 | { | ||
856 | if (us3mc_platform()) { | ||
857 | unregister_dimm_printer(us3mc_dimm_printer); | ||
858 | of_unregister_driver(&us3mc_driver); | ||
859 | } | ||
860 | } | ||
861 | |||
862 | module_init(us3mc_init); | ||
863 | module_exit(us3mc_cleanup); | ||
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c new file mode 100644 index 000000000000..d865575b25bf --- /dev/null +++ b/arch/sparc/kernel/compat_audit.c | |||
@@ -0,0 +1,43 @@ | |||
/* 32-bit (compat) syscall audit classification tables for sparc64.
 *
 * Defining __32bit_syscall_numbers__ makes <asm/unistd.h> expose the
 * sparc32 __NR_* values.  Each table is a ~0U-terminated list of
 * syscall numbers generated from the generic audit class headers.
 */
#define __32bit_syscall_numbers__
#include <asm/unistd.h>

/* Syscalls that create/remove/rename directory entries. */
unsigned sparc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

/* Syscalls that change file attributes (ownership, mode, xattrs). */
unsigned sparc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};

/* Syscalls that write file contents. */
unsigned sparc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

/* Syscalls that read file contents. */
unsigned sparc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};

/* Syscalls that deliver signals. */
unsigned sparc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
28 | |||
29 | int sparc32_classify_syscall(unsigned syscall) | ||
30 | { | ||
31 | switch(syscall) { | ||
32 | case __NR_open: | ||
33 | return 2; | ||
34 | case __NR_openat: | ||
35 | return 3; | ||
36 | case __NR_socketcall: | ||
37 | return 4; | ||
38 | case __NR_execve: | ||
39 | return 5; | ||
40 | default: | ||
41 | return 1; | ||
42 | } | ||
43 | } | ||
diff --git a/arch/sparc/kernel/cpu_64.c b/arch/sparc/kernel/cpu_64.c new file mode 100644 index 000000000000..0c9ac83ed0a8 --- /dev/null +++ b/arch/sparc/kernel/cpu_64.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* cpu.c: Dinky routines to look for the kind of Sparc cpu | ||
2 | * we are on. | ||
3 | * | ||
4 | * Copyright (C) 1996, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <asm/asi.h> | ||
12 | #include <asm/system.h> | ||
13 | #include <asm/fpumacro.h> | ||
14 | #include <asm/cpudata.h> | ||
15 | #include <asm/spitfire.h> | ||
16 | #include <asm/oplib.h> | ||
17 | |||
18 | #include "entry.h" | ||
19 | |||
20 | DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; | ||
21 | |||
/* One row of the CPU identification table: the manufacturer and
 * implementation fields from the %ver register, plus the human-readable
 * CPU and FPU names reported for them.
 */
struct cpu_chip_info {
	unsigned short	manuf;		/* %ver bits 63:48 */
	unsigned short	impl;		/* %ver bits 47:32 */
	const char	*cpu_name;
	const char	*fp_name;
};
28 | |||
/* Known UltraSPARC chips, keyed by (manuf, impl) from %ver.  Note the
 * UltraSparc I appears twice: it was produced under two manufacturer
 * codes (0x17 TI, 0x22).
 */
static const struct cpu_chip_info cpu_chips[] = {
	{
		.manuf		= 0x17,
		.impl		= 0x10,
		.cpu_name	= "TI UltraSparc I (SpitFire)",
		.fp_name	= "UltraSparc I integrated FPU",
	},
	{
		.manuf		= 0x22,
		.impl		= 0x10,
		.cpu_name	= "TI UltraSparc I (SpitFire)",
		.fp_name	= "UltraSparc I integrated FPU",
	},
	{
		.manuf		= 0x17,
		.impl		= 0x11,
		.cpu_name	= "TI UltraSparc II (BlackBird)",
		.fp_name	= "UltraSparc II integrated FPU",
	},
	{
		.manuf		= 0x17,
		.impl		= 0x12,
		.cpu_name	= "TI UltraSparc IIi (Sabre)",
		.fp_name	= "UltraSparc IIi integrated FPU",
	},
	{
		.manuf		= 0x17,
		.impl		= 0x13,
		.cpu_name	= "TI UltraSparc IIe (Hummingbird)",
		.fp_name	= "UltraSparc IIe integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x14,
		.cpu_name	= "TI UltraSparc III (Cheetah)",
		.fp_name	= "UltraSparc III integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x15,
		.cpu_name	= "TI UltraSparc III+ (Cheetah+)",
		.fp_name	= "UltraSparc III+ integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x16,
		.cpu_name	= "TI UltraSparc IIIi (Jalapeno)",
		.fp_name	= "UltraSparc IIIi integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x18,
		.cpu_name	= "TI UltraSparc IV (Jaguar)",
		.fp_name	= "UltraSparc IV integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x19,
		.cpu_name	= "TI UltraSparc IV+ (Panther)",
		.fp_name	= "UltraSparc IV+ integrated FPU",
	},
	{
		.manuf		= 0x3e,
		.impl		= 0x22,
		.cpu_name	= "TI UltraSparc IIIi+ (Serrano)",
		.fp_name	= "UltraSparc IIIi+ integrated FPU",
	},
};
97 | |||
/* Element count of the CPU table above.  The previous definition
 * referenced a nonexistent 'linux_sparc_chips' identifier; the table is
 * named cpu_chips, so any use of the macro would have failed to build.
 */
#define NSPARCCHIPS  ARRAY_SIZE(cpu_chips)
99 | |||
100 | const char *sparc_cpu_type; | ||
101 | const char *sparc_fpu_type; | ||
102 | |||
103 | static void __init sun4v_cpu_probe(void) | ||
104 | { | ||
105 | switch (sun4v_chip_type) { | ||
106 | case SUN4V_CHIP_NIAGARA1: | ||
107 | sparc_cpu_type = "UltraSparc T1 (Niagara)"; | ||
108 | sparc_fpu_type = "UltraSparc T1 integrated FPU"; | ||
109 | break; | ||
110 | |||
111 | case SUN4V_CHIP_NIAGARA2: | ||
112 | sparc_cpu_type = "UltraSparc T2 (Niagara2)"; | ||
113 | sparc_fpu_type = "UltraSparc T2 integrated FPU"; | ||
114 | break; | ||
115 | |||
116 | default: | ||
117 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", | ||
118 | prom_cpu_compatible); | ||
119 | sparc_cpu_type = "Unknown SUN4V CPU"; | ||
120 | sparc_fpu_type = "Unknown SUN4V FPU"; | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | static const struct cpu_chip_info * __init find_cpu_chip(unsigned short manuf, | ||
126 | unsigned short impl) | ||
127 | { | ||
128 | int i; | ||
129 | |||
130 | for (i = 0; i < ARRAY_SIZE(cpu_chips); i++) { | ||
131 | const struct cpu_chip_info *p = &cpu_chips[i]; | ||
132 | |||
133 | if (p->manuf == manuf && p->impl == impl) | ||
134 | return p; | ||
135 | } | ||
136 | return NULL; | ||
137 | } | ||
138 | |||
/* arch_initcall that fills in sparc_cpu_type/sparc_fpu_type.  sun4v
 * machines are identified through the hypervisor-provided chip type;
 * everything else is matched against the manufacturer/implementation
 * fields of the %ver register.
 */
static int __init cpu_type_probe(void)
{
	if (tlb_type == hypervisor) {
		sun4v_cpu_probe();
	} else {
		unsigned long ver, manuf, impl;
		const struct cpu_chip_info *p;

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));

		/* %ver layout: manuf in bits 63:48, impl in bits 47:32. */
		manuf = ((ver >> 48) & 0xffff);
		impl = ((ver >> 32) & 0xffff);

		p = find_cpu_chip(manuf, impl);
		if (p) {
			sparc_cpu_type = p->cpu_name;
			sparc_fpu_type = p->fp_name;
		} else {
			printk(KERN_ERR "CPU: Unknown chip, manuf[%lx] impl[%lx]\n",
			       manuf, impl);
			sparc_cpu_type = "Unknown CPU";
			sparc_fpu_type = "Unknown FPU";
		}
	}
	return 0;
}
165 | |||
166 | arch_initcall(cpu_type_probe); | ||
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c new file mode 100644 index 000000000000..f52e0534d91d --- /dev/null +++ b/arch/sparc/kernel/ds.c | |||
@@ -0,0 +1,1244 @@ | |||
1 | /* ds.c: Domain Services driver for Logical Domains | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/string.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/kthread.h> | ||
15 | #include <linux/reboot.h> | ||
16 | #include <linux/cpu.h> | ||
17 | |||
18 | #include <asm/ldc.h> | ||
19 | #include <asm/vio.h> | ||
20 | #include <asm/mdesc.h> | ||
21 | #include <asm/head.h> | ||
22 | #include <asm/irq.h> | ||
23 | |||
/* Module identity for the LDOM Domain Services driver. */
#define DRV_MODULE_NAME		"ds"
#define PFX			DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Jul 11, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
35 | |||
/* Domain Services wire protocol.  Every message starts with a
 * ds_msg_tag giving the message type and the byte length of the payload
 * that follows the tag.
 */
struct ds_msg_tag {
	__u32			type;
#define DS_INIT_REQ		0x00
#define DS_INIT_ACK		0x01
#define DS_INIT_NACK		0x02
#define DS_REG_REQ		0x03
#define DS_REG_ACK		0x04
#define DS_REG_NACK		0x05
#define DS_UNREG_REQ		0x06
#define DS_UNREG_ACK		0x07
#define DS_UNREG_NACK		0x08
#define DS_DATA			0x09
#define DS_NACK			0x0a

	__u32			len;
};

/* Result codes */
#define DS_OK			0x00
#define DS_REG_VER_NACK		0x01
#define DS_REG_DUP		0x02
#define DS_INV_HDL		0x03
#define DS_TYPE_UNKNOWN		0x04

/* Protocol version, negotiated during the initial handshake. */
struct ds_version {
	__u16			major;
	__u16			minor;
};

struct ds_ver_req {
	struct ds_msg_tag	tag;
	struct ds_version	ver;
};

struct ds_ver_ack {
	struct ds_msg_tag	tag;
	__u16			minor;
};

struct ds_ver_nack {
	struct ds_msg_tag	tag;
	__u16			major;
};

/* Service registration: the client proposes a handle and version for a
 * named service; svc_id is a variable-length string following the
 * fixed header.
 */
struct ds_reg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
	__u16			minor;
	char			svc_id[0];
};

struct ds_reg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			minor;
};

struct ds_reg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
};

struct ds_unreg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

/* Service data packet; the service-specific payload follows. */
struct ds_data {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u64			result;
};
125 | |||
struct ds_info;

/* Per-service capability state.  'handle' is the 64-bit service handle
 * (its upper 32 bits index the ds_states array — see find_cap()), and
 * 'data' is the handler invoked for DS_DATA packets addressed to this
 * service.
 */
struct ds_cap_state {
	__u64			handle;

	void			(*data)(struct ds_info *dp,
					struct ds_cap_state *cp,
					void *buf, int len);

	const char		*service_id;

	u8			state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

/* Data handlers for each supported service (bodies below). */
static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
			   void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);

/* Template copied into each ds_info; maps service-id strings to their
 * data handlers.
 */
static struct ds_cap_state ds_states_template[] = {
	{
		.service_id	= "md-update",
		.data		= md_update_data,
	},
	{
		.service_id	= "domain-shutdown",
		.data		= domain_shutdown_data,
	},
	{
		.service_id	= "domain-panic",
		.data		= domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id	= "dr-cpu",
		.data		= dr_cpu_data,
	},
#endif
	{
		.service_id	= "pri",
		.data		= ds_pri_data,
	},
	{
		.service_id	= "var-config",
		.data		= ds_var_data,
	},
	{
		.service_id	= "var-config-backup",
		.data		= ds_var_data,
	},
};

/* Serializes LDC channel writes and protects ds_info_list. */
static DEFINE_SPINLOCK(ds_lock);

/* Per-channel Domain Services instance state. */
struct ds_info {
	struct ldc_channel	*lp;
	u8			hs_state;	/* handshake progress */
#define DS_HS_START		0x01
#define DS_HS_DONE		0x02

	u64			id;

	void			*rcv_buf;
	int			rcv_buf_len;

	struct ds_cap_state	*ds_states;
	int			num_ds_states;

	struct ds_info		*next;
};

/* Singly-linked list of all active DS instances. */
static struct ds_info *ds_info_list;
215 | |||
216 | static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle) | ||
217 | { | ||
218 | unsigned int index = handle >> 32; | ||
219 | |||
220 | if (index >= dp->num_ds_states) | ||
221 | return NULL; | ||
222 | return &dp->ds_states[index]; | ||
223 | } | ||
224 | |||
225 | static struct ds_cap_state *find_cap_by_string(struct ds_info *dp, | ||
226 | const char *name) | ||
227 | { | ||
228 | int i; | ||
229 | |||
230 | for (i = 0; i < dp->num_ds_states; i++) { | ||
231 | if (strcmp(dp->ds_states[i].service_id, name)) | ||
232 | continue; | ||
233 | |||
234 | return &dp->ds_states[i]; | ||
235 | } | ||
236 | return NULL; | ||
237 | } | ||
238 | |||
239 | static int __ds_send(struct ldc_channel *lp, void *data, int len) | ||
240 | { | ||
241 | int err, limit = 1000; | ||
242 | |||
243 | err = -EINVAL; | ||
244 | while (limit-- > 0) { | ||
245 | err = ldc_write(lp, data, len); | ||
246 | if (!err || (err != -EAGAIN)) | ||
247 | break; | ||
248 | udelay(1); | ||
249 | } | ||
250 | |||
251 | return err; | ||
252 | } | ||
253 | |||
254 | static int ds_send(struct ldc_channel *lp, void *data, int len) | ||
255 | { | ||
256 | unsigned long flags; | ||
257 | int err; | ||
258 | |||
259 | spin_lock_irqsave(&ds_lock, flags); | ||
260 | err = __ds_send(lp, data, len); | ||
261 | spin_unlock_irqrestore(&ds_lock, flags); | ||
262 | |||
263 | return err; | ||
264 | } | ||
265 | |||
/* "md-update" service payloads: request carries a sequence number which
 * the response echoes along with a DS_* result code.
 */
struct ds_md_update_req {
	__u64				req_num;
};

struct ds_md_update_res {
	__u64				req_num;
	__u32				result;
};
274 | |||
/* "md-update" handler: the LDOM manager tells us the machine
 * description changed.  Re-fetch the MD, then acknowledge the request
 * with DS_OK.
 */
static void md_update_data(struct ds_info *dp,
			   struct ds_cap_state *cp,
			   void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_md_update_req *rp;
	struct {
		struct ds_data		data;
		struct ds_md_update_res	res;
	} pkt;

	/* Service payload immediately follows the ds_data header. */
	rp = (struct ds_md_update_req *) (dpkt + 1);

	printk(KERN_INFO "ds-%lu: Machine description update.\n", dp->id);

	mdesc_update();

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;

	ds_send(lp, &pkt, sizeof(pkt));
}
302 | |||
/* "domain-shutdown" service payloads; the response carries an optional
 * NUL-terminated reason string.
 */
struct ds_shutdown_req {
	__u64				req_num;
	__u32				ms_delay;
};

struct ds_shutdown_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};
313 | |||
/* "domain-shutdown" handler: acknowledge the manager's request with
 * DS_OK and an empty reason, then initiate an orderly poweroff.
 * Note: rp->ms_delay is not honored here — TODO confirm whether the
 * delay is intentionally ignored.
 */
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_shutdown_req *rp;
	struct {
		struct ds_data		data;
		struct ds_shutdown_res	res;
	} pkt;

	rp = (struct ds_shutdown_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%lu: Shutdown request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	/* Send the ack before tearing the domain down. */
	ds_send(lp, &pkt, sizeof(pkt));

	orderly_poweroff(true);
}
343 | |||
/* "domain-panic" service payloads. */
struct ds_panic_req {
	__u64				req_num;
};

struct ds_panic_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};
353 | |||
/* "domain-panic" handler: acknowledge the request with DS_OK, then
 * panic the domain as instructed by the LDOM manager.  Never returns.
 */
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_panic_req *rp;
	struct {
		struct ds_data		data;
		struct ds_panic_res	res;
	} pkt;

	rp = (struct ds_panic_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%lu: Panic request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	/* Ack first so the manager knows we received the request. */
	ds_send(lp, &pkt, sizeof(pkt));

	panic("PANIC requested by LDOM manager.");
}
383 | |||
#ifdef CONFIG_HOTPLUG_CPU
/* "dr-cpu" request header: a sequence number, an operation type, and a
 * count of 32-bit cpu-id records that follow.
 */
struct dr_cpu_tag {
	__u64				req_num;
	__u32				type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

/* Responses */
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

	__u32				num_records;
};

/* One per-cpu entry in a dr-cpu response: outcome of the operation and
 * the cpu's resulting state.  str_off is an offset to an optional
 * explanatory string.
 */
struct dr_cpu_resp_entry {
	__u32				cpu;
	__u32				result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

	__u32				stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

	__u32				str_off;
};
416 | |||
/* Send a DR_CPU_ERROR response (no per-cpu records) echoing the
 * request's sequence number.  Caller must hold ds_lock.
 */
static void __dr_cpu_send_error(struct ds_info *dp,
				struct ds_cap_state *cp,
				struct ds_data *data)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	struct {
		struct ds_data		data;
		struct dr_cpu_tag	tag;
	} pkt;
	int msg_len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.handle = cp->handle;
	pkt.tag.req_num = tag->req_num;
	pkt.tag.type = DR_CPU_ERROR;
	pkt.tag.num_records = 0;

	msg_len = (sizeof(struct ds_data) +
		   sizeof(struct dr_cpu_tag));

	/* tag.len excludes the leading ds_msg_tag itself. */
	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

	__ds_send(dp->lp, &pkt, msg_len);
}
442 | |||
443 | static void dr_cpu_send_error(struct ds_info *dp, | ||
444 | struct ds_cap_state *cp, | ||
445 | struct ds_data *data) | ||
446 | { | ||
447 | unsigned long flags; | ||
448 | |||
449 | spin_lock_irqsave(&ds_lock, flags); | ||
450 | __dr_cpu_send_error(dp, cp, data); | ||
451 | spin_unlock_irqrestore(&ds_lock, flags); | ||
452 | } | ||
453 | |||
454 | #define CPU_SENTINEL 0xffffffff | ||
455 | |||
456 | static void purge_dups(u32 *list, u32 num_ents) | ||
457 | { | ||
458 | unsigned int i; | ||
459 | |||
460 | for (i = 0; i < num_ents; i++) { | ||
461 | u32 cpu = list[i]; | ||
462 | unsigned int j; | ||
463 | |||
464 | if (cpu == CPU_SENTINEL) | ||
465 | continue; | ||
466 | |||
467 | for (j = i + 1; j < num_ents; j++) { | ||
468 | if (list[j] == cpu) | ||
469 | list[j] = CPU_SENTINEL; | ||
470 | } | ||
471 | } | ||
472 | } | ||
473 | |||
474 | static int dr_cpu_size_response(int ncpus) | ||
475 | { | ||
476 | return (sizeof(struct ds_data) + | ||
477 | sizeof(struct dr_cpu_tag) + | ||
478 | (sizeof(struct dr_cpu_resp_entry) * ncpus)); | ||
479 | } | ||
480 | |||
/* Fill in a dr-cpu response skeleton: header fields, then one record
 * per cpu in *mask, each initialized to DR_CPU_RES_OK with the caller's
 * default status.  ncpus must equal the weight of *mask.
 */
static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
				 u64 handle, int resp_len, int ncpus,
				 cpumask_t *mask, u32 default_stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i, cpu;

	/* Layout: ds_data header, dr_cpu_tag, then the record array. */
	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	resp->tag.type = DS_DATA;
	resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
	resp->handle = handle;
	tag->req_num = req_num;
	tag->type = DR_CPU_OK;
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu_mask(cpu, *mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
		i++;
	}
	BUG_ON(i != ncpus);
}
508 | |||
509 | static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, | ||
510 | u32 res, u32 stat) | ||
511 | { | ||
512 | struct dr_cpu_resp_entry *ent; | ||
513 | struct dr_cpu_tag *tag; | ||
514 | int i; | ||
515 | |||
516 | tag = (struct dr_cpu_tag *) (resp + 1); | ||
517 | ent = (struct dr_cpu_resp_entry *) (tag + 1); | ||
518 | |||
519 | for (i = 0; i < ncpus; i++) { | ||
520 | if (ent[i].cpu != cpu) | ||
521 | continue; | ||
522 | ent[i].result = res; | ||
523 | ent[i].stat = stat; | ||
524 | break; | ||
525 | } | ||
526 | } | ||
527 | |||
/* Bring up every cpu in *mask in response to a DR_CPU_CONFIGURE
 * request.  Per-cpu failures are recorded in the response rather than
 * aborting the whole operation; only allocation failure returns an
 * error (which makes the caller send a DR_CPU_ERROR instead).
 */
static int __cpuinit dr_cpu_configure(struct ds_info *dp,
				      struct ds_cap_state *cp,
				      u64 req_num,
				      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpus_weight(*mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Optimistically mark every cpu CONFIGURED; failures below are
	 * patched in via dr_cpu_mark().
	 */
	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_CONFIGURED);

	mdesc_fill_in_cpu_data(*mask);

	for_each_cpu_mask(cpu, *mask) {
		int err;

		printk(KERN_INFO "ds-%lu: Starting cpu %d...\n",
		       dp->id, cpu);
		err = cpu_up(cpu);
		if (err) {
			__u32 res = DR_CPU_RES_FAILURE;
			__u32 stat = DR_CPU_STAT_UNCONFIGURED;

			if (!cpu_present(cpu)) {
				/* CPU not present in MD */
				res = DR_CPU_RES_NOT_IN_MD;
				stat = DR_CPU_STAT_NOT_PRESENT;
			} else if (err == -ENODEV) {
				/* CPU did not call in successfully */
				res = DR_CPU_RES_CPU_NOT_RESPONDING;
			}

			printk(KERN_INFO "ds-%lu: CPU startup failed err=%d\n",
			       dp->id, err);
			dr_cpu_mark(resp, cpu, ncpus, res, stat);
		}
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	/* Redistribute IRQs, taking into account the new cpus. */
	fixup_irqs();

	return 0;
}
585 | |||
/* Take down every cpu in *mask in response to a DR_CPU_UNCONFIGURE
 * request.  Mirrors dr_cpu_configure(): per-cpu cpu_down() failures are
 * recorded in the response; only allocation failure returns an error.
 */
static int dr_cpu_unconfigure(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      u64 req_num,
			      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpus_weight(*mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Optimistically mark every cpu UNCONFIGURED; failures below
	 * are patched back to CONFIGURED via dr_cpu_mark().
	 */
	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	for_each_cpu_mask(cpu, *mask) {
		int err;

		printk(KERN_INFO "ds-%lu: Shutting down cpu %d...\n",
		       dp->id, cpu);
		err = cpu_down(cpu);
		if (err)
			dr_cpu_mark(resp, cpu, ncpus,
				    DR_CPU_RES_FAILURE,
				    DR_CPU_STAT_CONFIGURED);
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	return 0;
}
625 | |||
/* "dr-cpu" handler: validate the operation type, build a cpumask from
 * the (de-duplicated) request's cpu-id list, and dispatch to the
 * configure or unconfigure worker.  Any failure is reported to the
 * manager as a DR_CPU_ERROR response.
 */
static void __cpuinit dr_cpu_data(struct ds_info *dp,
				  struct ds_cap_state *cp,
				  void *buf, int len)
{
	struct ds_data *data = buf;
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	u32 *cpu_list = (u32 *) (tag + 1);
	u64 req_num = tag->req_num;
	cpumask_t mask;
	unsigned int i;
	int err;

	switch (tag->type) {
	case DR_CPU_CONFIGURE:
	case DR_CPU_UNCONFIGURE:
	case DR_CPU_FORCE_UNCONFIGURE:
		break;

	default:
		/* DR_CPU_STATUS and unknown types are not supported. */
		dr_cpu_send_error(dp, cp, data);
		return;
	}

	purge_dups(cpu_list, tag->num_records);

	/* Sentinel (duplicate) and out-of-range ids are skipped. */
	cpus_clear(mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;

		if (cpu_list[i] < NR_CPUS)
			cpu_set(cpu_list[i], mask);
	}

	/* FORCE_UNCONFIGURE is handled the same as UNCONFIGURE here. */
	if (tag->type == DR_CPU_CONFIGURE)
		err = dr_cpu_configure(dp, cp, req_num, &mask);
	else
		err = dr_cpu_unconfigure(dp, cp, req_num, &mask);

	if (err)
		dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */
669 | |||
/* "pri" service payload header. */
struct ds_pri_msg {
	__u64				req_num;
	__u64				type;
#define DS_PRI_REQUEST			0x00
#define DS_PRI_DATA			0x01
#define DS_PRI_UPDATE			0x02
};
677 | |||
/* "pri" handler: currently only logs incoming PRI messages; no
 * response is generated.
 */
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_pri_msg *rp;

	rp = (struct ds_pri_msg *) (dpkt + 1);

	printk(KERN_INFO "ds-%lu: PRI REQ [%lx:%lx], len=%d\n",
	       dp->id, rp->req_num, rp->type, len);
}
690 | |||
/* Common header for OBP variable ("var-config") service messages. */
struct ds_var_hdr {
	__u32			type;		/* One of DS_VAR_* below */
#define DS_VAR_SET_REQ			0x00
#define DS_VAR_DELETE_REQ		0x01
#define DS_VAR_SET_RESP			0x02
#define DS_VAR_DELETE_RESP		0x03
};
698 | |||
/* Set an OBP variable: the payload carries two consecutive
 * NUL-terminated strings, the variable name followed by its value.
 */
struct ds_var_set_msg {
	struct ds_var_hdr	hdr;
	char			name_and_value[0];
};

/* Delete the OBP variable named by the NUL-terminated string. */
struct ds_var_delete_msg {
	struct ds_var_hdr	hdr;
	char			name[0];
};
708 | |||
/* Response to a var-config set/delete request. */
struct ds_var_resp {
	struct ds_var_hdr	hdr;
	__u32			result;		/* One of DS_VAR_* below */
#define DS_VAR_SUCCESS			0x00
#define DS_VAR_NO_SPACE			0x01
#define DS_VAR_INVALID_VAR		0x02
#define DS_VAR_INVALID_VAL		0x03
#define DS_VAR_NOT_PRESENT		0x04
};
718 | |||
/* ds_var_mutex serializes var-config requests so only one is
 * outstanding at a time.  ds_var_doorbell flags arrival of the
 * response and ds_var_response carries its result code; both are
 * written by ds_var_data() and polled by ldom_set_var().
 */
static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;
722 | |||
/* var-config service data handler: record the result of a
 * set/delete response and ring the doorbell ldom_set_var() polls.
 */
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	/* Only set/delete responses are interesting here. */
	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	/* Ensure the result is globally visible before the doorbell. */
	wmb();
	ds_var_doorbell = 1;
}
740 | |||
/* Set OBP variable @var to @value through the domain services
 * "var-config" capability (falling back to "var-config-backup").
 * Polls up to ~100ms for the response and logs an error on failure
 * or timeout.  May sleep (takes ds_var_mutex).
 */
void ldom_set_var(const char *var, const char *value)
{
	struct ds_cap_state *cp;
	struct ds_info *dp;
	unsigned long flags;

	/* Find a registered provider, preferring the primary
	 * var-config service over the backup.
	 */
	spin_lock_irqsave(&ds_lock, flags);
	cp = NULL;
	for (dp = ds_info_list; dp; dp = dp->next) {
		struct ds_cap_state *tmp;

		tmp = find_cap_by_string(dp, "var-config");
		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
			cp = tmp;
			break;
		}
	}
	if (!cp) {
		for (dp = ds_info_list; dp; dp = dp->next) {
			struct ds_cap_state *tmp;

			tmp = find_cap_by_string(dp, "var-config-backup");
			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
				cp = tmp;
				break;
			}
		}
	}
	/* NOTE(review): dp is used below after the lock is dropped;
	 * entries are only ever added to ds_info_list in ds_probe(),
	 * presumably never removed -- confirm before adding removal.
	 */
	spin_unlock_irqrestore(&ds_lock, flags);

	if (cp) {
		union {
			struct {
				struct ds_data		data;
				struct ds_var_set_msg	msg;
			} header;
			char			all[512];
		} pkt;
		char *base, *p;
		int msg_len, loops;

		/* Build a DS_VAR_SET_REQ: header followed by the two
		 * NUL-terminated strings (name, then value).
		 */
		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;	/* Round up to 4 bytes. */
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		/* One outstanding request at a time; ds_var_data()
		 * fills ds_var_response and then raises the doorbell.
		 */
		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		__ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		/* Busy-poll for the response, 1000 * 100us max. */
		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}

		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR "ds-%lu: var-config [%s:%s] "
			       "failed, response(%d).\n",
			       dp->id, var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}
829 | |||
/* Request a reboot of the domain, optionally recording a new OBP
 * boot command first.
 *
 * A non-empty @boot_command is stored (prefixed with "boot ") in
 * the "reboot-command" OBP variable via the var-config service so
 * the firmware uses it on the way back up.  sun4v_mach_sir() then
 * issues a software-initiated reset and does not return.
 */
void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		char full_boot_str[256];

		/* Bound the copy: the previous unbounded strcpy()
		 * overflowed full_boot_str for commands longer than
		 * the buffer.  Truncation is preferable to stack
		 * corruption here.
		 */
		snprintf(full_boot_str, sizeof(full_boot_str),
			 "boot %s", boot_command);

		ldom_set_var("reboot-command", full_boot_str);
	}
	sun4v_mach_sir();
}
845 | |||
/* Power off the domain via the hypervisor exit call. */
void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}
850 | |||
/* Connection-reset stub: full recovery is not implemented, so just
 * log which caller asked for the reset.
 */
static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR "ds-%lu: ds_conn_reset() from %p\n",
	       dp->id, __builtin_return_address(0));
}
856 | |||
/* Send a DS_REG_REQ for every capability not yet registered with
 * the domain manager.  Each request gets a fresh handle composed
 * of the capability index (high 32 bits) and a time-derived nonce.
 * Caller must hold ds_lock.  Always returns 0.
 */
static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &dp->ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		/* handle = (capability index << 32) | nonce */
		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		/* The service id string trails the fixed request. */
		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;
		strcpy(pbuf.req.svc_id, cp->service_id);

		err = __ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}
	return 0;
}
894 | |||
/* Advance the DS handshake state machine for one incoming
 * handshake packet (any packet with type < DS_DATA).
 *
 * Returns 0 on success, -ECONNRESET if the packet is invalid for
 * the current handshake state.
 */
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{

	if (dp->hs_state == DS_HS_START) {
		/* Only an INIT ACK is acceptable right after our
		 * DS_INIT_REQ went out.
		 */
		if (pkt->type != DS_INIT_ACK)
			goto conn_reset;

		dp->hs_state = DS_HS_DONE;

		/* Version negotiated; now register the services. */
		return register_services(dp);
	}

	if (dp->hs_state != DS_HS_DONE)
		goto conn_reset;

	if (pkt->type == DS_REG_ACK) {
		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, ap->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%lu: REG ACK for unknown "
			       "handle %lx\n", dp->id, ap->handle);
			return 0;
		}
		printk(KERN_INFO "ds-%lu: Registered %s service.\n",
		       dp->id, cp->service_id);
		cp->state = CAP_STATE_REGISTERED;
	} else if (pkt->type == DS_REG_NACK) {
		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, np->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%lu: REG NACK for "
			       "unknown handle %lx\n",
			       dp->id, np->handle);
			return 0;
		}
		/* Registration refused; leave it unregistered. */
		cp->state = CAP_STATE_UNKNOWN;
	}

	return 0;

conn_reset:
	ds_conn_reset(dp);
	return -ECONNRESET;
}
941 | |||
942 | static void __send_ds_nack(struct ds_info *dp, u64 handle) | ||
943 | { | ||
944 | struct ds_data_nack nack = { | ||
945 | .tag = { | ||
946 | .type = DS_NACK, | ||
947 | .len = (sizeof(struct ds_data_nack) - | ||
948 | sizeof(struct ds_msg_tag)), | ||
949 | }, | ||
950 | .handle = handle, | ||
951 | .result = DS_INV_HDL, | ||
952 | }; | ||
953 | |||
954 | __ds_send(dp->lp, &nack, sizeof(nack)); | ||
955 | } | ||
956 | |||
/* Packets queued by the IRQ path for the "kldomd" worker thread.
 * ds_work_list is protected by ds_lock; ds_wait wakes the thread.
 */
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

struct ds_queue_entry {
	struct list_head		list;
	struct ds_info			*dp;
	int				req_len;	/* Bytes in req[] */
	int				__pad;		/* Keep req[] 8-byte aligned */
	u64				req[0];		/* Copied packet follows */
};
967 | |||
/* Drain the queued DS data packets (in thread context) and hand
 * each to the data handler of the capability its handle names.
 * Unknown handles are NACKed back to the peer.
 */
static void process_ds_work(void)
{
	struct ds_queue_entry *qp, *tmp;
	unsigned long flags;
	LIST_HEAD(todo);

	/* Grab the whole pending list in one shot so ds_lock is not
	 * held while the handlers run (they may sleep).
	 */
	spin_lock_irqsave(&ds_lock, flags);
	list_splice_init(&ds_work_list, &todo);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
		struct ds_data *dpkt = (struct ds_data *) qp->req;
		struct ds_info *dp = qp->dp;
		struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
		int req_len = qp->req_len;

		if (!cp) {
			printk(KERN_ERR "ds-%lu: Data for unknown "
			       "handle %lu\n",
			       dp->id, dpkt->handle);

			/* __send_ds_nack() requires ds_lock. */
			spin_lock_irqsave(&ds_lock, flags);
			__send_ds_nack(dp, dpkt->handle);
			spin_unlock_irqrestore(&ds_lock, flags);
		} else {
			cp->data(dp, cp, dpkt, req_len);
		}

		list_del(&qp->list);
		kfree(qp);
	}
}
1000 | |||
/* Worker thread ("kldomd"): sleep until ds_data() queues work,
 * then process it.  Exits when kthread_stop() is called.
 */
static int ds_thread(void *__unused)
{
	DEFINE_WAIT(wait);

	while (1) {
		/* Recheck the list after prepare_to_wait() so a
		 * wake-up between the test and schedule() is not lost.
		 */
		prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&ds_work_list))
			schedule();
		finish_wait(&ds_wait, &wait);

		if (kthread_should_stop())
			break;

		process_ds_work();
	}

	return 0;
}
1019 | |||
1020 | static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len) | ||
1021 | { | ||
1022 | struct ds_data *dpkt = (struct ds_data *) pkt; | ||
1023 | struct ds_queue_entry *qp; | ||
1024 | |||
1025 | qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); | ||
1026 | if (!qp) { | ||
1027 | __send_ds_nack(dp, dpkt->handle); | ||
1028 | } else { | ||
1029 | qp->dp = dp; | ||
1030 | memcpy(&qp->req, pkt, len); | ||
1031 | list_add_tail(&qp->list, &ds_work_list); | ||
1032 | wake_up(&ds_wait); | ||
1033 | } | ||
1034 | return 0; | ||
1035 | } | ||
1036 | |||
1037 | static void ds_up(struct ds_info *dp) | ||
1038 | { | ||
1039 | struct ldc_channel *lp = dp->lp; | ||
1040 | struct ds_ver_req req; | ||
1041 | int err; | ||
1042 | |||
1043 | req.tag.type = DS_INIT_REQ; | ||
1044 | req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag); | ||
1045 | req.ver.major = 1; | ||
1046 | req.ver.minor = 0; | ||
1047 | |||
1048 | err = __ds_send(lp, &req, sizeof(req)); | ||
1049 | if (err > 0) | ||
1050 | dp->hs_state = DS_HS_START; | ||
1051 | } | ||
1052 | |||
1053 | static void ds_reset(struct ds_info *dp) | ||
1054 | { | ||
1055 | int i; | ||
1056 | |||
1057 | dp->hs_state = 0; | ||
1058 | |||
1059 | for (i = 0; i < dp->num_ds_states; i++) { | ||
1060 | struct ds_cap_state *cp = &dp->ds_states[i]; | ||
1061 | |||
1062 | cp->state = CAP_STATE_UNKNOWN; | ||
1063 | } | ||
1064 | } | ||
1065 | |||
/* LDC event callback for the DS channel; holds ds_lock throughout.
 *
 * UP starts the handshake, RESET tears down registration state,
 * and DATA_READY drains complete packets from the channel:
 * handshake packets go to ds_handshake(), data packets are queued
 * for the worker thread via ds_data().
 */
static void ds_event(void *arg, int event)
{
	struct ds_info *dp = arg;
	struct ldc_channel *lp = dp->lp;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);

	if (event == LDC_EVENT_UP) {
		ds_up(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event == LDC_EVENT_RESET) {
		ds_reset(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event != LDC_EVENT_DATA_READY) {
		printk(KERN_WARNING "ds-%lu: Unexpected LDC event %d\n",
		       dp->id, event);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	err = 0;
	while (1) {
		struct ds_msg_tag *tag;

		/* Read the fixed-size message tag first... */
		err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err == 0)
			break;

		/* ...then the tag->len payload bytes behind it. */
		tag = dp->rcv_buf;
		err = ldc_read(lp, tag + 1, tag->len);

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		/* Short read: stop and wait for the next event. */
		if (err < tag->len)
			break;

		/* Types below DS_DATA are handshake traffic. */
		if (tag->type < DS_DATA)
			err = ds_handshake(dp, dp->rcv_buf);
		else
			err = ds_data(dp, dp->rcv_buf,
				      sizeof(*tag) + err);
		if (err == -ECONNRESET)
			break;
	}

	spin_unlock_irqrestore(&ds_lock, flags);
}
1130 | |||
/* VIO probe for a "domain-services-port" device: allocate per-port
 * state, a 4K receive buffer, and a private copy of the capability
 * template; then bind an LDC channel with ds_event() as callback
 * and link the new port onto ds_info_list.
 *
 * Returns 0 on success or a negative errno, unwinding partial
 * allocations on failure.
 */
static int __devinit ds_probe(struct vio_dev *vdev,
			      const struct vio_device_id *id)
{
	static int ds_version_printed;
	struct ldc_channel_config ds_cfg = {
		.event		= ds_event,
		.mtu		= 4096,
		.mode		= LDC_MODE_STREAM,
	};
	struct mdesc_handle *hp;
	struct ldc_channel *lp;
	struct ds_info *dp;
	const u64 *val;
	int err, i;

	/* Print the driver banner once, on the first probe. */
	if (ds_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	err = -ENOMEM;
	if (!dp)
		goto out_err;

	/* Pick up this port's "id" property from the MD, if any. */
	hp = mdesc_grab();
	val = mdesc_get_property(hp, vdev->mp, "id", NULL);
	if (val)
		dp->id = *val;
	mdesc_release(hp);

	dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
	if (!dp->rcv_buf)
		goto out_free_dp;

	dp->rcv_buf_len = 4096;

	/* Per-port copy of the capability table. */
	dp->ds_states = kzalloc(sizeof(ds_states_template),
				GFP_KERNEL);
	if (!dp->ds_states)
		goto out_free_rcv_buf;

	memcpy(dp->ds_states, ds_states_template,
	       sizeof(ds_states_template));
	dp->num_ds_states = ARRAY_SIZE(ds_states_template);

	/* Seed each handle with its capability index in the upper
	 * 32 bits; register_services() ORs in a nonce later.
	 */
	for (i = 0; i < dp->num_ds_states; i++)
		dp->ds_states[i].handle = ((u64)i << 32);

	ds_cfg.tx_irq = vdev->tx_irq;
	ds_cfg.rx_irq = vdev->rx_irq;

	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out_free_ds_states;
	}
	dp->lp = lp;

	err = ldc_bind(lp, "DS");
	if (err)
		goto out_free_ldc;

	spin_lock_irq(&ds_lock);
	dp->next = ds_info_list;
	ds_info_list = dp;
	spin_unlock_irq(&ds_lock);

	return err;

out_free_ldc:
	ldc_free(dp->lp);

out_free_ds_states:
	kfree(dp->ds_states);

out_free_rcv_buf:
	kfree(dp->rcv_buf);

out_free_dp:
	kfree(dp);

out_err:
	return err;
}
1214 | |||
/* Removal is a no-op: DS ports persist for the life of the domain
 * and nothing allocated in ds_probe() is torn down here.
 */
static int ds_remove(struct vio_dev *vdev)
{
	return 0;
}
1219 | |||
1220 | static struct vio_device_id __initdata ds_match[] = { | ||
1221 | { | ||
1222 | .type = "domain-services-port", | ||
1223 | }, | ||
1224 | {}, | ||
1225 | }; | ||
1226 | |||
/* VIO driver glue binding ds_probe()/ds_remove() to
 * "domain-services-port" devices via ds_match.
 */
static struct vio_driver ds_driver = {
	.id_table	= ds_match,
	.probe		= ds_probe,
	.remove		= ds_remove,
	.driver		= {
		.name	= "ds",
		.owner	= THIS_MODULE,
	}
};
1236 | |||
1237 | static int __init ds_init(void) | ||
1238 | { | ||
1239 | kthread_run(ds_thread, NULL, "kldomd"); | ||
1240 | |||
1241 | return vio_register_driver(&ds_driver); | ||
1242 | } | ||
1243 | |||
1244 | subsys_initcall(ds_init); | ||
diff --git a/arch/sparc/kernel/dtlb_miss.S b/arch/sparc/kernel/dtlb_miss.S new file mode 100644 index 000000000000..09a6a15a7105 --- /dev/null +++ b/arch/sparc/kernel/dtlb_miss.S | |||
@@ -0,0 +1,39 @@ | |||
/* Fast-path DTLB miss handler, included directly into the trap
 * table.  Each "ICACHE line" group below must assemble to exactly
 * eight instructions (the handler occupies four 32-byte I-cache
 * lines), hence the nop padding in the unused lines.
 */

/* DTLB ** ICACHE line 1: Context 0 check and TSB load	*/
	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
	ldxa	[%g0] ASI_DMMU, %g6		! Get TAG TARGET
	srlx	%g6, 48, %g5			! Get context
	sllx	%g6, 22, %g6			! Zero out context
	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
	 srlx	%g6, 22, %g6			! Delay slot
	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
	cmp	%g4, %g6			! Compare TAG

/* DTLB ** ICACHE line 2: TSB compare and TLB load	*/
	bne,pn	%xcc, tsb_miss_dtlb		! Miss
	 mov	FAULT_CODE_DTLB, %g3
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Load TLB
	retry					! Trap done
	nop
	nop
	nop
	nop

/* DTLB ** ICACHE line 3:				*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/* DTLB ** ICACHE line 4:				*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S new file mode 100644 index 000000000000..b2c2c5be281c --- /dev/null +++ b/arch/sparc/kernel/dtlb_prot.S | |||
@@ -0,0 +1,54 @@ | |||
/*
 * dtlb_prot.S: DTLB protection trap strategy.
 * This is included directly into the trap table.
 *
 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

/* Ways we can get here:
 *
 * [TL == 0] 1) User stores to readonly pages.
 * [TL == 0] 2) Nucleus stores to user readonly pages.
 * [TL >  0] 3) Nucleus stores to user readonly stack frame.
 */

/* Each "ICACHE line" group below must assemble to exactly eight
 * instructions; the nops pad the unused lines.
 */

/* PROT ** ICACHE line 1: User DTLB protection trap	*/
	mov	TLB_SFSR, %g1
	stxa	%g0, [%g1] ASI_DMMU		! Clear FaultValid bit
	membar	#Sync				! Synchronize stores
	rdpr	%pstate, %g5			! Move into alt-globals
	wrpr	%g5, PSTATE_AG|PSTATE_MG, %pstate
	rdpr	%tl, %g1			! Need a winfixup?
	cmp	%g1, 1				! Trap level >1?
	mov	TLB_TAG_ACCESS, %g4		! For reload of vaddr

/* PROT ** ICACHE line 2: More real fault processing */
	bgu,pn	%xcc, winfix_trampoline		! Yes, perform winfixup
	 ldxa	[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
	ba,pt	%xcc, sparc64_realfault_common	! Nope, normal fault
	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
	nop
	nop
	nop
	nop

/* PROT ** ICACHE line 3: Unused...	*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/* PROT ** ICACHE line 4: Unused...	*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c new file mode 100644 index 000000000000..77dbf6d45faf --- /dev/null +++ b/arch/sparc/kernel/ebus.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* ebus.c: EBUS DMA library code. | ||
2 | * | ||
3 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) | ||
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/delay.h> | ||
13 | |||
14 | #include <asm/ebus_dma.h> | ||
15 | #include <asm/io.h> | ||
16 | |||
/* EBUS DMA controller register offsets (relative to p->regs) and
 * the CSR bit definitions used throughout this file.
 */
#define EBDMA_CSR	0x00UL	/* Control/Status */
#define EBDMA_ADDR	0x04UL	/* DMA Address */
#define EBDMA_COUNT	0x08UL	/* DMA Count */

#define EBDMA_CSR_INT_PEND	0x00000001
#define EBDMA_CSR_ERR_PEND	0x00000002
#define EBDMA_CSR_DRAIN		0x00000004
#define EBDMA_CSR_INT_EN	0x00000010
#define EBDMA_CSR_RESET		0x00000080
#define EBDMA_CSR_WRITE		0x00000100
#define EBDMA_CSR_EN_DMA	0x00000200
#define EBDMA_CSR_CYC_PEND	0x00000400
#define EBDMA_CSR_DIAG_RD_DONE	0x00000800
#define EBDMA_CSR_DIAG_WR_DONE	0x00001000
#define EBDMA_CSR_EN_CNT	0x00002000
#define EBDMA_CSR_TC		0x00004000
#define EBDMA_CSR_DIS_CSR_DRN	0x00010000
#define EBDMA_CSR_BURST_SZ_MASK	0x000c0000
#define EBDMA_CSR_BURST_SZ_1	0x00080000
#define EBDMA_CSR_BURST_SZ_4	0x00000000
#define EBDMA_CSR_BURST_SZ_8	0x00040000
#define EBDMA_CSR_BURST_SZ_16	0x000c0000
#define EBDMA_CSR_DIAG_EN	0x00100000
#define EBDMA_CSR_DIS_ERR_PEND	0x00400000
#define EBDMA_CSR_TCI_DIS	0x00800000
#define EBDMA_CSR_EN_NEXT	0x01000000
#define EBDMA_CSR_DMA_ON	0x02000000
#define EBDMA_CSR_A_LOADED	0x04000000
#define EBDMA_CSR_NA_LOADED	0x08000000
#define EBDMA_CSR_DEV_ID_MASK	0xf0000000

/* Max poll iterations (10us each) while draining after a reset. */
#define EBUS_DMA_RESET_TIMEOUT	10000
49 | |||
/* Reset the DMA engine.  With @no_drain, return right after
 * writing the reset bit; otherwise poll (up to
 * EBUS_DMA_RESET_TIMEOUT iterations of 10us) until any in-flight
 * drain/cycle activity has completed.
 */
static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
{
	int i;
	u32 val = 0;

	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
	udelay(1);

	if (no_drain)
		return;

	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
		val = readl(p->regs + EBDMA_CSR);

		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
			break;
		udelay(10);
	}
}
69 | |||
/* Shared interrupt handler: latch and ack the CSR, then report an
 * error or completion event to the client callback.
 */
static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
{
	struct ebus_dma_info *p = dev_id;
	unsigned long flags;
	u32 csr = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	/* Writing the status bits back acknowledges them. */
	writel(csr, p->regs + EBDMA_CSR);
	spin_unlock_irqrestore(&p->lock, flags);

	if (csr & EBDMA_CSR_ERR_PEND) {
		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
		return IRQ_HANDLED;
	} else if (csr & EBDMA_CSR_INT_PEND) {
		/* TC set => transfer count expired (DMA completion);
		 * otherwise the device itself raised the interrupt.
		 */
		p->callback(p,
			    (csr & EBDMA_CSR_TC) ?
			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
			    p->client_cookie);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;

}
96 | |||
/* Validate and initialize an EBUS DMA client: reset the engine
 * (without waiting for drain) and program the baseline CSR
 * (16-byte bursts, counted mode, optional TC-interrupt disable).
 *
 * Returns 0 on success, -EINVAL if the ebus_dma_info is malformed.
 */
int ebus_dma_register(struct ebus_dma_info *p)
{
	u32 csr;

	if (!p->regs)
		return -EINVAL;
	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
			 EBUS_DMA_FLAG_TCI_DISABLE))
		return -EINVAL;
	/* The library IRQ handler needs somewhere to deliver events. */
	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
		return -EINVAL;
	if (!strlen(p->name))
		return -EINVAL;

	__ebus_dma_reset(p, 1);

	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;

	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;

	writel(csr, p->regs + EBDMA_CSR);

	return 0;
}
EXPORT_SYMBOL(ebus_dma_register);
123 | |||
/* Enable (@on non-zero) or disable the DMA interrupt.  When the
 * client uses the library IRQ handler, the irq line itself is
 * requested/freed here as well.
 *
 * Returns 0 on success or -EBUSY if request_irq() fails.
 */
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
{
	unsigned long flags;
	u32 csr;

	if (on) {
		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
				return -EBUSY;
		}

		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr |= EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);
	} else {
		/* Mask in hardware first, then release the irq line
		 * (free_irq() can sleep, so it runs unlocked).
		 */
		spin_lock_irqsave(&p->lock, flags);
		csr = readl(p->regs + EBDMA_CSR);
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		spin_unlock_irqrestore(&p->lock, flags);

		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
			free_irq(p->irq, p);
		}
	}

	return 0;
}
EXPORT_SYMBOL(ebus_dma_irq_enable);
155 | |||
/* Detach a client: mask the DMA interrupt and, if it was enabled,
 * free the irq line that ebus_dma_irq_enable() requested.
 */
void ebus_dma_unregister(struct ebus_dma_info *p)
{
	unsigned long flags;
	u32 csr;
	int irq_on = 0;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	if (csr & EBDMA_CSR_INT_EN) {
		csr &= ~EBDMA_CSR_INT_EN;
		writel(csr, p->regs + EBDMA_CSR);
		irq_on = 1;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	/* free_irq() can sleep, so do it after dropping the lock. */
	if (irq_on)
		free_irq(p->irq, p);
}
EXPORT_SYMBOL(ebus_dma_unregister);
175 | |||
/* Queue one DMA transfer of @len bytes at @bus_addr.
 *
 * Returns 0 on success, -EINVAL if @len does not fit the count
 * register or DMA is not enabled, and -EBUSY if the next-address
 * slot is already loaded.
 */
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
{
	unsigned long flags;
	u32 csr;
	int err;

	/* The COUNT register is only 24 bits wide. */
	if (len >= (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&p->lock, flags);
	csr = readl(p->regs + EBDMA_CSR);
	err = -EINVAL;
	if (!(csr & EBDMA_CSR_EN_DMA))
		goto out;
	err = -EBUSY;
	if (csr & EBDMA_CSR_NA_LOADED)
		goto out;

	writel(len, p->regs + EBDMA_COUNT);
	writel(bus_addr, p->regs + EBDMA_ADDR);
	err = 0;

out:
	spin_unlock_irqrestore(&p->lock, flags);

	return err;
}
EXPORT_SYMBOL(ebus_dma_request);
204 | |||
/* Drain/reset the engine and program it for a new transfer: the
 * CSR WRITE bit follows @write, with interrupts, counted mode,
 * 16-byte bursts and next-address chaining enabled.
 */
void ebus_dma_prepare(struct ebus_dma_info *p, int write)
{
	unsigned long flags;
	u32 csr;

	spin_lock_irqsave(&p->lock, flags);
	__ebus_dma_reset(p, 0);

	csr = (EBDMA_CSR_INT_EN |
	       EBDMA_CSR_EN_CNT |
	       EBDMA_CSR_BURST_SZ_16 |
	       EBDMA_CSR_EN_NEXT);

	if (write)
		csr |= EBDMA_CSR_WRITE;
	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
		csr |= EBDMA_CSR_TCI_DIS;

	writel(csr, p->regs + EBDMA_CSR);

	spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_prepare);
228 | |||
/* Bytes remaining in the current transfer (COUNT register). */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
234 | |||
/* Current DMA bus address (ADDR register). */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
240 | |||
241 | void ebus_dma_enable(struct ebus_dma_info *p, int on) | ||
242 | { | ||
243 | unsigned long flags; | ||
244 | u32 orig_csr, csr; | ||
245 | |||
246 | spin_lock_irqsave(&p->lock, flags); | ||
247 | orig_csr = csr = readl(p->regs + EBDMA_CSR); | ||
248 | if (on) | ||
249 | csr |= EBDMA_CSR_EN_DMA; | ||
250 | else | ||
251 | csr &= ~EBDMA_CSR_EN_DMA; | ||
252 | if ((orig_csr & EBDMA_CSR_EN_DMA) != | ||
253 | (csr & EBDMA_CSR_EN_DMA)) | ||
254 | writel(csr, p->regs + EBDMA_CSR); | ||
255 | spin_unlock_irqrestore(&p->lock, flags); | ||
256 | } | ||
257 | EXPORT_SYMBOL(ebus_dma_enable); | ||
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h new file mode 100644 index 000000000000..34d7ab5e10d2 --- /dev/null +++ b/arch/sparc/kernel/entry.h | |||
@@ -0,0 +1,195 @@ | |||
1 | #ifndef _ENTRY_H | ||
2 | #define _ENTRY_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/init.h> | ||
7 | |||
8 | extern const char *sparc_cpu_type; | ||
9 | extern const char *sparc_fpu_type; | ||
10 | |||
11 | extern void __init per_cpu_patch(void); | ||
12 | extern void __init sun4v_patch(void); | ||
13 | extern void __init boot_cpu_id_too_large(int cpu); | ||
14 | extern unsigned int dcache_parity_tl1_occurred; | ||
15 | extern unsigned int icache_parity_tl1_occurred; | ||
16 | |||
17 | extern asmlinkage void update_perfctrs(void); | ||
18 | extern asmlinkage void sparc_breakpoint(struct pt_regs *regs); | ||
19 | extern void timer_interrupt(int irq, struct pt_regs *regs); | ||
20 | |||
21 | extern void do_notify_resume(struct pt_regs *regs, | ||
22 | unsigned long orig_i0, | ||
23 | unsigned long thread_info_flags); | ||
24 | |||
25 | extern asmlinkage int syscall_trace_enter(struct pt_regs *regs); | ||
26 | extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); | ||
27 | |||
28 | extern void bad_trap_tl1(struct pt_regs *regs, long lvl); | ||
29 | |||
30 | extern void do_fpe_common(struct pt_regs *regs); | ||
31 | extern void do_fpieee(struct pt_regs *regs); | ||
32 | extern void do_fpother(struct pt_regs *regs); | ||
33 | extern void do_tof(struct pt_regs *regs); | ||
34 | extern void do_div0(struct pt_regs *regs); | ||
35 | extern void do_illegal_instruction(struct pt_regs *regs); | ||
36 | extern void mem_address_unaligned(struct pt_regs *regs, | ||
37 | unsigned long sfar, | ||
38 | unsigned long sfsr); | ||
39 | extern void sun4v_do_mna(struct pt_regs *regs, | ||
40 | unsigned long addr, | ||
41 | unsigned long type_ctx); | ||
42 | extern void do_privop(struct pt_regs *regs); | ||
43 | extern void do_privact(struct pt_regs *regs); | ||
44 | extern void do_cee(struct pt_regs *regs); | ||
45 | extern void do_cee_tl1(struct pt_regs *regs); | ||
46 | extern void do_dae_tl1(struct pt_regs *regs); | ||
47 | extern void do_iae_tl1(struct pt_regs *regs); | ||
48 | extern void do_div0_tl1(struct pt_regs *regs); | ||
49 | extern void do_fpdis_tl1(struct pt_regs *regs); | ||
50 | extern void do_fpieee_tl1(struct pt_regs *regs); | ||
51 | extern void do_fpother_tl1(struct pt_regs *regs); | ||
52 | extern void do_ill_tl1(struct pt_regs *regs); | ||
53 | extern void do_irq_tl1(struct pt_regs *regs); | ||
54 | extern void do_lddfmna_tl1(struct pt_regs *regs); | ||
55 | extern void do_stdfmna_tl1(struct pt_regs *regs); | ||
56 | extern void do_paw(struct pt_regs *regs); | ||
57 | extern void do_paw_tl1(struct pt_regs *regs); | ||
58 | extern void do_vaw(struct pt_regs *regs); | ||
59 | extern void do_vaw_tl1(struct pt_regs *regs); | ||
60 | extern void do_tof_tl1(struct pt_regs *regs); | ||
61 | extern void do_getpsr(struct pt_regs *regs); | ||
62 | |||
63 | extern void spitfire_insn_access_exception(struct pt_regs *regs, | ||
64 | unsigned long sfsr, | ||
65 | unsigned long sfar); | ||
66 | extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs, | ||
67 | unsigned long sfsr, | ||
68 | unsigned long sfar); | ||
69 | extern void spitfire_data_access_exception(struct pt_regs *regs, | ||
70 | unsigned long sfsr, | ||
71 | unsigned long sfar); | ||
72 | extern void spitfire_data_access_exception_tl1(struct pt_regs *regs, | ||
73 | unsigned long sfsr, | ||
74 | unsigned long sfar); | ||
75 | extern void spitfire_access_error(struct pt_regs *regs, | ||
76 | unsigned long status_encoded, | ||
77 | unsigned long afar); | ||
78 | |||
79 | extern void cheetah_fecc_handler(struct pt_regs *regs, | ||
80 | unsigned long afsr, | ||
81 | unsigned long afar); | ||
82 | extern void cheetah_cee_handler(struct pt_regs *regs, | ||
83 | unsigned long afsr, | ||
84 | unsigned long afar); | ||
85 | extern void cheetah_deferred_handler(struct pt_regs *regs, | ||
86 | unsigned long afsr, | ||
87 | unsigned long afar); | ||
88 | extern void cheetah_plus_parity_error(int type, struct pt_regs *regs); | ||
89 | |||
90 | extern void sun4v_insn_access_exception(struct pt_regs *regs, | ||
91 | unsigned long addr, | ||
92 | unsigned long type_ctx); | ||
93 | extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs, | ||
94 | unsigned long addr, | ||
95 | unsigned long type_ctx); | ||
96 | extern void sun4v_data_access_exception(struct pt_regs *regs, | ||
97 | unsigned long addr, | ||
98 | unsigned long type_ctx); | ||
99 | extern void sun4v_data_access_exception_tl1(struct pt_regs *regs, | ||
100 | unsigned long addr, | ||
101 | unsigned long type_ctx); | ||
102 | extern void sun4v_resum_error(struct pt_regs *regs, | ||
103 | unsigned long offset); | ||
104 | extern void sun4v_resum_overflow(struct pt_regs *regs); | ||
105 | extern void sun4v_nonresum_error(struct pt_regs *regs, | ||
106 | unsigned long offset); | ||
107 | extern void sun4v_nonresum_overflow(struct pt_regs *regs); | ||
108 | |||
109 | extern unsigned long sun4v_err_itlb_vaddr; | ||
110 | extern unsigned long sun4v_err_itlb_ctx; | ||
111 | extern unsigned long sun4v_err_itlb_pte; | ||
112 | extern unsigned long sun4v_err_itlb_error; | ||
113 | |||
114 | extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl); | ||
115 | |||
116 | extern unsigned long sun4v_err_dtlb_vaddr; | ||
117 | extern unsigned long sun4v_err_dtlb_ctx; | ||
118 | extern unsigned long sun4v_err_dtlb_pte; | ||
119 | extern unsigned long sun4v_err_dtlb_error; | ||
120 | |||
121 | extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl); | ||
122 | extern void hypervisor_tlbop_error(unsigned long err, | ||
123 | unsigned long op); | ||
124 | extern void hypervisor_tlbop_error_xcall(unsigned long err, | ||
125 | unsigned long op); | ||
126 | |||
/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers in traps.c use this information to log the
 * error and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;		/* fault status captured at trap time */
/*0x08*/u64 afar;		/* fault address captured at trap time */

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data */
/*0x30*/u64 dcache_index;	/* D-cache index */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
/*0x40*/u64 dcache_utag;	/* D-cache microtag */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index */
/*0x98*/u64 icache_tag;		/* I-cache phys tag */
/*0xa0*/u64 icache_utag;	/* I-cache microtag */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */

	/* 30 u64 fields are used above; pad out to 32 u64s so the
	 * structure is exactly 256 bytes (a fixed power-of-two stride
	 * for the per-cpu log entries).
	 */
/*0xf0*/u64 __pad[32 - 30];
};
160 | #define CHAFSR_INVALID ((u64)-1L) | ||
161 | |||
162 | /* This is allocated at boot time based upon the largest hardware | ||
163 | * cpu ID in the system. We allocate two entries per cpu, one for | ||
164 | * TL==0 logging and one for TL >= 1 logging. | ||
165 | */ | ||
166 | extern struct cheetah_err_info *cheetah_error_log; | ||
167 | |||
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
	/* Physical address of the next bucket in the per-cpu list.
	 * NOTE(review): chain semantics inferred from the "_pa" suffix
	 * and the comment above -- confirm against entry.S/sun4v_ivec.S.
	 */
/*0x00*/unsigned long __irq_chain_pa;

	/* Virtual interrupt number assigned to this INO. */
/*0x08*/unsigned int __virt_irq;
	/* Explicit padding: keeps sizeof(struct ino_bucket) at 16 bytes,
	 * matching the offsets hard-coded in the assembler handlers.
	 */
/*0x0c*/unsigned int __pad;
};
187 | |||
188 | extern struct ino_bucket *ivector_table; | ||
189 | extern unsigned long ivector_table_pa; | ||
190 | |||
191 | extern void handler_irq(int irq, struct pt_regs *regs); | ||
192 | extern void init_irqwork_curcpu(void); | ||
193 | extern void __cpuinit sun4v_register_mondo_queues(int this_cpu); | ||
194 | |||
195 | #endif /* _ENTRY_H */ | ||
diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S new file mode 100644 index 000000000000..786b185e6e3f --- /dev/null +++ b/arch/sparc/kernel/etrap_64.S | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * etrap.S: Preparing for entry into the kernel on Sparc V9. | ||
3 | * | ||
4 | * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | */ | ||
7 | |||
8 | |||
9 | #include <asm/asi.h> | ||
10 | #include <asm/pstate.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/spitfire.h> | ||
14 | #include <asm/head.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/mmu.h> | ||
17 | |||
18 | #define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ) | ||
19 | #define ETRAP_PSTATE1 (PSTATE_TSO | PSTATE_PRIV) | ||
20 | #define ETRAP_PSTATE2 \ | ||
21 | (PSTATE_TSO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE) | ||
22 | |||
23 | /* | ||
24 | * On entry, %g7 is return address - 0x4. | ||
25 | * %g4 and %g5 will be preserved %l4 and %l5 respectively. | ||
26 | */ | ||
27 | |||
28 | .text | ||
29 | .align 64 | ||
30 | .globl etrap_syscall, etrap, etrap_irq, etraptl1 | ||
31 | etrap: rdpr %pil, %g2 | ||
32 | etrap_irq: clr %g3 | ||
33 | etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
34 | rdpr %tstate, %g1 | ||
35 | or %g1, %g3, %g1 | ||
36 | sllx %g2, 20, %g3 | ||
37 | andcc %g1, TSTATE_PRIV, %g0 | ||
38 | or %g1, %g3, %g1 | ||
39 | bne,pn %xcc, 1f | ||
40 | sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2 | ||
41 | wrpr %g0, 7, %cleanwin | ||
42 | |||
43 | sethi %hi(TASK_REGOFF), %g2 | ||
44 | sethi %hi(TSTATE_PEF), %g3 | ||
45 | or %g2, %lo(TASK_REGOFF), %g2 | ||
46 | and %g1, %g3, %g3 | ||
47 | brnz,pn %g3, 1f | ||
48 | add %g6, %g2, %g2 | ||
49 | wr %g0, 0, %fprs | ||
50 | 1: rdpr %tpc, %g3 | ||
51 | |||
52 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] | ||
53 | rdpr %tnpc, %g1 | ||
54 | stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] | ||
55 | rd %y, %g3 | ||
56 | stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] | ||
57 | rdpr %tt, %g1 | ||
58 | st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] | ||
59 | sethi %hi(PT_REGS_MAGIC), %g3 | ||
60 | or %g3, %g1, %g1 | ||
61 | st %g1, [%g2 + STACKFRAME_SZ + PT_V9_MAGIC] | ||
62 | |||
63 | rdpr %cansave, %g1 | ||
64 | brnz,pt %g1, etrap_save | ||
65 | nop | ||
66 | |||
67 | rdpr %cwp, %g1 | ||
68 | add %g1, 2, %g1 | ||
69 | wrpr %g1, %cwp | ||
70 | be,pt %xcc, etrap_user_spill | ||
71 | mov ASI_AIUP, %g3 | ||
72 | |||
73 | rdpr %otherwin, %g3 | ||
74 | brz %g3, etrap_kernel_spill | ||
75 | mov ASI_AIUS, %g3 | ||
76 | |||
77 | etrap_user_spill: | ||
78 | |||
79 | wr %g3, 0x0, %asi | ||
80 | ldx [%g6 + TI_FLAGS], %g3 | ||
81 | and %g3, _TIF_32BIT, %g3 | ||
82 | brnz,pt %g3, etrap_user_spill_32bit | ||
83 | nop | ||
84 | ba,a,pt %xcc, etrap_user_spill_64bit | ||
85 | |||
86 | etrap_save: save %g2, -STACK_BIAS, %sp | ||
87 | mov %g6, %l6 | ||
88 | |||
89 | bne,pn %xcc, 3f | ||
90 | mov PRIMARY_CONTEXT, %l4 | ||
91 | rdpr %canrestore, %g3 | ||
92 | rdpr %wstate, %g2 | ||
93 | wrpr %g0, 0, %canrestore | ||
94 | sll %g2, 3, %g2 | ||
95 | mov 1, %l5 | ||
96 | stb %l5, [%l6 + TI_FPDEPTH] | ||
97 | |||
98 | wrpr %g3, 0, %otherwin | ||
99 | wrpr %g2, 0, %wstate | ||
100 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
101 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 | ||
102 | |||
103 | 661: stxa %g3, [%l4] ASI_DMMU | ||
104 | .section .sun4v_1insn_patch, "ax" | ||
105 | .word 661b | ||
106 | stxa %g3, [%l4] ASI_MMU | ||
107 | .previous | ||
108 | |||
109 | sethi %hi(KERNBASE), %l4 | ||
110 | flush %l4 | ||
111 | mov ASI_AIUS, %l7 | ||
112 | 2: mov %g4, %l4 | ||
113 | mov %g5, %l5 | ||
114 | add %g7, 4, %l2 | ||
115 | |||
116 | /* Go to trap time globals so we can save them. */ | ||
117 | 661: wrpr %g0, ETRAP_PSTATE1, %pstate | ||
118 | .section .sun4v_1insn_patch, "ax" | ||
119 | .word 661b | ||
120 | SET_GL(0) | ||
121 | .previous | ||
122 | |||
123 | stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] | ||
124 | stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] | ||
125 | sllx %l7, 24, %l7 | ||
126 | stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] | ||
127 | rdpr %cwp, %l0 | ||
128 | stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] | ||
129 | stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] | ||
130 | stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] | ||
131 | stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] | ||
132 | or %l7, %l0, %l7 | ||
133 | sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0 | ||
134 | or %l7, %l0, %l7 | ||
135 | wrpr %l2, %tnpc | ||
136 | wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate | ||
137 | stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] | ||
138 | stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] | ||
139 | stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] | ||
140 | stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] | ||
141 | stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] | ||
142 | stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] | ||
143 | stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] | ||
144 | mov %l6, %g6 | ||
145 | stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] | ||
146 | LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) | ||
147 | ldx [%g6 + TI_TASK], %g4 | ||
148 | done | ||
149 | |||
150 | 3: mov ASI_P, %l7 | ||
151 | ldub [%l6 + TI_FPDEPTH], %l5 | ||
152 | add %l6, TI_FPSAVED + 1, %l4 | ||
153 | srl %l5, 1, %l3 | ||
154 | add %l5, 2, %l5 | ||
155 | stb %l5, [%l6 + TI_FPDEPTH] | ||
156 | ba,pt %xcc, 2b | ||
157 | stb %g0, [%l4 + %l3] | ||
158 | nop | ||
159 | |||
160 | etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. | ||
161 | * We place this right after pt_regs on the trap stack. | ||
162 | * The layout is: | ||
163 | * 0x00 TL1's TSTATE | ||
164 | * 0x08 TL1's TPC | ||
165 | * 0x10 TL1's TNPC | ||
166 | * 0x18 TL1's TT | ||
167 | * ... | ||
168 | * 0x58 TL4's TT | ||
169 | * 0x60 TL | ||
170 | */ | ||
171 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
172 | sub %sp, ((4 * 8) * 4) + 8, %g2 | ||
173 | rdpr %tl, %g1 | ||
174 | |||
175 | wrpr %g0, 1, %tl | ||
176 | rdpr %tstate, %g3 | ||
177 | stx %g3, [%g2 + STACK_BIAS + 0x00] | ||
178 | rdpr %tpc, %g3 | ||
179 | stx %g3, [%g2 + STACK_BIAS + 0x08] | ||
180 | rdpr %tnpc, %g3 | ||
181 | stx %g3, [%g2 + STACK_BIAS + 0x10] | ||
182 | rdpr %tt, %g3 | ||
183 | stx %g3, [%g2 + STACK_BIAS + 0x18] | ||
184 | |||
185 | wrpr %g0, 2, %tl | ||
186 | rdpr %tstate, %g3 | ||
187 | stx %g3, [%g2 + STACK_BIAS + 0x20] | ||
188 | rdpr %tpc, %g3 | ||
189 | stx %g3, [%g2 + STACK_BIAS + 0x28] | ||
190 | rdpr %tnpc, %g3 | ||
191 | stx %g3, [%g2 + STACK_BIAS + 0x30] | ||
192 | rdpr %tt, %g3 | ||
193 | stx %g3, [%g2 + STACK_BIAS + 0x38] | ||
194 | |||
195 | sethi %hi(is_sun4v), %g3 | ||
196 | lduw [%g3 + %lo(is_sun4v)], %g3 | ||
197 | brnz,pn %g3, finish_tl1_capture | ||
198 | nop | ||
199 | |||
200 | wrpr %g0, 3, %tl | ||
201 | rdpr %tstate, %g3 | ||
202 | stx %g3, [%g2 + STACK_BIAS + 0x40] | ||
203 | rdpr %tpc, %g3 | ||
204 | stx %g3, [%g2 + STACK_BIAS + 0x48] | ||
205 | rdpr %tnpc, %g3 | ||
206 | stx %g3, [%g2 + STACK_BIAS + 0x50] | ||
207 | rdpr %tt, %g3 | ||
208 | stx %g3, [%g2 + STACK_BIAS + 0x58] | ||
209 | |||
210 | wrpr %g0, 4, %tl | ||
211 | rdpr %tstate, %g3 | ||
212 | stx %g3, [%g2 + STACK_BIAS + 0x60] | ||
213 | rdpr %tpc, %g3 | ||
214 | stx %g3, [%g2 + STACK_BIAS + 0x68] | ||
215 | rdpr %tnpc, %g3 | ||
216 | stx %g3, [%g2 + STACK_BIAS + 0x70] | ||
217 | rdpr %tt, %g3 | ||
218 | stx %g3, [%g2 + STACK_BIAS + 0x78] | ||
219 | |||
220 | stx %g1, [%g2 + STACK_BIAS + 0x80] | ||
221 | |||
222 | finish_tl1_capture: | ||
223 | wrpr %g0, 1, %tl | ||
224 | 661: nop | ||
225 | .section .sun4v_1insn_patch, "ax" | ||
226 | .word 661b | ||
227 | SET_GL(1) | ||
228 | .previous | ||
229 | |||
230 | rdpr %tstate, %g1 | ||
231 | sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 | ||
232 | ba,pt %xcc, 1b | ||
233 | andcc %g1, TSTATE_PRIV, %g0 | ||
234 | |||
235 | #undef TASK_REGOFF | ||
236 | #undef ETRAP_PSTATE1 | ||
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S new file mode 100644 index 000000000000..a6864826a4bd --- /dev/null +++ b/arch/sparc/kernel/fpu_traps.S | |||
@@ -0,0 +1,384 @@ | |||
1 | /* This is trivial with the new code... */ | ||
2 | .globl do_fpdis | ||
3 | .type do_fpdis,#function | ||
4 | do_fpdis: | ||
5 | sethi %hi(TSTATE_PEF), %g4 | ||
6 | rdpr %tstate, %g5 | ||
7 | andcc %g5, %g4, %g0 | ||
8 | be,pt %xcc, 1f | ||
9 | nop | ||
10 | rd %fprs, %g5 | ||
11 | andcc %g5, FPRS_FEF, %g0 | ||
12 | be,pt %xcc, 1f | ||
13 | nop | ||
14 | |||
15 | /* Legal state when DCR_IFPOE is set in Cheetah %dcr. */ | ||
16 | sethi %hi(109f), %g7 | ||
17 | ba,pt %xcc, etrap | ||
18 | 109: or %g7, %lo(109b), %g7 | ||
19 | add %g0, %g0, %g0 | ||
20 | ba,a,pt %xcc, rtrap | ||
21 | |||
22 | 1: TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
23 | ldub [%g6 + TI_FPSAVED], %g5 | ||
24 | wr %g0, FPRS_FEF, %fprs | ||
25 | andcc %g5, FPRS_FEF, %g0 | ||
26 | be,a,pt %icc, 1f | ||
27 | clr %g7 | ||
28 | ldx [%g6 + TI_GSR], %g7 | ||
29 | 1: andcc %g5, FPRS_DL, %g0 | ||
30 | bne,pn %icc, 2f | ||
31 | fzero %f0 | ||
32 | andcc %g5, FPRS_DU, %g0 | ||
33 | bne,pn %icc, 1f | ||
34 | fzero %f2 | ||
35 | faddd %f0, %f2, %f4 | ||
36 | fmuld %f0, %f2, %f6 | ||
37 | faddd %f0, %f2, %f8 | ||
38 | fmuld %f0, %f2, %f10 | ||
39 | faddd %f0, %f2, %f12 | ||
40 | fmuld %f0, %f2, %f14 | ||
41 | faddd %f0, %f2, %f16 | ||
42 | fmuld %f0, %f2, %f18 | ||
43 | faddd %f0, %f2, %f20 | ||
44 | fmuld %f0, %f2, %f22 | ||
45 | faddd %f0, %f2, %f24 | ||
46 | fmuld %f0, %f2, %f26 | ||
47 | faddd %f0, %f2, %f28 | ||
48 | fmuld %f0, %f2, %f30 | ||
49 | faddd %f0, %f2, %f32 | ||
50 | fmuld %f0, %f2, %f34 | ||
51 | faddd %f0, %f2, %f36 | ||
52 | fmuld %f0, %f2, %f38 | ||
53 | faddd %f0, %f2, %f40 | ||
54 | fmuld %f0, %f2, %f42 | ||
55 | faddd %f0, %f2, %f44 | ||
56 | fmuld %f0, %f2, %f46 | ||
57 | faddd %f0, %f2, %f48 | ||
58 | fmuld %f0, %f2, %f50 | ||
59 | faddd %f0, %f2, %f52 | ||
60 | fmuld %f0, %f2, %f54 | ||
61 | faddd %f0, %f2, %f56 | ||
62 | fmuld %f0, %f2, %f58 | ||
63 | b,pt %xcc, fpdis_exit2 | ||
64 | faddd %f0, %f2, %f60 | ||
65 | 1: mov SECONDARY_CONTEXT, %g3 | ||
66 | add %g6, TI_FPREGS + 0x80, %g1 | ||
67 | faddd %f0, %f2, %f4 | ||
68 | fmuld %f0, %f2, %f6 | ||
69 | |||
70 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
71 | .section .sun4v_1insn_patch, "ax" | ||
72 | .word 661b | ||
73 | ldxa [%g3] ASI_MMU, %g5 | ||
74 | .previous | ||
75 | |||
76 | sethi %hi(sparc64_kern_sec_context), %g2 | ||
77 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | ||
78 | |||
79 | 661: stxa %g2, [%g3] ASI_DMMU | ||
80 | .section .sun4v_1insn_patch, "ax" | ||
81 | .word 661b | ||
82 | stxa %g2, [%g3] ASI_MMU | ||
83 | .previous | ||
84 | |||
85 | membar #Sync | ||
86 | add %g6, TI_FPREGS + 0xc0, %g2 | ||
87 | faddd %f0, %f2, %f8 | ||
88 | fmuld %f0, %f2, %f10 | ||
89 | membar #Sync | ||
90 | ldda [%g1] ASI_BLK_S, %f32 | ||
91 | ldda [%g2] ASI_BLK_S, %f48 | ||
92 | membar #Sync | ||
93 | faddd %f0, %f2, %f12 | ||
94 | fmuld %f0, %f2, %f14 | ||
95 | faddd %f0, %f2, %f16 | ||
96 | fmuld %f0, %f2, %f18 | ||
97 | faddd %f0, %f2, %f20 | ||
98 | fmuld %f0, %f2, %f22 | ||
99 | faddd %f0, %f2, %f24 | ||
100 | fmuld %f0, %f2, %f26 | ||
101 | faddd %f0, %f2, %f28 | ||
102 | fmuld %f0, %f2, %f30 | ||
103 | b,pt %xcc, fpdis_exit | ||
104 | nop | ||
105 | 2: andcc %g5, FPRS_DU, %g0 | ||
106 | bne,pt %icc, 3f | ||
107 | fzero %f32 | ||
108 | mov SECONDARY_CONTEXT, %g3 | ||
109 | fzero %f34 | ||
110 | |||
111 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
112 | .section .sun4v_1insn_patch, "ax" | ||
113 | .word 661b | ||
114 | ldxa [%g3] ASI_MMU, %g5 | ||
115 | .previous | ||
116 | |||
117 | add %g6, TI_FPREGS, %g1 | ||
118 | sethi %hi(sparc64_kern_sec_context), %g2 | ||
119 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | ||
120 | |||
121 | 661: stxa %g2, [%g3] ASI_DMMU | ||
122 | .section .sun4v_1insn_patch, "ax" | ||
123 | .word 661b | ||
124 | stxa %g2, [%g3] ASI_MMU | ||
125 | .previous | ||
126 | |||
127 | membar #Sync | ||
128 | add %g6, TI_FPREGS + 0x40, %g2 | ||
129 | faddd %f32, %f34, %f36 | ||
130 | fmuld %f32, %f34, %f38 | ||
131 | membar #Sync | ||
132 | ldda [%g1] ASI_BLK_S, %f0 | ||
133 | ldda [%g2] ASI_BLK_S, %f16 | ||
134 | membar #Sync | ||
135 | faddd %f32, %f34, %f40 | ||
136 | fmuld %f32, %f34, %f42 | ||
137 | faddd %f32, %f34, %f44 | ||
138 | fmuld %f32, %f34, %f46 | ||
139 | faddd %f32, %f34, %f48 | ||
140 | fmuld %f32, %f34, %f50 | ||
141 | faddd %f32, %f34, %f52 | ||
142 | fmuld %f32, %f34, %f54 | ||
143 | faddd %f32, %f34, %f56 | ||
144 | fmuld %f32, %f34, %f58 | ||
145 | faddd %f32, %f34, %f60 | ||
146 | fmuld %f32, %f34, %f62 | ||
147 | ba,pt %xcc, fpdis_exit | ||
148 | nop | ||
149 | 3: mov SECONDARY_CONTEXT, %g3 | ||
150 | add %g6, TI_FPREGS, %g1 | ||
151 | |||
152 | 661: ldxa [%g3] ASI_DMMU, %g5 | ||
153 | .section .sun4v_1insn_patch, "ax" | ||
154 | .word 661b | ||
155 | ldxa [%g3] ASI_MMU, %g5 | ||
156 | .previous | ||
157 | |||
158 | sethi %hi(sparc64_kern_sec_context), %g2 | ||
159 | ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 | ||
160 | |||
161 | 661: stxa %g2, [%g3] ASI_DMMU | ||
162 | .section .sun4v_1insn_patch, "ax" | ||
163 | .word 661b | ||
164 | stxa %g2, [%g3] ASI_MMU | ||
165 | .previous | ||
166 | |||
167 | membar #Sync | ||
168 | mov 0x40, %g2 | ||
169 | membar #Sync | ||
170 | ldda [%g1] ASI_BLK_S, %f0 | ||
171 | ldda [%g1 + %g2] ASI_BLK_S, %f16 | ||
172 | add %g1, 0x80, %g1 | ||
173 | ldda [%g1] ASI_BLK_S, %f32 | ||
174 | ldda [%g1 + %g2] ASI_BLK_S, %f48 | ||
175 | membar #Sync | ||
176 | fpdis_exit: | ||
177 | |||
178 | 661: stxa %g5, [%g3] ASI_DMMU | ||
179 | .section .sun4v_1insn_patch, "ax" | ||
180 | .word 661b | ||
181 | stxa %g5, [%g3] ASI_MMU | ||
182 | .previous | ||
183 | |||
184 | membar #Sync | ||
185 | fpdis_exit2: | ||
186 | wr %g7, 0, %gsr | ||
187 | ldx [%g6 + TI_XFSR], %fsr | ||
188 | rdpr %tstate, %g3 | ||
189 | or %g3, %g4, %g3 ! anal... | ||
190 | wrpr %g3, %tstate | ||
191 | wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits | ||
192 | retry | ||
193 | .size do_fpdis,.-do_fpdis | ||
194 | |||
	.align	32
	/* fp_other_bounce: bounce an FP-other exception into the C
	 * handler do_fpother(struct pt_regs *) and then leave the trap
	 * through rtrap.  Entered with the trap frame (pt_regs) already
	 * on the kernel stack.
	 */
	.type	fp_other_bounce,#function
fp_other_bounce:
	call	do_fpother
	 add	%sp, PTREGS_OFF, %o0	! delay slot: arg0 = pt_regs
	ba,pt	%xcc, rtrap
	 nop
	.size	fp_other_bounce,.-fp_other_bounce
203 | |||
204 | .align 32 | ||
205 | .globl do_fpother_check_fitos | ||
206 | .type do_fpother_check_fitos,#function | ||
207 | do_fpother_check_fitos: | ||
208 | TRAP_LOAD_THREAD_REG(%g6, %g1) | ||
209 | sethi %hi(fp_other_bounce - 4), %g7 | ||
210 | or %g7, %lo(fp_other_bounce - 4), %g7 | ||
211 | |||
212 | /* NOTE: Need to preserve %g7 until we fully commit | ||
213 | * to the fitos fixup. | ||
214 | */ | ||
215 | stx %fsr, [%g6 + TI_XFSR] | ||
216 | rdpr %tstate, %g3 | ||
217 | andcc %g3, TSTATE_PRIV, %g0 | ||
218 | bne,pn %xcc, do_fptrap_after_fsr | ||
219 | nop | ||
220 | ldx [%g6 + TI_XFSR], %g3 | ||
221 | srlx %g3, 14, %g1 | ||
222 | and %g1, 7, %g1 | ||
223 | cmp %g1, 2 ! Unfinished FP-OP | ||
224 | bne,pn %xcc, do_fptrap_after_fsr | ||
225 | sethi %hi(1 << 23), %g1 ! Inexact | ||
226 | andcc %g3, %g1, %g0 | ||
227 | bne,pn %xcc, do_fptrap_after_fsr | ||
228 | rdpr %tpc, %g1 | ||
229 | lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail | ||
230 | #define FITOS_MASK 0xc1f83fe0 | ||
231 | #define FITOS_COMPARE 0x81a01880 | ||
232 | sethi %hi(FITOS_MASK), %g1 | ||
233 | or %g1, %lo(FITOS_MASK), %g1 | ||
234 | and %g3, %g1, %g1 | ||
235 | sethi %hi(FITOS_COMPARE), %g2 | ||
236 | or %g2, %lo(FITOS_COMPARE), %g2 | ||
237 | cmp %g1, %g2 | ||
238 | bne,pn %xcc, do_fptrap_after_fsr | ||
239 | nop | ||
240 | std %f62, [%g6 + TI_FPREGS + (62 * 4)] | ||
241 | sethi %hi(fitos_table_1), %g1 | ||
242 | and %g3, 0x1f, %g2 | ||
243 | or %g1, %lo(fitos_table_1), %g1 | ||
244 | sllx %g2, 2, %g2 | ||
245 | jmpl %g1 + %g2, %g0 | ||
246 | ba,pt %xcc, fitos_emul_continue | ||
247 | |||
248 | fitos_table_1: | ||
249 | fitod %f0, %f62 | ||
250 | fitod %f1, %f62 | ||
251 | fitod %f2, %f62 | ||
252 | fitod %f3, %f62 | ||
253 | fitod %f4, %f62 | ||
254 | fitod %f5, %f62 | ||
255 | fitod %f6, %f62 | ||
256 | fitod %f7, %f62 | ||
257 | fitod %f8, %f62 | ||
258 | fitod %f9, %f62 | ||
259 | fitod %f10, %f62 | ||
260 | fitod %f11, %f62 | ||
261 | fitod %f12, %f62 | ||
262 | fitod %f13, %f62 | ||
263 | fitod %f14, %f62 | ||
264 | fitod %f15, %f62 | ||
265 | fitod %f16, %f62 | ||
266 | fitod %f17, %f62 | ||
267 | fitod %f18, %f62 | ||
268 | fitod %f19, %f62 | ||
269 | fitod %f20, %f62 | ||
270 | fitod %f21, %f62 | ||
271 | fitod %f22, %f62 | ||
272 | fitod %f23, %f62 | ||
273 | fitod %f24, %f62 | ||
274 | fitod %f25, %f62 | ||
275 | fitod %f26, %f62 | ||
276 | fitod %f27, %f62 | ||
277 | fitod %f28, %f62 | ||
278 | fitod %f29, %f62 | ||
279 | fitod %f30, %f62 | ||
280 | fitod %f31, %f62 | ||
281 | |||
282 | fitos_emul_continue: | ||
283 | sethi %hi(fitos_table_2), %g1 | ||
284 | srl %g3, 25, %g2 | ||
285 | or %g1, %lo(fitos_table_2), %g1 | ||
286 | and %g2, 0x1f, %g2 | ||
287 | sllx %g2, 2, %g2 | ||
288 | jmpl %g1 + %g2, %g0 | ||
289 | ba,pt %xcc, fitos_emul_fini | ||
290 | |||
291 | fitos_table_2: | ||
292 | fdtos %f62, %f0 | ||
293 | fdtos %f62, %f1 | ||
294 | fdtos %f62, %f2 | ||
295 | fdtos %f62, %f3 | ||
296 | fdtos %f62, %f4 | ||
297 | fdtos %f62, %f5 | ||
298 | fdtos %f62, %f6 | ||
299 | fdtos %f62, %f7 | ||
300 | fdtos %f62, %f8 | ||
301 | fdtos %f62, %f9 | ||
302 | fdtos %f62, %f10 | ||
303 | fdtos %f62, %f11 | ||
304 | fdtos %f62, %f12 | ||
305 | fdtos %f62, %f13 | ||
306 | fdtos %f62, %f14 | ||
307 | fdtos %f62, %f15 | ||
308 | fdtos %f62, %f16 | ||
309 | fdtos %f62, %f17 | ||
310 | fdtos %f62, %f18 | ||
311 | fdtos %f62, %f19 | ||
312 | fdtos %f62, %f20 | ||
313 | fdtos %f62, %f21 | ||
314 | fdtos %f62, %f22 | ||
315 | fdtos %f62, %f23 | ||
316 | fdtos %f62, %f24 | ||
317 | fdtos %f62, %f25 | ||
318 | fdtos %f62, %f26 | ||
319 | fdtos %f62, %f27 | ||
320 | fdtos %f62, %f28 | ||
321 | fdtos %f62, %f29 | ||
322 | fdtos %f62, %f30 | ||
323 | fdtos %f62, %f31 | ||
324 | |||
325 | fitos_emul_fini: | ||
326 | ldd [%g6 + TI_FPREGS + (62 * 4)], %f62 | ||
327 | done | ||
328 | .size do_fpother_check_fitos,.-do_fpother_check_fitos | ||
329 | |||
	.align	32
	/* do_fptrap: save the live FP state (%fsr, %gsr, %fprs, and the
	 * FP register halves marked dirty by FPRS_DL/FPRS_DU) into the
	 * current thread_info, then enter the kernel via etrap with
	 * %fprs cleared.  The block stores use ASI_BLK_S, so the
	 * secondary MMU context is temporarily switched to the kernel
	 * context around them and restored afterwards.
	 */
	.globl	do_fptrap
	.type	do_fptrap,#function
do_fptrap:
	TRAP_LOAD_THREAD_REG(%g6, %g1)
	stx	%fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:			/* entry point for callers that already saved %fsr */
	ldub	[%g6 + TI_FPSAVED], %g3
	rd	%fprs, %g1
	or	%g3, %g1, %g3		! merge live %fprs bits into saved flags
	stb	%g3, [%g6 + TI_FPSAVED]
	rd	%gsr, %g3
	stx	%g3, [%g6 + TI_GSR]
	mov	SECONDARY_CONTEXT, %g3

	/* Read the current secondary context register (sun4v boots
	 * patch in the alternate MMU ASI at the 661: site).
	 */
661:	ldxa	[%g3] ASI_DMMU, %g5
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	ldxa	[%g3] ASI_MMU, %g5
	.previous

	sethi	%hi(sparc64_kern_sec_context), %g2
	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2

	/* Switch the secondary context to the kernel context. */
661:	stxa	%g2, [%g3] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g3] ASI_MMU
	.previous

	membar	#Sync
	add	%g6, TI_FPREGS, %g2
	andcc	%g1, FPRS_DL, %g0	! lower FP register half dirty?
	be,pn	%icc, 4f
	 mov	0x40, %g3
	stda	%f0, [%g2] ASI_BLK_S
	stda	%f16, [%g2 + %g3] ASI_BLK_S
	andcc	%g1, FPRS_DU, %g0	! upper FP register half dirty?
	be,pn	%icc, 5f
4:	 add	%g2, 128, %g2		! branch target AND delay slot of be,pn 5f
	stda	%f32, [%g2] ASI_BLK_S
	stda	%f48, [%g2 + %g3] ASI_BLK_S
5:	mov	SECONDARY_CONTEXT, %g1
	membar	#Sync

	/* Restore the original secondary context saved in %g5. */
661:	stxa	%g5, [%g1] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g5, [%g1] ASI_MMU
	.previous

	membar	#Sync
	ba,pt	%xcc, etrap
	 wr	%g0, 0, %fprs		! delay slot: FP disabled entering kernel
	.size	do_fptrap,.-do_fptrap
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c new file mode 100644 index 000000000000..d0218e73f982 --- /dev/null +++ b/arch/sparc/kernel/ftrace.c | |||
@@ -0,0 +1,76 @@ | |||
1 | #include <linux/spinlock.h> | ||
2 | #include <linux/hardirq.h> | ||
3 | #include <linux/ftrace.h> | ||
4 | #include <linux/percpu.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <linux/list.h> | ||
7 | |||
8 | #include <asm/ftrace.h> | ||
9 | |||
10 | static const u32 ftrace_nop = 0x01000000; | ||
11 | |||
12 | unsigned char *ftrace_nop_replace(void) | ||
13 | { | ||
14 | return (char *)&ftrace_nop; | ||
15 | } | ||
16 | |||
/* Build the 4-byte sparc "call" instruction that jumps from @ip to
 * @addr.  Encoding: opcode bits in 0x40000000 plus the word
 * displacement (byte offset >> 2) in the low bits.
 *
 * NOTE: the instruction is assembled in a static buffer, so the
 * returned pointer is only valid until the next call (not reentrant).
 */
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static u32 call;
	s32 off;

	/* Branch displacement, computed in 32 bits. */
	off = ((s32)addr - (s32)ip);
	call = 0x40000000 | ((u32)off >> 2);

	return (unsigned char *) &call;
}
27 | |||
/* Atomically replace the instruction word at @ip.
 *
 * A "cas" swaps in the new instruction only if the expected old one is
 * still present, then the I-cache line is flushed.  A fault on the cas
 * (e.g. unmapped text) is caught via the kernel exception table and
 * turned into an error return.
 *
 * Return: 0 on success, 1 if the cas faulted, 2 if the word at @ip
 * matched neither the expected old instruction nor the new one.
 */
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	u32 old = *(u32 *)old_code;
	u32 new = *(u32 *)new_code;
	u32 replaced;		/* prior memory word returned by cas */
	int faulted;

	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"	/* attempt the swap */
	"	flush	%[ip]\n"			/* sync I-cache */
	"	mov	0, %[faulted]\n"
	"2:\n"
	/* Fixup stub: runs if the cas faults, jumps back to 2: with
	 * faulted = 1 set in the delay slot.
	 */
	"	.section	.fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	mov	1, %[faulted]\n"
	"	.previous\n"
	/* Exception table entry: fault at 1b resumes at 3b. */
	"	.section	__ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* cas left the previous memory word in "replaced"; anything
	 * other than the old or new instruction means someone else
	 * modified the site.
	 */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
61 | |||
/* Repoint the patched ftrace_call site at @func by rewriting the call
 * instruction in place.
 *
 * Return: 0 on success, otherwise the error code from
 * ftrace_modify_code().
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	/* Snapshot the current instruction so the cas in
	 * ftrace_modify_code() can verify it before replacing it.
	 */
	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	return ftrace_modify_code(ip, old, new);
}
71 | |||
/* One-time boot setup for dynamic ftrace on sparc: hand @data to
 * ftrace_mcount_set().  Always reports success.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	ftrace_mcount_set(data);
	return 0;
}
diff --git a/arch/sparc/kernel/getsetcc.S b/arch/sparc/kernel/getsetcc.S new file mode 100644 index 000000000000..a14d272d2061 --- /dev/null +++ b/arch/sparc/kernel/getsetcc.S | |||
@@ -0,0 +1,24 @@ | |||
	/* getcc: extract the 4-bit integer condition codes from
	 * pt_regs->tstate and deposit them in the pt_regs G1 save slot.
	 * %o0 = pointer to pt_regs.
	 */
	.globl	getcc
	.type	getcc,#function
getcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	srlx	%o1, 32, %o1		! cc field sits above bit 32
	and	%o1, 0xf, %o1		! isolate the 4 condition-code bits
	retl
	 stx	%o1, [%o0 + PT_V9_G1]	! delay slot: result into G1 slot
	.size	getcc,.-getcc
10 | |||
	/* setcc: replace the TSTATE_ICC field of pt_regs->tstate with
	 * the condition-code value stored in the pt_regs G1 save slot.
	 * %o0 = pointer to pt_regs.
	 */
	.globl	setcc
	.type	setcc,#function
setcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	ldx	[%o0 + PT_V9_G1], %o2
	or	%g0, %ulo(TSTATE_ICC), %o3
	sllx	%o3, 32, %o3		! build the TSTATE_ICC mask in place
	andn	%o1, %o3, %o1		! clear the old icc bits
	sllx	%o2, 32, %o2		! shift new cc value into position
	and	%o2, %o3, %o2		! keep only the icc bits
	or	%o1, %o2, %o1		! merge into tstate
	retl
	 stx	%o1, [%o0 + PT_V9_TSTATE]	! delay slot: write back
	.size	setcc,.-setcc
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S new file mode 100644 index 000000000000..8ffee714f932 --- /dev/null +++ b/arch/sparc/kernel/head_64.S | |||
@@ -0,0 +1,900 @@ | |||
1 | /* head.S: Initial boot code for the Sparc64 port of Linux. | ||
2 | * | ||
3 | * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au) | ||
5 | * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx) | ||
7 | */ | ||
8 | |||
9 | #include <linux/version.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/threads.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/asi.h> | ||
16 | #include <asm/pstate.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/spitfire.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/errno.h> | ||
22 | #include <asm/signal.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/lsu.h> | ||
25 | #include <asm/dcr.h> | ||
26 | #include <asm/dcu.h> | ||
27 | #include <asm/head.h> | ||
28 | #include <asm/ttable.h> | ||
29 | #include <asm/mmu.h> | ||
30 | #include <asm/cpudata.h> | ||
31 | #include <asm/pil.h> | ||
32 | #include <asm/estate.h> | ||
33 | #include <asm/sfafsr.h> | ||
34 | #include <asm/unistd.h> | ||
35 | |||
/* This section from _start to sparc64_boot_end should fit into
 * 0x0000000000404000 to 0x0000000000408000.
 */
	.text
	.globl	start, _start, stext, _stext
_start:
start:
_stext:
stext:
	! 0x0000000000404000
	b	sparc64_boot
	 flushw				/* Flush register file.  */

/* This stuff has to be in sync with SILO and other potential boot loaders
 * Fields should be kept upward compatible and whenever any change is made,
 * HdrS version should be incremented.
 */
	.global	root_flags, ram_flags, root_dev
	.global	sparc_ramdisk_image, sparc_ramdisk_size
	.global	sparc_ramdisk_image64

	.ascii	"HdrS"
	.word	LINUX_VERSION_CODE

	/* History:
	 *
	 * 0x0300 : Supports being located at other than 0x4000
	 * 0x0202 : Supports kernel params string
	 * 0x0201 : Supports reboot_command
	 */
	.half	0x0301		/* HdrS version */

root_flags:
	.half	1
root_dev:
	.half	0
ram_flags:
	.half	0
sparc_ramdisk_image:
	.word	0		/* 32-bit ramdisk address, filled in by the boot loader */
sparc_ramdisk_size:
	.word	0		/* ramdisk size in bytes */
	.xword	reboot_command
	.xword	bootstr_info
sparc_ramdisk_image64:
	.xword	0		/* 64-bit ramdisk address (HdrS >= 0x0300 loaders) */
	.word	_end
83 | |||
	/* PROM cif handler code address is in %o4. */
sparc64_boot:
	mov	%o4, %l7	! stash the OBP client-interface entry point

	/* We need to remap the kernel.  Use position independent
	 * code to remap us to KERNBASE.
	 *
	 * SILO can invoke us with 32-bit address masking enabled,
	 * so make sure that's clear.
	 */
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_AM, %g1
	wrpr	%g1, 0x0, %pstate
	ba,a,pt	%xcc, 1f

	/* Strings and scratch cells for the OBP (IEEE 1275) client
	 * interface calls below.  They live inside the text so they can
	 * be addressed PC-relatively (via "1b - label") before the
	 * kernel has been mapped at KERNBASE.
	 */
	.globl	prom_finddev_name, prom_chosen_path, prom_root_node
	.globl	prom_getprop_name, prom_mmu_name, prom_peer_name
	.globl	prom_callmethod_name, prom_translate_name, prom_root_compatible
	.globl	prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
	.globl	prom_boot_mapped_pc, prom_boot_mapping_mode
	.globl	prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
	.globl	prom_compatible_name, prom_cpu_path, prom_cpu_compatible
	.globl	is_sun4v, sun4v_chip_type, prom_set_trap_table_name
prom_peer_name:
	.asciz	"peer"
prom_compatible_name:
	.asciz	"compatible"
prom_finddev_name:
	.asciz	"finddevice"
prom_chosen_path:
	.asciz	"/chosen"
prom_cpu_path:
	.asciz	"/cpu"
prom_getprop_name:
	.asciz	"getprop"
prom_mmu_name:
	.asciz	"mmu"
prom_callmethod_name:
	.asciz	"call-method"
prom_translate_name:
	.asciz	"translate"
prom_map_name:
	.asciz	"map"
prom_unmap_name:
	.asciz	"unmap"
prom_set_trap_table_name:
	.asciz	"SUNW,set-trap-table"
prom_sun4v_name:
	.asciz	"sun4v"
prom_niagara_prefix:
	.asciz	"SUNW,UltraSPARC-T"
	.align	4
prom_root_compatible:
	.skip	64		! "compatible" property of the root node
prom_cpu_compatible:
	.skip	64		! "compatible" property of the /cpu node
prom_root_node:
	.word	0
prom_mmu_ihandle_cache:
	.word	0
prom_boot_mapped_pc:
	.word	0
prom_boot_mapping_mode:
	.word	0
	.align	8
prom_boot_mapping_phys_high:
	.xword	0
prom_boot_mapping_phys_low:
	.xword	0
is_sun4v:
	.word	0
sun4v_chip_type:
	.word	SUN4V_CHIP_INVALID
157 | 1: | ||
158 | rd %pc, %l0 | ||
159 | |||
160 | mov (1b - prom_peer_name), %l1 | ||
161 | sub %l0, %l1, %l1 | ||
162 | mov 0, %l2 | ||
163 | |||
164 | /* prom_root_node = prom_peer(0) */ | ||
165 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" | ||
166 | mov 1, %l3 | ||
167 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
168 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
169 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 | ||
170 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
171 | call %l7 | ||
172 | add %sp, (2047 + 128), %o0 ! argument array | ||
173 | |||
174 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node | ||
175 | mov (1b - prom_root_node), %l1 | ||
176 | sub %l0, %l1, %l1 | ||
177 | stw %l4, [%l1] | ||
178 | |||
179 | mov (1b - prom_getprop_name), %l1 | ||
180 | mov (1b - prom_compatible_name), %l2 | ||
181 | mov (1b - prom_root_compatible), %l5 | ||
182 | sub %l0, %l1, %l1 | ||
183 | sub %l0, %l2, %l2 | ||
184 | sub %l0, %l5, %l5 | ||
185 | |||
186 | /* prom_getproperty(prom_root_node, "compatible", | ||
187 | * &prom_root_compatible, 64) | ||
188 | */ | ||
189 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
190 | mov 4, %l3 | ||
191 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
192 | mov 1, %l3 | ||
193 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
194 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node | ||
195 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" | ||
196 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible | ||
197 | mov 64, %l3 | ||
198 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size | ||
199 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
200 | call %l7 | ||
201 | add %sp, (2047 + 128), %o0 ! argument array | ||
202 | |||
203 | mov (1b - prom_finddev_name), %l1 | ||
204 | mov (1b - prom_chosen_path), %l2 | ||
205 | mov (1b - prom_boot_mapped_pc), %l3 | ||
206 | sub %l0, %l1, %l1 | ||
207 | sub %l0, %l2, %l2 | ||
208 | sub %l0, %l3, %l3 | ||
209 | stw %l0, [%l3] | ||
210 | sub %sp, (192 + 128), %sp | ||
211 | |||
212 | /* chosen_node = prom_finddevice("/chosen") */ | ||
213 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" | ||
214 | mov 1, %l3 | ||
215 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
216 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
217 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen" | ||
218 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
219 | call %l7 | ||
220 | add %sp, (2047 + 128), %o0 ! argument array | ||
221 | |||
222 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node | ||
223 | |||
224 | mov (1b - prom_getprop_name), %l1 | ||
225 | mov (1b - prom_mmu_name), %l2 | ||
226 | mov (1b - prom_mmu_ihandle_cache), %l5 | ||
227 | sub %l0, %l1, %l1 | ||
228 | sub %l0, %l2, %l2 | ||
229 | sub %l0, %l5, %l5 | ||
230 | |||
231 | /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */ | ||
232 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
233 | mov 4, %l3 | ||
234 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
235 | mov 1, %l3 | ||
236 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
237 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node | ||
238 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu" | ||
239 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache | ||
240 | mov 4, %l3 | ||
241 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3) | ||
242 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
243 | call %l7 | ||
244 | add %sp, (2047 + 128), %o0 ! argument array | ||
245 | |||
246 | mov (1b - prom_callmethod_name), %l1 | ||
247 | mov (1b - prom_translate_name), %l2 | ||
248 | sub %l0, %l1, %l1 | ||
249 | sub %l0, %l2, %l2 | ||
250 | lduw [%l5], %l5 ! prom_mmu_ihandle_cache | ||
251 | |||
252 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method" | ||
253 | mov 3, %l3 | ||
254 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3 | ||
255 | mov 5, %l3 | ||
256 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 | ||
257 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" | ||
258 | stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache | ||
259 | /* PAGE align */ | ||
260 | srlx %l0, 13, %l3 | ||
261 | sllx %l3, 13, %l3 | ||
262 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC | ||
263 | stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 | ||
264 | stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 | ||
265 | stx %g0, [%sp + 2047 + 128 + 0x40] ! res3 | ||
266 | stx %g0, [%sp + 2047 + 128 + 0x48] ! res4 | ||
267 | stx %g0, [%sp + 2047 + 128 + 0x50] ! res5 | ||
268 | call %l7 | ||
269 | add %sp, (2047 + 128), %o0 ! argument array | ||
270 | |||
271 | ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode | ||
272 | mov (1b - prom_boot_mapping_mode), %l4 | ||
273 | sub %l0, %l4, %l4 | ||
274 | stw %l1, [%l4] | ||
275 | mov (1b - prom_boot_mapping_phys_high), %l4 | ||
276 | sub %l0, %l4, %l4 | ||
277 | ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high | ||
278 | stx %l2, [%l4 + 0x0] | ||
279 | ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low | ||
280 | /* 4MB align */ | ||
281 | srlx %l3, 22, %l3 | ||
282 | sllx %l3, 22, %l3 | ||
283 | stx %l3, [%l4 + 0x8] | ||
284 | |||
285 | /* Leave service as-is, "call-method" */ | ||
286 | mov 7, %l3 | ||
287 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7 | ||
288 | mov 1, %l3 | ||
289 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
290 | mov (1b - prom_map_name), %l3 | ||
291 | sub %l0, %l3, %l3 | ||
292 | stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map" | ||
293 | /* Leave arg2 as-is, prom_mmu_ihandle_cache */ | ||
294 | mov -1, %l3 | ||
295 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) | ||
296 | /* 4MB align the kernel image size. */ | ||
297 | set (_end - KERNBASE), %l3 | ||
298 | set ((4 * 1024 * 1024) - 1), %l4 | ||
299 | add %l3, %l4, %l3 | ||
300 | andn %l3, %l4, %l3 | ||
301 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB) | ||
302 | sethi %hi(KERNBASE), %l3 | ||
303 | stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) | ||
304 | stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty | ||
305 | mov (1b - prom_boot_mapping_phys_low), %l3 | ||
306 | sub %l0, %l3, %l3 | ||
307 | ldx [%l3], %l3 | ||
308 | stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr | ||
309 | call %l7 | ||
310 | add %sp, (2047 + 128), %o0 ! argument array | ||
311 | |||
312 | add %sp, (192 + 128), %sp | ||
313 | |||
314 | sethi %hi(prom_root_compatible), %g1 | ||
315 | or %g1, %lo(prom_root_compatible), %g1 | ||
316 | sethi %hi(prom_sun4v_name), %g7 | ||
317 | or %g7, %lo(prom_sun4v_name), %g7 | ||
318 | mov 5, %g3 | ||
319 | 90: ldub [%g7], %g2 | ||
320 | ldub [%g1], %g4 | ||
321 | cmp %g2, %g4 | ||
322 | bne,pn %icc, 80f | ||
323 | add %g7, 1, %g7 | ||
324 | subcc %g3, 1, %g3 | ||
325 | bne,pt %xcc, 90b | ||
326 | add %g1, 1, %g1 | ||
327 | |||
328 | sethi %hi(is_sun4v), %g1 | ||
329 | or %g1, %lo(is_sun4v), %g1 | ||
330 | mov 1, %g7 | ||
331 | stw %g7, [%g1] | ||
332 | |||
333 | /* cpu_node = prom_finddevice("/cpu") */ | ||
334 | mov (1b - prom_finddev_name), %l1 | ||
335 | mov (1b - prom_cpu_path), %l2 | ||
336 | sub %l0, %l1, %l1 | ||
337 | sub %l0, %l2, %l2 | ||
338 | sub %sp, (192 + 128), %sp | ||
339 | |||
340 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" | ||
341 | mov 1, %l3 | ||
342 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
343 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
344 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu" | ||
345 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
346 | call %l7 | ||
347 | add %sp, (2047 + 128), %o0 ! argument array | ||
348 | |||
349 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node | ||
350 | |||
351 | mov (1b - prom_getprop_name), %l1 | ||
352 | mov (1b - prom_compatible_name), %l2 | ||
353 | mov (1b - prom_cpu_compatible), %l5 | ||
354 | sub %l0, %l1, %l1 | ||
355 | sub %l0, %l2, %l2 | ||
356 | sub %l0, %l5, %l5 | ||
357 | |||
358 | /* prom_getproperty(cpu_node, "compatible", | ||
359 | * &prom_cpu_compatible, 64) | ||
360 | */ | ||
361 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
362 | mov 4, %l3 | ||
363 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
364 | mov 1, %l3 | ||
365 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
366 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node | ||
367 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" | ||
368 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible | ||
369 | mov 64, %l3 | ||
370 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size | ||
371 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
372 | call %l7 | ||
373 | add %sp, (2047 + 128), %o0 ! argument array | ||
374 | |||
375 | add %sp, (192 + 128), %sp | ||
376 | |||
377 | sethi %hi(prom_cpu_compatible), %g1 | ||
378 | or %g1, %lo(prom_cpu_compatible), %g1 | ||
379 | sethi %hi(prom_niagara_prefix), %g7 | ||
380 | or %g7, %lo(prom_niagara_prefix), %g7 | ||
381 | mov 17, %g3 | ||
382 | 90: ldub [%g7], %g2 | ||
383 | ldub [%g1], %g4 | ||
384 | cmp %g2, %g4 | ||
385 | bne,pn %icc, 4f | ||
386 | add %g7, 1, %g7 | ||
387 | subcc %g3, 1, %g3 | ||
388 | bne,pt %xcc, 90b | ||
389 | add %g1, 1, %g1 | ||
390 | |||
391 | sethi %hi(prom_cpu_compatible), %g1 | ||
392 | or %g1, %lo(prom_cpu_compatible), %g1 | ||
393 | ldub [%g1 + 17], %g2 | ||
394 | cmp %g2, '1' | ||
395 | be,pt %xcc, 5f | ||
396 | mov SUN4V_CHIP_NIAGARA1, %g4 | ||
397 | cmp %g2, '2' | ||
398 | be,pt %xcc, 5f | ||
399 | mov SUN4V_CHIP_NIAGARA2, %g4 | ||
400 | 4: | ||
401 | mov SUN4V_CHIP_UNKNOWN, %g4 | ||
402 | 5: sethi %hi(sun4v_chip_type), %g2 | ||
403 | or %g2, %lo(sun4v_chip_type), %g2 | ||
404 | stw %g4, [%g2] | ||
405 | |||
406 | 80: | ||
407 | BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) | ||
408 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) | ||
409 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) | ||
410 | ba,pt %xcc, spitfire_boot | ||
411 | nop | ||
412 | |||
cheetah_plus_boot:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_boot
	 nop
417 | |||
cheetah_boot:
	/* Unlike cheetah+, plain cheetah gets explicit DCR/DCU values
	 * rather than inheriting whatever OBP left behind.
	 */
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18			! dispatch control register

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	or	%g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
	sllx	%g7, 32, %g7
	or	%g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
	stxa	%g7, [%g0] ASI_DCU_CONTROL_REG	! enable MMUs and caches
	membar	#Sync
428 | |||
cheetah_generic_boot:
	/* Zero all the TSB extension registers (primary, secondary,
	 * nucleus) in both MMUs before Linux takes over.
	 */
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	ba,a,pt	%xcc, jump_to_sun4u_init
445 | |||
spitfire_boot:
	/* Typically PROM has already enabled both MMU's and both on-chip
	 * caches, but we do it here anyway just to be paranoid.
	 */
	mov	(LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync
453 | |||
jump_to_sun4u_init:
	/*
	 * Make sure we are in privileged mode, have address masking,
	 * using the ordinary globals and have enabled floating
	 * point.
	 *
	 * Again, typically PROM has left %pil at 13 or similar, and
	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
	 */
	wrpr	%g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
	wr	%g0, 0, %fprs

	/* Absolute jump: leave the PC-relative low-mapping world and
	 * continue at the KERNBASE mapping established above.
	 */
	set	sun4u_init, %g2
	jmpl	%g2 + %g0, %g0
	 nop
469 | |||
	/* From here on we run at KERNBASE: pick the TLB flavor, patch
	 * the copy/clear/TLB routines to match the CPU, set up the
	 * initial kernel stack, clear bss, and call into C.
	 */
	.section	.text.init.refok
sun4u_init:
	BRANCH_IF_SUN4V(g1, sun4v_init)

	/* Set ctx 0 */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	ba,pt	%xcc, sun4u_continue
	 nop

sun4v_init:
	/* Set ctx 0 */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_MMU	! sun4v uses ASI_MMU, not ASI_DMMU
	membar	#Sync

	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_MMU
	membar	#Sync
	ba,pt	%xcc, niagara_tlb_fixup
	 nop

sun4u_continue:
	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)

	ba,pt	%xcc, spitfire_tlb_fixup
	 nop

niagara_tlb_fixup:
	mov	3, %g2		/* Set TLB type to hypervisor. */
	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

	/* Patch copy/clear ops. */
	sethi	%hi(sun4v_chip_type), %g1
	lduw	[%g1 + %lo(sun4v_chip_type)], %g1
	cmp	%g1, SUN4V_CHIP_NIAGARA1
	be,pt	%xcc, niagara_patch
	 cmp	%g1, SUN4V_CHIP_NIAGARA2
	be,pt	%xcc, niagara2_patch
	 nop

	/* Unknown sun4v chip: fall back to the generic routines. */
	call	generic_patch_copyops
	 nop
	call	generic_patch_bzero
	 nop
	call	generic_patch_pageops
	 nop

	ba,a,pt	%xcc, 80f
niagara2_patch:
	call	niagara2_patch_copyops
	 nop
	call	niagara_patch_bzero
	 nop
	call	niagara2_patch_pageops
	 nop

	ba,a,pt	%xcc, 80f

niagara_patch:
	call	niagara_patch_copyops
	 nop
	call	niagara_patch_bzero
	 nop
	call	niagara_patch_pageops
	 nop

80:
	/* Patch TLB/cache ops. */
	call	hypervisor_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop

cheetah_tlb_fixup:
	mov	2, %g2		/* Set TLB type to cheetah+. */
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)

	mov	1, %g2		/* Set TLB type to cheetah. */

1:	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

	/* Patch copy/page operations to cheetah optimized versions. */
	call	cheetah_patch_copyops
	 nop
	call	cheetah_patch_copy_page
	 nop
	call	cheetah_patch_cachetlbops
	 nop

	ba,pt	%xcc, tlb_fixup_done
	 nop

spitfire_tlb_fixup:
	/* Set TLB type to spitfire. */
	mov	0, %g2
	sethi	%hi(tlb_type), %g1
	stw	%g2, [%g1 + %lo(tlb_type)]

tlb_fixup_done:
	/* Point %g6 at the init task's thread_info and build the boot
	 * stack just below the top of its (1 << THREAD_SHIFT) area.
	 */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6
	ldx	[%g6 + TI_TASK], %g4
	mov	%sp, %l6	! remember the OpenPROM stack for prom_init

	wr	%g0, ASI_P, %asi
	mov	1, %g1
	sllx	%g1, THREAD_SHIFT, %g1
	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
	add	%g6, %g1, %sp
	mov	0, %fp

	/* Set per-cpu pointer initially to zero, this makes
	 * the boot-cpu use the in-kernel-image per-cpu areas
	 * before setup_per_cpu_area() is invoked.
	 */
	clr	%g5

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0x0, %tl

	/* Clear the bss */
	sethi	%hi(__bss_start), %o0
	or	%o0, %lo(__bss_start), %o0
	sethi	%hi(_end), %o1
	or	%o1, %lo(_end), %o1
	call	__bzero
	 sub	%o1, %o0, %o1

#ifdef CONFIG_LOCKDEP
	/* We have this call this super early, as even prom_init can grab
	 * spinlocks and thus call into the lockdep code.
	 */
	call	lockdep_init
	 nop
#endif

	mov	%l6, %o1	! OpenPROM stack
	call	prom_init
	 mov	%l7, %o0	! OpenPROM cif handler

	/* Initialize current_thread_info()->cpu as early as possible.
	 * In order to do that accurately we have to patch up the get_cpuid()
	 * assembler sequences.  And that, in turn, requires that we know
	 * if we are on a Starfire box or not.  While we're here, patch up
	 * the sun4v sequences as well.
	 */
	call	check_if_starfire
	 nop
	call	per_cpu_patch
	 nop
	call	sun4v_patch
	 nop

#ifdef CONFIG_SMP
	call	hard_smp_processor_id
	 nop
	cmp	%o0, NR_CPUS
	blu,pt	%xcc, 1f
	 nop
	call	boot_cpu_id_too_large
	 nop
	/* Not reached... */

1:
	/* If we boot on a non-zero cpu, all of the per-cpu
	 * variable references we make before setting up the
	 * per-cpu areas will use a bogus offset.  Put a
	 * compensating factor into __per_cpu_base to handle
	 * this cleanly.
	 *
	 * What the per-cpu code calculates is:
	 *
	 *	__per_cpu_base + (cpu << __per_cpu_shift)
	 *
	 * These two variables are zero initially, so to
	 * make it all cancel out to zero we need to put
	 * "0 - (cpu << 0)" into __per_cpu_base so that the
	 * above formula evaluates to zero.
	 *
	 * We cannot even perform a printk() until this stuff
	 * is setup as that calls cpu_clock() which uses
	 * per-cpu variables.
	 */
	sub	%g0, %o0, %o1
	sethi	%hi(__per_cpu_base), %o2
	stx	%o1, [%o2 + %lo(__per_cpu_base)]
#else
	mov	0, %o0
#endif
	sth	%o0, [%g6 + TI_CPU]

	call	prom_init_report
	 nop

	/* Off we go.... */
	call	start_kernel
	 nop
	/* Not reached... */

	.previous
680 | |||
/* This is meant to allow the sharing of this code between
 * boot processor invocation (via setup_tba() below) and
 * secondary processor startup (via trampoline.S).  The
 * former does use this code, the latter does not yet due
 * to some complexities.  That should be fixed up at some
 * point.
 *
 * There used to be enormous complexity wrt. transferring
 * over from the firmware's trap table to the Linux kernel's.
 * For example, there was a chicken & egg problem wrt. building
 * the OBP page tables, yet needing to be on the Linux kernel
 * trap table (to translate PAGE_OFFSET addresses) in order to
 * do that.
 *
 * We now handle OBP tlb misses differently, via linear lookups
 * into the prom_trans[] array.  So that specific problem no
 * longer exists.  Yet, unfortunately there are still some issues
 * preventing trampoline.S from using this code... ho hum.
 */
	.globl	setup_trap_table
setup_trap_table:
	save	%sp, -192, %sp

	/* Force interrupts to be disabled. */
	rdpr	%pstate, %l0
	andn	%l0, PSTATE_IE, %o1
	wrpr	%o1, 0x0, %pstate
	rdpr	%pil, %l1
	wrpr	%g0, PIL_NORMAL_MAX, %pil

	/* Make the firmware call to jump over to the Linux trap table. */
	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 1f
	 nop

	/* sun4v: point the scratchpad MMU-fault-status-area register at
	 * this cpu's fault info block before switching trap tables.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa	%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi	%hi(KERNBASE), %g3
	sub	%g2, %g3, %g2
	sethi	%hi(kern_base), %g3
	ldx	[%g3 + %lo(kern_base)], %g3
	add	%g2, %g3, %o1
	sethi	%hi(sparc64_ttable_tl0), %o0

	/* sun4v "SUNW,set-trap-table" takes two args: ttable vaddr
	 * and the MMU fault status area physical address.
	 */
	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	2, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	stx	%o1, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	ba,pt	%xcc, 2f
	 nop

	/* sun4u variant: only one argument, the ttable vaddr. */
1:	sethi	%hi(sparc64_ttable_tl0), %o0
	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	/* Start using proper page size encodings in ctx register. */
2:	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2

	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	membar	#Sync

	BRANCH_IF_SUN4V(o2, 1f)

	/* Kill PROM timer */
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, 0, %tick_cmpr

	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)

	ba,pt	%xcc, 2f
	 nop

	/* Disable STICK_INT interrupts. */
1:
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, %asr25

2:
	wrpr	%g0, %g0, %wstate

	call	init_irqwork_curcpu
	 nop

	/* Now we can restore interrupt state. */
	wrpr	%l0, 0, %pstate
	wrpr	%l1, 0x0, %pil

	ret
	 restore
807 | |||
	.globl	setup_tba
setup_tba:
	save	%sp, -192, %sp

	/* The boot processor is the only cpu which invokes this
	 * routine, the other cpus set things up via trampoline.S.
	 * So save the OBP trap table address here.
	 */
	rdpr	%tba, %g7
	sethi	%hi(prom_tba), %o1
	or	%o1, %lo(prom_tba), %o1
	stx	%g7, [%o1]

	/* Hand over from the firmware trap table to Linux's. */
	call	setup_trap_table
	 nop

	ret
	 restore
sparc64_boot_end:
827 | |||
828 | #include "etrap_64.S" | ||
829 | #include "rtrap_64.S" | ||
830 | #include "winfixup.S" | ||
831 | #include "fpu_traps.S" | ||
832 | #include "ivec.S" | ||
833 | #include "getsetcc.S" | ||
834 | #include "utrap.S" | ||
835 | #include "spiterrs.S" | ||
836 | #include "cherrs.S" | ||
837 | #include "misctrap.S" | ||
838 | #include "syscalls.S" | ||
839 | #include "helpers.S" | ||
840 | #include "hvcalls.S" | ||
841 | #include "sun4v_tlb_miss.S" | ||
842 | #include "sun4v_ivec.S" | ||
843 | #include "ktlb.S" | ||
844 | #include "tsb.S" | ||
845 | |||
846 | /* | ||
847 | * The following skip makes sure the trap table in ttable.S is aligned | ||
848 | * on a 32K boundary as required by the v9 specs for TBA register. | ||
849 | * | ||
850 | * We align to a 32K boundary, then we have the 32K kernel TSB, | ||
851 | * the 64K kernel 4MB TSB, and then the 32K aligned trap table. | ||
852 | */ | ||
853 | 1: | ||
854 | .skip 0x4000 + _start - 1b | ||
855 | |||
856 | ! 0x0000000000408000 | ||
857 | |||
858 | .globl swapper_tsb | ||
859 | swapper_tsb: | ||
860 | .skip (32 * 1024) | ||
861 | |||
862 | .globl swapper_4m_tsb | ||
863 | swapper_4m_tsb: | ||
864 | .skip (64 * 1024) | ||
865 | |||
866 | ! 0x0000000000420000 | ||
867 | |||
/* Some care needs to be exercised if you try to move the
 * location of the trap table relative to other things.  For
 * one thing there are br* instructions in some of the
 * trap table entries which branch back to code in ktlb.S
 * Those instructions can only handle a signed 16-bit
 * displacement.
 *
 * There is a binutils bug (bugzilla #4558) which causes
 * the relocation overflow checks for such instructions to
 * not be done correctly.  So binutils will not notice the
 * error and will instead write junk into the relocation and
 * you'll have an unbootable kernel.
 */
881 | #include "ttable.S" | ||
882 | |||
883 | ! 0x0000000000428000 | ||
884 | |||
885 | #include "systbls_64.S" | ||
886 | |||
	.data
	.align	8
	.globl	prom_tba, tlb_type
prom_tba:	.xword	0	/* OBP trap table address, saved by setup_tba */
tlb_type:	.word	0	/* Must NOT end up in BSS */
				/* 0=spitfire 1=cheetah 2=cheetah+ 3=hypervisor,
				 * set by the *_tlb_fixup code above.
				 */
	/* Common exception-table landing pads: make the faulting
	 * routine return -EFAULT.  __ret_efault is for routines with
	 * a register window (ret/restore), __retl_efault for leaf
	 * routines (retl).
	 */
	.section	".fixup",#alloc,#execinstr

	.globl	__ret_efault, __retl_efault
__ret_efault:
	ret
	 restore %g0, -EFAULT, %o0
__retl_efault:
	retl
	 mov	-EFAULT, %o0
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S new file mode 100644 index 000000000000..314dd0c9fc5b --- /dev/null +++ b/arch/sparc/kernel/helpers.S | |||
@@ -0,0 +1,63 @@ | |||
	/* __flushw_user: spill every user register window still held in
	 * the cpu (counted by %otherwin) out to the user's stack, then
	 * unwind back to the window we started in.  Used before the
	 * kernel walks or copies a user stack.
	 *
	 * No arguments, no return value.  Clobbers %g1 and %g2.
	 */
	.align	32
	.globl	__flushw_user
	.type	__flushw_user,#function
__flushw_user:
	rdpr	%otherwin, %g1		! user windows left in the cpu
	brz,pn	%g1, 2f			! none -> nothing to do
	 clr	%g2			! %g2 counts how many saves we do
	/* Each 'save' with %otherwin non-zero forces a spill trap,
	 * writing one user window to its stack save area.
	 */
1:	save	%sp, -128, %sp
	rdpr	%otherwin, %g1
	brnz,pt	%g1, 1b
	 add	%g2, 1, %g2
	/* Now 'restore' the same number of times to get back to the
	 * caller's window (the restore in the delay slot runs on every
	 * iteration, including the final untaken branch).
	 */
1:	sub	%g2, 1, %g2
	brnz,pt	%g2, 1b
	 restore	%g0, %g0, %g0
2:	retl
	 nop
	.size	__flushw_user,.-__flushw_user
18 | |||
	/* Flush %fp and %i7 to the stack for all register
	 * windows active inside of the cpu.  This allows
	 * show_stack_trace() to avoid using an expensive
	 * 'flushw'.
	 *
	 * Interrupts are disabled (PSTATE_IE cleared) while %cwp is
	 * rotated through the %canrestore live windows; the original
	 * %cwp and %pstate are restored before returning.
	 *
	 * No arguments, no return value.  Clobbers %o0, %g1-%g3.
	 */
	.globl	stack_trace_flush
	.type	stack_trace_flush,#function
stack_trace_flush:
	rdpr	%pstate, %o0
	wrpr	%o0, PSTATE_IE, %pstate		! disable interrupts

	rdpr	%cwp, %g1			! %g1: window to return to
	rdpr	%canrestore, %g2		! %g2: live windows to visit
	sub	%g1, 1, %g3			! %g3: next (older) window

1:	brz,pn	%g2, 2f				! all live windows done?
	 sub	%g2, 1, %g2
	wrpr	%g3, %cwp			! step into the older window
	stx	%fp, [%sp + STACK_BIAS + RW_V9_I6]	! save frame pointer
	stx	%i7, [%sp + STACK_BIAS + RW_V9_I7]	! save return address
	ba,pt	%xcc, 1b
	 sub	%g3, 1, %g3

2:	wrpr	%g1, %cwp			! back to the original window
	wrpr	%o0, %pstate			! restore interrupt state

	retl
	 nop
	.size	stack_trace_flush,.-stack_trace_flush
48 | |||
	/* real_hard_smp_processor_id: return the physical cpu id in %o0,
	 * obtained via the __GET_CPUID() macro.  On SMP kernels the very
	 * same entry point is additionally exported under the name
	 * hard_smp_processor_id (two labels on one body).
	 */
#ifdef CONFIG_SMP
	.globl	hard_smp_processor_id
	.type	hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
	.globl	real_hard_smp_processor_id
	.type	real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
	__GET_CPUID(%o0)
	retl
	 nop
#ifdef CONFIG_SMP
	.size	hard_smp_processor_id,.-hard_smp_processor_id
#endif
	.size	real_hard_smp_processor_id,.-real_hard_smp_processor_id
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c new file mode 100644 index 000000000000..1d272c3b5740 --- /dev/null +++ b/arch/sparc/kernel/hvapi.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* hvapi.c: Hypervisor API management. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/slab.h> | ||
9 | |||
10 | #include <asm/hypervisor.h> | ||
11 | #include <asm/oplib.h> | ||
12 | |||
/* If the hypervisor indicates that the API setting
 * calls are unsupported, by returning HV_EBADTRAP or
 * HV_ENOTSUPPORTED, we assume that API groups with the
 * PRE_API flag set are major 1 minor 0.
 */
struct api_info {
	unsigned long group;	/* HV_GRP_* group identifier */
	unsigned long major;	/* negotiated major; 0 while unregistered */
	unsigned long minor;	/* negotiated minor; 0 while unregistered */
	unsigned int refcnt;	/* number of active registrations */
	unsigned int flags;	/* FLAG_* bits below */
#define FLAG_PRE_API	0x00000001	/* group predates the versioning API */
};

/* One entry per API group this kernel knows how to negotiate.
 * Protected by hvapi_lock.
 */
static struct api_info api_table[] = {
	{ .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_INTR, },
	{ .group = HV_GRP_SOFT_STATE, },
	{ .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_LDOM, },
	{ .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_RNG, },
	{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
	{ .group = HV_GRP_FIRE_PERF, },
	{ .group = HV_GRP_N2_CPU, },
	{ .group = HV_GRP_NIU, },
	{ .group = HV_GRP_VF_CPU, },
	{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
};

/* Serializes all access to api_table and its refcounts. */
static DEFINE_SPINLOCK(hvapi_lock);
46 | |||
47 | static struct api_info *__get_info(unsigned long group) | ||
48 | { | ||
49 | int i; | ||
50 | |||
51 | for (i = 0; i < ARRAY_SIZE(api_table); i++) { | ||
52 | if (api_table[i].group == group) | ||
53 | return &api_table[i]; | ||
54 | } | ||
55 | return NULL; | ||
56 | } | ||
57 | |||
/* Take one reference on an API group registration.
 * Caller must hold hvapi_lock.
 */
static void __get_ref(struct api_info *p)
{
	p->refcnt++;
}
62 | |||
/* Drop one reference; when the last reference goes away, tell the
 * hypervisor to forget the negotiated version (major/minor 0) and
 * clear our cached copy.  Caller must hold hvapi_lock.
 */
static void __put_ref(struct api_info *p)
{
	if (--p->refcnt == 0) {
		unsigned long ignore;

		/* Return status deliberately ignored: there is nothing
		 * useful to do if un-negotiating fails.
		 */
		sun4v_set_version(p->group, 0, 0, &ignore);
		p->major = p->minor = 0;
	}
}
72 | |||
73 | /* Register a hypervisor API specification. It indicates the | ||
74 | * API group and desired major+minor. | ||
75 | * | ||
76 | * If an existing API registration exists '0' (success) will | ||
77 | * be returned if it is compatible with the one being registered. | ||
78 | * Otherwise a negative error code will be returned. | ||
79 | * | ||
80 | * Otherwise an attempt will be made to negotiate the requested | ||
81 | * API group/major/minor with the hypervisor, and errors returned | ||
82 | * if that does not succeed. | ||
83 | */ | ||
84 | int sun4v_hvapi_register(unsigned long group, unsigned long major, | ||
85 | unsigned long *minor) | ||
86 | { | ||
87 | struct api_info *p; | ||
88 | unsigned long flags; | ||
89 | int ret; | ||
90 | |||
91 | spin_lock_irqsave(&hvapi_lock, flags); | ||
92 | p = __get_info(group); | ||
93 | ret = -EINVAL; | ||
94 | if (p) { | ||
95 | if (p->refcnt) { | ||
96 | ret = -EINVAL; | ||
97 | if (p->major == major) { | ||
98 | *minor = p->minor; | ||
99 | ret = 0; | ||
100 | } | ||
101 | } else { | ||
102 | unsigned long actual_minor; | ||
103 | unsigned long hv_ret; | ||
104 | |||
105 | hv_ret = sun4v_set_version(group, major, *minor, | ||
106 | &actual_minor); | ||
107 | ret = -EINVAL; | ||
108 | if (hv_ret == HV_EOK) { | ||
109 | *minor = actual_minor; | ||
110 | p->major = major; | ||
111 | p->minor = actual_minor; | ||
112 | ret = 0; | ||
113 | } else if (hv_ret == HV_EBADTRAP || | ||
114 | hv_ret == HV_ENOTSUPPORTED) { | ||
115 | if (p->flags & FLAG_PRE_API) { | ||
116 | if (major == 1) { | ||
117 | p->major = 1; | ||
118 | p->minor = 0; | ||
119 | *minor = 0; | ||
120 | ret = 0; | ||
121 | } | ||
122 | } | ||
123 | } | ||
124 | } | ||
125 | |||
126 | if (ret == 0) | ||
127 | __get_ref(p); | ||
128 | } | ||
129 | spin_unlock_irqrestore(&hvapi_lock, flags); | ||
130 | |||
131 | return ret; | ||
132 | } | ||
133 | EXPORT_SYMBOL(sun4v_hvapi_register); | ||
134 | |||
135 | void sun4v_hvapi_unregister(unsigned long group) | ||
136 | { | ||
137 | struct api_info *p; | ||
138 | unsigned long flags; | ||
139 | |||
140 | spin_lock_irqsave(&hvapi_lock, flags); | ||
141 | p = __get_info(group); | ||
142 | if (p) | ||
143 | __put_ref(p); | ||
144 | spin_unlock_irqrestore(&hvapi_lock, flags); | ||
145 | } | ||
146 | EXPORT_SYMBOL(sun4v_hvapi_unregister); | ||
147 | |||
148 | int sun4v_hvapi_get(unsigned long group, | ||
149 | unsigned long *major, | ||
150 | unsigned long *minor) | ||
151 | { | ||
152 | struct api_info *p; | ||
153 | unsigned long flags; | ||
154 | int ret; | ||
155 | |||
156 | spin_lock_irqsave(&hvapi_lock, flags); | ||
157 | ret = -EINVAL; | ||
158 | p = __get_info(group); | ||
159 | if (p && p->refcnt) { | ||
160 | *major = p->major; | ||
161 | *minor = p->minor; | ||
162 | ret = 0; | ||
163 | } | ||
164 | spin_unlock_irqrestore(&hvapi_lock, flags); | ||
165 | |||
166 | return ret; | ||
167 | } | ||
168 | EXPORT_SYMBOL(sun4v_hvapi_get); | ||
169 | |||
170 | void __init sun4v_hvapi_init(void) | ||
171 | { | ||
172 | unsigned long group, major, minor; | ||
173 | |||
174 | group = HV_GRP_SUN4V; | ||
175 | major = 1; | ||
176 | minor = 0; | ||
177 | if (sun4v_hvapi_register(group, major, &minor)) | ||
178 | goto bad; | ||
179 | |||
180 | group = HV_GRP_CORE; | ||
181 | major = 1; | ||
182 | minor = 1; | ||
183 | if (sun4v_hvapi_register(group, major, &minor)) | ||
184 | goto bad; | ||
185 | |||
186 | return; | ||
187 | |||
188 | bad: | ||
189 | prom_printf("HVAPI: Cannot register API group " | ||
190 | "%lx with major(%u) minor(%u)\n", | ||
191 | group, major, minor); | ||
192 | prom_halt(); | ||
193 | } | ||
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S new file mode 100644 index 000000000000..8a5f35ffb15e --- /dev/null +++ b/arch/sparc/kernel/hvcalls.S | |||
@@ -0,0 +1,800 @@ | |||
/* sun4v hypervisor interrupt management calls.
 *
 * Common convention for these stubs: the fast-trap function number
 * is loaded into %o5, arguments are already in %o0-%o4 per the C
 * ABI, and 'ta HV_FAST_TRAP' enters the hypervisor.  On return %o0
 * holds the status and %o1 the first return value.
 */

/* %o0: devhandle
 * %o1: devino
 *
 * returns %o0: sysino
 */
ENTRY(sun4v_devino_to_sysino)
	mov	HV_FAST_INTR_DEVINO2SYSINO, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o1, %o0	! return value moved into place in the delay slot
ENDPROC(sun4v_devino_to_sysino)

/* %o0: sysino
 *
 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
 */
ENTRY(sun4v_intr_getenabled)
	mov	HV_FAST_INTR_GETENABLED, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o1, %o0
ENDPROC(sun4v_intr_getenabled)

/* %o0: sysino
 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
 */
ENTRY(sun4v_intr_setenabled)
	mov	HV_FAST_INTR_SETENABLED, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_intr_setenabled)

/* %o0: sysino
 *
 * returns %o0: intr_state (HV_INTR_STATE_*)
 */
ENTRY(sun4v_intr_getstate)
	mov	HV_FAST_INTR_GETSTATE, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o1, %o0
ENDPROC(sun4v_intr_getstate)

/* %o0: sysino
 * %o1: intr_state (HV_INTR_STATE_*)
 */
ENTRY(sun4v_intr_setstate)
	mov	HV_FAST_INTR_SETSTATE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_intr_setstate)

/* %o0: sysino
 *
 * returns %o0: cpuid
 */
ENTRY(sun4v_intr_gettarget)
	mov	HV_FAST_INTR_GETTARGET, %o5
	ta	HV_FAST_TRAP
	retl
	 mov	%o1, %o0
ENDPROC(sun4v_intr_gettarget)

/* %o0: sysino
 * %o1: cpuid
 */
ENTRY(sun4v_intr_settarget)
	mov	HV_FAST_INTR_SETTARGET, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_intr_settarget)
75 | |||
/* sun4v CPU management hypervisor calls. */

/* %o0: cpuid
 * %o1: pc
 * %o2: rtba
 * %o3: arg0
 *
 * returns %o0: status
 */
ENTRY(sun4v_cpu_start)
	mov	HV_FAST_CPU_START, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_start)

/* %o0: cpuid
 *
 * returns %o0: status
 */
ENTRY(sun4v_cpu_stop)
	mov	HV_FAST_CPU_STOP, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_stop)

/* returns %o0: status */
ENTRY(sun4v_cpu_yield)
	mov	HV_FAST_CPU_YIELD, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_yield)

/* %o0: type
 * %o1: queue paddr
 * %o2: num queue entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_cpu_qconf)
	mov	HV_FAST_CPU_QCONF, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_qconf)

/* %o0: num cpus in cpu list
 * %o1: cpu list paddr
 * %o2: mondo block paddr
 *
 * returns %o0: status
 */
ENTRY(sun4v_cpu_mondo_send)
	mov	HV_FAST_CPU_MONDO_SEND, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_mondo_send)

/* %o0: CPU ID
 *
 * returns %o0: -status if status non-zero, else
 *         %o0: cpu state as HV_CPU_STATE_*
 */
ENTRY(sun4v_cpu_state)
	mov	HV_FAST_CPU_STATE, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 1f		! error: hand back the negated status
	 sub	%g0, %o0, %o0
	mov	%o1, %o0	! success: hand back the state value
1:	retl
	 nop
ENDPROC(sun4v_cpu_state)
149 | |||
/* sun4v MMU setup hypervisor calls. */

/* %o0: virtual address
 * %o1: must be zero
 * %o2: TTE
 * %o3: HV_MMU_* flags
 *
 * returns %o0: status
 */
ENTRY(sun4v_mmu_map_perm_addr)
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_mmu_map_perm_addr)

/* %o0: number of TSB descriptions
 * %o1: TSB descriptions real address
 *
 * returns %o0: status
 */
ENTRY(sun4v_mmu_tsb_ctx0)
	mov	HV_FAST_MMU_TSB_CTX0, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_mmu_tsb_ctx0)
175 | |||
/* Hypervisor API version negotiation (core traps, not fast traps).
 * The caller's output-pointer arguments sit in registers the trap
 * overwrites with return values, so they are parked in higher %o
 * registers before the 'ta'.
 */

/* %o0: API group number
 * %o1: pointer to unsigned long major number storage
 * %o2: pointer to unsigned long minor number storage
 *
 * returns %o0: status
 */
ENTRY(sun4v_get_version)
	mov	HV_CORE_GET_VER, %o5
	mov	%o1, %o3		! save major pointer, trap returns major in %o1
	mov	%o2, %o4		! save minor pointer, trap returns minor in %o2
	ta	HV_CORE_TRAP
	stx	%o1, [%o3]		! *major = returned major
	retl
	 stx	%o2, [%o4]		! *minor = returned minor (delay slot)
ENDPROC(sun4v_get_version)

/* %o0: API group number
 * %o1: desired major number
 * %o2: desired minor number
 * %o3: pointer to unsigned long actual minor number storage
 *
 * returns %o0: status
 */
ENTRY(sun4v_set_version)
	mov	HV_CORE_SET_VER, %o5
	mov	%o3, %o4		! save actual-minor pointer
	ta	HV_CORE_TRAP
	retl
	 stx	%o1, [%o4]		! *actual_minor = returned value
ENDPROC(sun4v_set_version)
206 | |||
/* sun4v time-of-day hypervisor calls. */

/* %o0: pointer to unsigned long time
 *
 * returns %o0: status
 */
ENTRY(sun4v_tod_get)
	mov	%o0, %o4		! save output pointer before the trap
	mov	HV_FAST_TOD_GET, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *time = returned value
	retl
	 nop
ENDPROC(sun4v_tod_get)

/* %o0: time
 *
 * returns %o0: status
 */
ENTRY(sun4v_tod_set)
	mov	HV_FAST_TOD_SET, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_tod_set)
230 | |||
/* sun4v console hypervisor calls. */

/* %o0: pointer to unsigned long status
 *
 * returns %o0: signed character
 */
ENTRY(sun4v_con_getchar)
	mov	%o0, %o4		! save status output pointer
	mov	HV_FAST_CONS_GETCHAR, %o5
	clr	%o0
	clr	%o1
	ta	HV_FAST_TRAP
	stx	%o0, [%o4]		! *status = trap status
	retl
	 sra	%o1, 0, %o0		! sign-extend the 32-bit character
ENDPROC(sun4v_con_getchar)

/* %o0: signed long character
 *
 * returns %o0: status
 */
ENTRY(sun4v_con_putchar)
	mov	HV_FAST_CONS_PUTCHAR, %o5
	ta	HV_FAST_TRAP
	retl
	 sra	%o0, 0, %o0
ENDPROC(sun4v_con_putchar)

/* %o0: buffer real address
 * %o1: buffer size
 * %o2: pointer to unsigned long bytes_read
 *
 * returns %o0: status
 *
 * The byte count of -1 (break) and -2 (hangup) are special console
 * conditions: they are passed through as the return status instead
 * of being stored as a byte count.
 */
ENTRY(sun4v_con_read)
	mov	%o2, %o4		! save bytes_read output pointer
	mov	HV_FAST_CONS_READ, %o5
	ta	HV_FAST_TRAP
	brnz	%o0, 1f			! real error -> just return it
	 cmp	%o1, -1			/* break */
	be,a,pn	%icc, 1f
	 mov	%o1, %o0
	cmp	%o1, -2			/* hup */
	be,a,pn	%icc, 1f
	 mov	%o1, %o0
	stx	%o1, [%o4]		! normal case: *bytes_read = count
1:	retl
	 nop
ENDPROC(sun4v_con_read)

/* %o0: buffer real address
 * %o1: buffer size
 * %o2: pointer to unsigned long bytes_written
 *
 * returns %o0: status
 */
ENTRY(sun4v_con_write)
	mov	%o2, %o4		! save bytes_written output pointer
	mov	HV_FAST_CONS_WRITE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *bytes_written = count
	retl
	 nop
ENDPROC(sun4v_con_write)
293 | |||
/* sun4v machine service hypervisor calls. */

/* %o0: soft state
 * %o1: address of description string
 *
 * returns %o0: status
 */
ENTRY(sun4v_mach_set_soft_state)
	mov	HV_FAST_MACH_SET_SOFT_STATE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_mach_set_soft_state)

/* %o0: exit code
 *
 * Does not return (the retl below is unreachable when the
 * hypervisor honors the exit request).
 */
ENTRY(sun4v_mach_exit)
	mov	HV_FAST_MACH_EXIT, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_mach_exit)

/* %o0: buffer real address
 * %o1: buffer length
 * %o2: pointer to unsigned long real_buf_len
 *
 * returns %o0: status
 */
ENTRY(sun4v_mach_desc)
	mov	%o2, %o4		! save real_buf_len output pointer
	mov	HV_FAST_MACH_DESC, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *real_buf_len = returned length
	retl
	 nop
ENDPROC(sun4v_mach_desc)

/* %o0: new timeout in milliseconds
 * %o1: pointer to unsigned long orig_timeout
 *
 * returns %o0: status
 */
ENTRY(sun4v_mach_set_watchdog)
	mov	%o1, %o4		! save orig_timeout output pointer
	mov	HV_FAST_MACH_SET_WATCHDOG, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *orig_timeout = previous value
	retl
	 nop
ENDPROC(sun4v_mach_set_watchdog)

/* No inputs and does not return.
 *
 * NOTE(review): the mov/stx pair below mirrors the watchdog stub
 * but SIR takes no output pointer; it looks like a copy-paste
 * leftover that is only reachable if the SIR trap fails and
 * returns -- confirm before relying on behavior after a failed
 * SIR.
 */
ENTRY(sun4v_mach_sir)
	mov	%o1, %o4
	mov	HV_FAST_MACH_SIR, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]
	retl
	 nop
ENDPROC(sun4v_mach_sir)
355 | |||
/* sun4v logical domain channel (LDC) hypervisor calls.
 * Output pointers are parked in %g1-%g3, which the fast trap does
 * not clobber, so the returned values in %o1-%o3 can be stored
 * through them afterwards.
 */

/* %o0: channel
 * %o1: ra
 * %o2: num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_tx_qconf)
	mov	HV_FAST_LDC_TX_QCONF, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_tx_qconf)

/* %o0: channel
 * %o1: pointer to unsigned long ra
 * %o2: pointer to unsigned long num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_tx_qinfo)
	mov	%o1, %g1
	mov	%o2, %g2
	mov	HV_FAST_LDC_TX_QINFO, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	retl
	 nop
ENDPROC(sun4v_ldc_tx_qinfo)

/* %o0: channel
 * %o1: pointer to unsigned long head_off
 * %o2: pointer to unsigned long tail_off
 * %o3: pointer to unsigned long chan_state
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_tx_get_state)
	mov	%o1, %g1
	mov	%o2, %g2
	mov	%o3, %g3
	mov	HV_FAST_LDC_TX_GET_STATE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	stx	%o3, [%g3]
	retl
	 nop
ENDPROC(sun4v_ldc_tx_get_state)

/* %o0: channel
 * %o1: tail_off
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_tx_set_qtail)
	mov	HV_FAST_LDC_TX_SET_QTAIL, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_tx_set_qtail)

/* %o0: channel
 * %o1: ra
 * %o2: num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_rx_qconf)
	mov	HV_FAST_LDC_RX_QCONF, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_rx_qconf)

/* %o0: channel
 * %o1: pointer to unsigned long ra
 * %o2: pointer to unsigned long num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_rx_qinfo)
	mov	%o1, %g1
	mov	%o2, %g2
	mov	HV_FAST_LDC_RX_QINFO, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	retl
	 nop
ENDPROC(sun4v_ldc_rx_qinfo)

/* %o0: channel
 * %o1: pointer to unsigned long head_off
 * %o2: pointer to unsigned long tail_off
 * %o3: pointer to unsigned long chan_state
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_rx_get_state)
	mov	%o1, %g1
	mov	%o2, %g2
	mov	%o3, %g3
	mov	HV_FAST_LDC_RX_GET_STATE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	stx	%o3, [%g3]
	retl
	 nop
ENDPROC(sun4v_ldc_rx_get_state)

/* %o0: channel
 * %o1: head_off
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_rx_set_qhead)
	mov	HV_FAST_LDC_RX_SET_QHEAD, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_rx_set_qhead)

/* %o0: channel
 * %o1: ra
 * %o2: num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_set_map_table)
	mov	HV_FAST_LDC_SET_MAP_TABLE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_set_map_table)

/* %o0: channel
 * %o1: pointer to unsigned long ra
 * %o2: pointer to unsigned long num_entries
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_get_map_table)
	mov	%o1, %g1
	mov	%o2, %g2
	mov	HV_FAST_LDC_GET_MAP_TABLE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	retl
	 nop
ENDPROC(sun4v_ldc_get_map_table)

/* %o0: channel
 * %o1: dir_code
 * %o2: tgt_raddr
 * %o3: lcl_raddr
 * %o4: len
 * %o5: pointer to unsigned long actual_len
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_copy)
	mov	%o5, %g1	! %o5 is needed for the trap number below
	mov	HV_FAST_LDC_COPY, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]	! *actual_len = returned length
	retl
	 nop
ENDPROC(sun4v_ldc_copy)

/* %o0: channel
 * %o1: cookie
 * %o2: pointer to unsigned long ra
 * %o3: pointer to unsigned long perm
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_mapin)
	mov	%o2, %g1
	mov	%o3, %g2
	mov	HV_FAST_LDC_MAPIN, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	stx	%o2, [%g2]
	retl
	 nop
ENDPROC(sun4v_ldc_mapin)

/* %o0: ra
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_unmap)
	mov	HV_FAST_LDC_UNMAP, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_unmap)

/* %o0: channel
 * %o1: cookie
 * %o2: mte_cookie
 *
 * returns %o0: status
 */
ENTRY(sun4v_ldc_revoke)
	mov	HV_FAST_LDC_REVOKE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ldc_revoke)
569 | |||
/* sun4v virtual interrupt (vintr) hypervisor calls.  Getters park
 * the output pointer in %g1 (untouched by the fast trap) and store
 * the value returned in %o1 through it.
 */

/* %o0: device handle
 * %o1: device INO
 * %o2: pointer to unsigned long cookie
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_get_cookie)
	mov	%o2, %g1
	mov	HV_FAST_VINTR_GET_COOKIE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	retl
	 nop
ENDPROC(sun4v_vintr_get_cookie)

/* %o0: device handle
 * %o1: device INO
 * %o2: cookie
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_set_cookie)
	mov	HV_FAST_VINTR_SET_COOKIE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_vintr_set_cookie)

/* %o0: device handle
 * %o1: device INO
 * %o2: pointer to unsigned long valid_state
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_get_valid)
	mov	%o2, %g1
	mov	HV_FAST_VINTR_GET_VALID, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	retl
	 nop
ENDPROC(sun4v_vintr_get_valid)

/* %o0: device handle
 * %o1: device INO
 * %o2: valid_state
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_set_valid)
	mov	HV_FAST_VINTR_SET_VALID, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_vintr_set_valid)

/* %o0: device handle
 * %o1: device INO
 * %o2: pointer to unsigned long state
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_get_state)
	mov	%o2, %g1
	mov	HV_FAST_VINTR_GET_STATE, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	retl
	 nop
ENDPROC(sun4v_vintr_get_state)

/* %o0: device handle
 * %o1: device INO
 * %o2: state
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_set_state)
	mov	HV_FAST_VINTR_SET_STATE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_vintr_set_state)

/* %o0: device handle
 * %o1: device INO
 * %o2: pointer to unsigned long cpuid
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_get_target)
	mov	%o2, %g1
	mov	HV_FAST_VINTR_GET_TARGET, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%g1]
	retl
	 nop
ENDPROC(sun4v_vintr_get_target)

/* %o0: device handle
 * %o1: device INO
 * %o2: cpuid
 *
 * returns %o0: status
 */
ENTRY(sun4v_vintr_set_target)
	mov	HV_FAST_VINTR_SET_TARGET, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_vintr_set_target)
681 | |||
/* %o0: NCS sub-function
 * %o1: sub-function arg real-address
 * %o2: sub-function arg size
 *
 * returns %o0: status
 */
ENTRY(sun4v_ncs_request)
	mov	HV_FAST_NCS_REQUEST, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_ncs_request)
694 | |||
/* sun4v service channel calls.
 *
 * send/recv open a new register window so the caller's fourth
 * argument (the output pointer, %i3 after 'save') survives the
 * trap, which returns values in %o0/%o1.
 */

/* %o0: channel, %o1: buffer ra, %o2: size,
 * %o3: pointer to unsigned long sent/recv count.
 * returns %o0: status
 */
ENTRY(sun4v_svc_send)
	save	%sp, -192, %sp
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	HV_FAST_SVC_SEND, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%i3]		! store count through caller's pointer
	ret
	 restore
ENDPROC(sun4v_svc_send)

ENTRY(sun4v_svc_recv)
	save	%sp, -192, %sp
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	HV_FAST_SVC_RECV, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%i3]		! store count through caller's pointer
	ret
	 restore
ENDPROC(sun4v_svc_recv)

/* %o0: channel, %o1: pointer to unsigned long status word. */
ENTRY(sun4v_svc_getstatus)
	mov	HV_FAST_SVC_GETSTATUS, %o5
	mov	%o1, %o4		! save output pointer before the trap
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]
	retl
	 nop
ENDPROC(sun4v_svc_getstatus)

ENTRY(sun4v_svc_setstatus)
	mov	HV_FAST_SVC_SETSTATUS, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_svc_setstatus)

ENTRY(sun4v_svc_clrstatus)
	mov	HV_FAST_SVC_CLRSTATUS, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_svc_clrstatus)
741 | |||
/* MMU statistics and global demap hypervisor calls. */

/* %o0: buffer ra, %o1: pointer to unsigned long previous buffer ra. */
ENTRY(sun4v_mmustat_conf)
	mov	%o1, %o4		! save output pointer before the trap
	mov	HV_FAST_MMUSTAT_CONF, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *prev_ra = returned value
	retl
	 nop
ENDPROC(sun4v_mmustat_conf)

/* %o0: pointer to unsigned long current buffer ra. */
ENTRY(sun4v_mmustat_info)
	mov	%o0, %o4		! save output pointer before the trap
	mov	HV_FAST_MMUSTAT_INFO, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *ra = returned value
	retl
	 nop
ENDPROC(sun4v_mmustat_info)

/* Demap all non-permanent mappings: vaddr/ctx arguments are zero,
 * flags select HV_MMU_ALL.  returns %o0: status.
 */
ENTRY(sun4v_mmu_demap_all)
	clr	%o0
	clr	%o1
	mov	HV_MMU_ALL, %o2
	mov	HV_FAST_MMU_DEMAP_ALL, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_mmu_demap_all)
769 | |||
/* Niagara (UltraSPARC T1) and Niagara-2 performance register
 * hypervisor calls.  Getters take a register number in %o0 and a
 * pointer for the value in %o1-equivalent position (%o0 here),
 * setters take register number and value directly.
 */

/* %o0: perf register number, with output value stored through the
 * pointer originally passed in %o0.  returns %o0: status.
 */
ENTRY(sun4v_niagara_getperf)
	mov	%o0, %o4		! save output pointer before the trap
	mov	HV_FAST_GET_PERFREG, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *value = returned register contents
	retl
	 nop
ENDPROC(sun4v_niagara_getperf)

ENTRY(sun4v_niagara_setperf)
	mov	HV_FAST_SET_PERFREG, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_niagara_setperf)

ENTRY(sun4v_niagara2_getperf)
	mov	%o0, %o4		! save output pointer before the trap
	mov	HV_FAST_N2_GET_PERFREG, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o4]		! *value = returned register contents
	retl
	 nop
ENDPROC(sun4v_niagara2_getperf)

ENTRY(sun4v_niagara2_setperf)
	mov	HV_FAST_N2_SET_PERFREG, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_niagara2_setperf)
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S new file mode 100644 index 000000000000..9365432904d6 --- /dev/null +++ b/arch/sparc/kernel/hvtramp.S | |||
@@ -0,0 +1,140 @@ | |||
1 | /* hvtramp.S: Hypervisor start-cpu trampoline code. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/init.h> | ||
7 | |||
8 | #include <asm/thread_info.h> | ||
9 | #include <asm/hypervisor.h> | ||
10 | #include <asm/scratchpad.h> | ||
11 | #include <asm/spitfire.h> | ||
12 | #include <asm/hvtramp.h> | ||
13 | #include <asm/pstate.h> | ||
14 | #include <asm/ptrace.h> | ||
15 | #include <asm/head.h> | ||
16 | #include <asm/asi.h> | ||
17 | #include <asm/pil.h> | ||
18 | |||
	__CPUINIT
	.align		8
	.globl		hv_cpu_startup, hv_cpu_startup_end

	/* This code executes directly out of the hypervisor
	 * with physical addressing (va==pa).  %o0 contains
	 * our client argument which for Linux points to
	 * a descriptor data structure which defines the
	 * MMU entries we need to load up.
	 *
	 * After we set things up we enable the MMU and call
	 * into the kernel.
	 *
	 * First setup basic privileged cpu state.
	 */
hv_cpu_startup:
	SET_GL(0)
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	wrpr		%g0, 0, %canrestore
	wrpr		%g0, 0, %otherwin
	wrpr		%g0, 6, %cansave
	wrpr		%g0, 6, %cleanwin
	wrpr		%g0, 0, %cwp
	wrpr		%g0, 0, %wstate
	wrpr		%g0, 0, %tl

	/* Install the kernel's TL=0 trap table. */
	sethi		%hi(sparc64_ttable_tl0), %g1
	wrpr		%g1, %tba

	mov		%o0, %l0	/* %l0 = descriptor base pointer */

	/* Publish our cpuid via the CPUID scratchpad slot. */
	lduw		[%l0 + HVTRAMP_DESCR_CPU], %g1
	mov		SCRATCHPAD_CPUID, %g2
	stxa		%g1, [%g2] ASI_SCRATCHPAD

	/* Scratchpad slot 0 gets the fault-info area's VA. */
	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2
	stxa		%g2, [%g0] ASI_SCRATCHPAD

	/* Walk the descriptor's mapping array:
	 * %l1 = index, %l2 = count, %l3 = current entry.
	 */
	mov		0, %l1
	lduw		[%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2
	add		%l0, HVTRAMP_DESCR_MAPS, %l3

	/* Install each TTE as a permanent I+D MMU translation. */
1:	ldx		[%l3 + HVTRAMP_MAPPING_VADDR], %o0
	clr		%o1
	ldx		[%l3 + HVTRAMP_MAPPING_TTE], %o2
	mov		HV_MMU_IMMU | HV_MMU_DMMU, %o3
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	ta		HV_FAST_TRAP

	/* Non-zero status from the hypervisor: park below. */
	brnz,pn		%o0, 80f
	 nop

	add		%l1, 1, %l1
	cmp		%l1, %l2
	blt,a,pt	%xcc, 1b
	 add		%l3, HVTRAMP_MAPPING_SIZE, %l3

	/* Tell the hypervisor where our fault status area lives. */
	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0
	mov		HV_FAST_MMU_FAULT_AREA_CONF, %o5
	ta		HV_FAST_TRAP

	brnz,pn		%o0, 80f
	 nop

	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate

	ldx		[%l0 + HVTRAMP_DESCR_THREAD_REG], %l6

	/* Enable the MMU; on success execution resumes at 1f. */
	mov		1, %o0
	set		1f, %o1
	mov		HV_FAST_MMU_ENABLE, %o5
	ta		HV_FAST_TRAP

	/* If we return here the MMU enable failed; spin forever. */
	ba,pt		%xcc, 80f
	 nop

1:
	wr		%g0, 0, %fprs
	wr		%g0, ASI_P, %asi

	/* Clear both MMU context registers (run in context zero). */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync

	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_MMU
	membar		#Sync

	/* %g6 = thread_info pointer, %g4 = its task struct. */
	mov		%l6, %g6
	ldx		[%g6 + TI_TASK], %g4

	/* Initial stack: top of the thread union minus an initial
	 * stack frame and the stack bias.
	 */
	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	call		init_irqwork_curcpu
	 nop
	call		hard_smp_processor_id
	 nop

	call		sun4v_register_mondo_queues
	 nop

	call		init_cur_cpu_trap
	 mov		%g6, %o0

	/* Now safe to accept interrupts. */
	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop

	/* Error path: loop forever. */
80:	ba,pt		%xcc, 80b
	 nop

	.align		8
hv_cpu_startup_end:
diff --git a/arch/sparc/kernel/idprom_64.c b/arch/sparc/kernel/idprom_64.c new file mode 100644 index 000000000000..5b45a808c621 --- /dev/null +++ b/arch/sparc/kernel/idprom_64.c | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * idprom.c: Routines to load the idprom into kernel addresses and | ||
3 | * interpret the data contained within. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | |||
12 | #include <asm/oplib.h> | ||
13 | #include <asm/idprom.h> | ||
14 | |||
/* Points at the kernel's private IDPROM copy once idprom_init() runs. */
struct idprom *idprom;
/* Backing storage for that copy, filled from the PROM. */
static struct idprom idprom_buffer;
17 | |||
18 | /* Calculate the IDPROM checksum (xor of the data bytes). */ | ||
19 | static unsigned char __init calc_idprom_cksum(struct idprom *idprom) | ||
20 | { | ||
21 | unsigned char cksum, i, *ptr = (unsigned char *)idprom; | ||
22 | |||
23 | for (i = cksum = 0; i <= 0x0E; i++) | ||
24 | cksum ^= *ptr++; | ||
25 | |||
26 | return cksum; | ||
27 | } | ||
28 | |||
29 | /* Create a local IDPROM copy and verify integrity. */ | ||
30 | void __init idprom_init(void) | ||
31 | { | ||
32 | prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer)); | ||
33 | |||
34 | idprom = &idprom_buffer; | ||
35 | |||
36 | if (idprom->id_format != 0x01) { | ||
37 | prom_printf("IDPROM: Warning, unknown format type!\n"); | ||
38 | } | ||
39 | |||
40 | if (idprom->id_cksum != calc_idprom_cksum(idprom)) { | ||
41 | prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n", | ||
42 | idprom->id_cksum, calc_idprom_cksum(idprom)); | ||
43 | } | ||
44 | |||
45 | printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
46 | idprom->id_ethaddr[0], idprom->id_ethaddr[1], | ||
47 | idprom->id_ethaddr[2], idprom->id_ethaddr[3], | ||
48 | idprom->id_ethaddr[4], idprom->id_ethaddr[5]); | ||
49 | } | ||
diff --git a/arch/sparc/kernel/init_task_64.c b/arch/sparc/kernel/init_task_64.c new file mode 100644 index 000000000000..d2b312381c19 --- /dev/null +++ b/arch/sparc/kernel/init_task_64.c | |||
@@ -0,0 +1,35 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/fs.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/init_task.h> | ||
6 | #include <linux/mqueue.h> | ||
7 | |||
8 | #include <asm/pgtable.h> | ||
9 | #include <asm/uaccess.h> | ||
10 | #include <asm/processor.h> | ||
11 | |||
/* Static bootstrap instances of the structures making up the initial
 * (pid 0 / swapper) task.  All later tasks are allocated dynamically
 * in fork.
 */
static struct fs_struct init_fs = INIT_FS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/* .text section in head.S is aligned at 2 page boundary and this gets linked
 * right after that so that the init_thread_union is aligned properly as well.
 * We really don't need this special alignment like the Intel does, but
 * I do it anyways for completeness.
 */
__asm__ (".text");
union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
EXPORT_SYMBOL(init_task);

/* Switch back to .data for the task struct itself. */
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c new file mode 100644 index 000000000000..1cc1995531e2 --- /dev/null +++ b/arch/sparc/kernel/iommu.c | |||
@@ -0,0 +1,866 @@ | |||
1 | /* iommu.c: Generic sparc64 IOMMU support. | ||
2 | * | ||
3 | * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/iommu-helper.h> | ||
14 | |||
15 | #ifdef CONFIG_PCI | ||
16 | #include <linux/pci.h> | ||
17 | #endif | ||
18 | |||
19 | #include <asm/iommu.h> | ||
20 | |||
21 | #include "iommu_common.h" | ||
22 | |||
/* Address of the streaming-buffer context-match register for context
 * CTX (registers are 8 bytes apart).
 */
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Reset the memory-based flush-completion flag before a flush. */
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
/* True once the hardware has written the flush-completion flag. */
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

/* Physical-address register accessors (ASI_PHYS_BYPASS_EC_E) used to
 * reach the IOMMU and streaming-buffer control registers.
 */
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
43 | |||
44 | /* Must be invoked under the IOMMU lock. */ | ||
45 | static void iommu_flushall(struct iommu *iommu) | ||
46 | { | ||
47 | if (iommu->iommu_flushinv) { | ||
48 | iommu_write(iommu->iommu_flushinv, ~(u64)0); | ||
49 | } else { | ||
50 | unsigned long tag; | ||
51 | int entry; | ||
52 | |||
53 | tag = iommu->iommu_tags; | ||
54 | for (entry = 0; entry < 16; entry++) { | ||
55 | iommu_write(tag, 0); | ||
56 | tag += 8; | ||
57 | } | ||
58 | |||
59 | /* Ensure completion of previous PIO writes. */ | ||
60 | (void) iommu_read(iommu->write_complete_reg); | ||
61 | } | ||
62 | } | ||
63 | |||
/* Build a consistent (cache-coherent, non-streaming) IOPTE; the
 * context number is encoded into the IOPTE_CONTEXT field at bit 47.
 */
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

/* A streaming IOPTE additionally routes DMA through the streaming
 * buffer (IOPTE_STBUF).
 */
#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
76 | |||
77 | static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) | ||
78 | { | ||
79 | unsigned long val = iopte_val(*iopte); | ||
80 | |||
81 | val &= ~IOPTE_PAGE; | ||
82 | val |= iommu->dummy_page_pa; | ||
83 | |||
84 | iopte_val(*iopte) = val; | ||
85 | } | ||
86 | |||
/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 *
 * Returns the index of the first of @npages contiguous free entries
 * in the arena, or DMA_ERROR_CODE when none can be found.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	/* A non-zero *handle continues a previous SG allocation run;
	 * otherwise start from the rotating hint.
	 */
	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

again:

	/* Segments must not cross the device's DMA segment boundary;
	 * default to a 4GB boundary when no device is given.
	 */
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	/* iommu_area_alloc() signals failure with -1 (wraps to
	 * ULONG_MAX in the unsigned comparison below).
	 */
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* The hint only ever advances; see the comment up top. */
	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
164 | |||
165 | void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages) | ||
166 | { | ||
167 | struct iommu_arena *arena = &iommu->arena; | ||
168 | unsigned long entry; | ||
169 | |||
170 | entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; | ||
171 | |||
172 | iommu_area_free(arena->map, entry, npages); | ||
173 | } | ||
174 | |||
175 | int iommu_table_init(struct iommu *iommu, int tsbsize, | ||
176 | u32 dma_offset, u32 dma_addr_mask, | ||
177 | int numa_node) | ||
178 | { | ||
179 | unsigned long i, order, sz, num_tsb_entries; | ||
180 | struct page *page; | ||
181 | |||
182 | num_tsb_entries = tsbsize / sizeof(iopte_t); | ||
183 | |||
184 | /* Setup initial software IOMMU state. */ | ||
185 | spin_lock_init(&iommu->lock); | ||
186 | iommu->ctx_lowest_free = 1; | ||
187 | iommu->page_table_map_base = dma_offset; | ||
188 | iommu->dma_addr_mask = dma_addr_mask; | ||
189 | |||
190 | /* Allocate and initialize the free area map. */ | ||
191 | sz = num_tsb_entries / 8; | ||
192 | sz = (sz + 7UL) & ~7UL; | ||
193 | iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node); | ||
194 | if (!iommu->arena.map) { | ||
195 | printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n"); | ||
196 | return -ENOMEM; | ||
197 | } | ||
198 | memset(iommu->arena.map, 0, sz); | ||
199 | iommu->arena.limit = num_tsb_entries; | ||
200 | |||
201 | if (tlb_type != hypervisor) | ||
202 | iommu->flush_all = iommu_flushall; | ||
203 | |||
204 | /* Allocate and initialize the dummy page which we | ||
205 | * set inactive IO PTEs to point to. | ||
206 | */ | ||
207 | page = alloc_pages_node(numa_node, GFP_KERNEL, 0); | ||
208 | if (!page) { | ||
209 | printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n"); | ||
210 | goto out_free_map; | ||
211 | } | ||
212 | iommu->dummy_page = (unsigned long) page_address(page); | ||
213 | memset((void *)iommu->dummy_page, 0, PAGE_SIZE); | ||
214 | iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); | ||
215 | |||
216 | /* Now allocate and setup the IOMMU page table itself. */ | ||
217 | order = get_order(tsbsize); | ||
218 | page = alloc_pages_node(numa_node, GFP_KERNEL, order); | ||
219 | if (!page) { | ||
220 | printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); | ||
221 | goto out_free_dummy_page; | ||
222 | } | ||
223 | iommu->page_table = (iopte_t *)page_address(page); | ||
224 | |||
225 | for (i = 0; i < num_tsb_entries; i++) | ||
226 | iopte_make_dummy(iommu, &iommu->page_table[i]); | ||
227 | |||
228 | return 0; | ||
229 | |||
230 | out_free_dummy_page: | ||
231 | free_page(iommu->dummy_page); | ||
232 | iommu->dummy_page = 0UL; | ||
233 | |||
234 | out_free_map: | ||
235 | kfree(iommu->arena.map); | ||
236 | iommu->arena.map = NULL; | ||
237 | |||
238 | return -ENOMEM; | ||
239 | } | ||
240 | |||
241 | static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, | ||
242 | unsigned long npages) | ||
243 | { | ||
244 | unsigned long entry; | ||
245 | |||
246 | entry = iommu_range_alloc(dev, iommu, npages, NULL); | ||
247 | if (unlikely(entry == DMA_ERROR_CODE)) | ||
248 | return NULL; | ||
249 | |||
250 | return iommu->page_table + entry; | ||
251 | } | ||
252 | |||
/* Allocate a DMA context number from the context bitmap.  Context 0
 * is never handed out: it means "no context" and is the fallback on
 * exhaustion (after a warning).  The search starts at ctx_lowest_free
 * and wraps around once from bit 1.
 */
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	/* NOTE(review): the first search passes IOMMU_NUM_CTXS - lowest
	 * as the bitmap size with offset 'lowest' — confirm this matches
	 * the intended find_next_zero_bit() upper bound.
	 */
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		/* Nothing free at or above 'lowest'; wrap, retry from 1. */
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	/* Context 0 is never marked allocated in the bitmap. */
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
271 | |||
272 | static inline void iommu_free_ctx(struct iommu *iommu, int ctx) | ||
273 | { | ||
274 | if (likely(ctx)) { | ||
275 | __clear_bit(ctx, iommu->ctx_bitmap); | ||
276 | if (ctx < iommu->ctx_lowest_free) | ||
277 | iommu->ctx_lowest_free = ctx; | ||
278 | } | ||
279 | } | ||
280 | |||
/* Allocate a consistent (coherent) DMA buffer of @size bytes.
 * Returns the CPU virtual address and stores the bus address in
 * *@dma_addrp, or returns NULL on failure.
 */
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	/* Refuse allocations of order 10 or more. */
	if (order >= 10)
		return NULL;

	/* Allocate backing pages on the device's NUMA node, zeroed. */
	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	/* Grab a contiguous run of IOMMU entries for the buffer. */
	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	/* The bus address follows from the entry's table index. */
	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	/* Fill in a writable, consistent IOPTE per IO page. */
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
330 | |||
331 | static void dma_4u_free_coherent(struct device *dev, size_t size, | ||
332 | void *cpu, dma_addr_t dvma) | ||
333 | { | ||
334 | struct iommu *iommu; | ||
335 | iopte_t *iopte; | ||
336 | unsigned long flags, order, npages; | ||
337 | |||
338 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | ||
339 | iommu = dev->archdata.iommu; | ||
340 | iopte = iommu->page_table + | ||
341 | ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
342 | |||
343 | spin_lock_irqsave(&iommu->lock, flags); | ||
344 | |||
345 | iommu_range_free(iommu, dvma, npages); | ||
346 | |||
347 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
348 | |||
349 | order = get_order(size); | ||
350 | if (order < 10) | ||
351 | free_pages((unsigned long)cpu, order); | ||
352 | } | ||
353 | |||
/* Map a single CPU buffer of @sz bytes at @ptr for DMA.  Returns the
 * bus address (including the sub-page offset), or DMA_ERROR_CODE on
 * failure.
 */
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Number of IO pages spanned by [ptr, ptr + sz). */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	/* Bus address = table base + entry index, plus page offset. */
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	/* Streaming IOPTEs when a streaming buffer is enabled,
	 * consistent ones otherwise; writable unless DMA_TO_DEVICE.
	 */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
408 | |||
/* Push any dirty data held in the streaming cache for the range
 * [vaddr, vaddr + npages * IO_PAGE_SIZE) out to memory, then (unless
 * the transfer was DMA_TO_DEVICE, which can leave nothing dirty) wait
 * for the hardware to confirm completion via the flush flag.
 */
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		/* Flush by context: write the context number to the
		 * flush register, then re-issue the flush for each
		 * match-register bit still set.
		 */
		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			/* Deliberate goto into the else branch below:
			 * fall back to flushing page by page.
			 */
			goto do_page_flush;
		}
	} else {
		unsigned long i;

		/* Per-page flush: one write per IO page. */
	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	/* Arm the memory-based flush flag, request a flush-sync, and
	 * poll for up to 100000 x 1us.
	 */
	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
474 | |||
/* Tear down a mapping created by dma_4u_map_single(): flush streaming
 * buffers if present, repoint the IOPTEs at the dummy page, and
 * release the arena range and context.
 */
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	/* Recompute the page span exactly as the map side did. */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
520 | |||
/* Map a scatterlist for DMA, merging bus-contiguous entries into
 * fewer DMA segments where the device's max segment size and segment
 * boundary permit.  Returns the number of DMA segments produced, or 0
 * on failure (with all partial mappings backed out).
 */
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	/* One context is shared by the whole scatterlist. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	/* Streaming vs. consistent IOPTEs; writable unless TO_DEVICE. */
	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - the merged segment would exceed max_seg_size
			 * - the merge would span a segment boundary
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Terminate the list: mark the first unused output segment. */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	/* Back out every segment mapped so far: free its arena range
	 * and repoint its IOPTEs at the dummy page.
	 */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
671 | |||
672 | /* If contexts are being used, they are the same in all of the mappings | ||
673 | * we make for a particular SG. | ||
674 | */ | ||
675 | static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | ||
676 | { | ||
677 | unsigned long ctx = 0; | ||
678 | |||
679 | if (iommu->iommu_ctxflush) { | ||
680 | iopte_t *base; | ||
681 | u32 bus_addr; | ||
682 | |||
683 | bus_addr = sg->dma_address & IO_PAGE_MASK; | ||
684 | base = iommu->page_table + | ||
685 | ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
686 | |||
687 | ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; | ||
688 | } | ||
689 | return ctx; | ||
690 | } | ||
691 | |||
/* Tear down a scatterlist mapping created by dma_4u_map_sg(). */
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	/* All mappings of one SG share the same context. */
	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		/* A zero-length entry terminates the mapped list. */
		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		/* Flush streaming buffers before invalidating entries. */
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
741 | |||
/* Make a single DMA mapping's data visible to the CPU by flushing the
 * streaming buffer for the pages covering [bus_addr, bus_addr + sz).
 * A no-op when the streaming buffer is disabled.
 */
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Round the span out to whole IOMMU pages. */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
778 | |||
779 | static void dma_4u_sync_sg_for_cpu(struct device *dev, | ||
780 | struct scatterlist *sglist, int nelems, | ||
781 | enum dma_data_direction direction) | ||
782 | { | ||
783 | struct iommu *iommu; | ||
784 | struct strbuf *strbuf; | ||
785 | unsigned long flags, ctx, npages, i; | ||
786 | struct scatterlist *sg, *sgprv; | ||
787 | u32 bus_addr; | ||
788 | |||
789 | iommu = dev->archdata.iommu; | ||
790 | strbuf = dev->archdata.stc; | ||
791 | |||
792 | if (!strbuf->strbuf_enabled) | ||
793 | return; | ||
794 | |||
795 | spin_lock_irqsave(&iommu->lock, flags); | ||
796 | |||
797 | /* Step 1: Record the context, if any. */ | ||
798 | ctx = 0; | ||
799 | if (iommu->iommu_ctxflush && | ||
800 | strbuf->strbuf_ctxflush) { | ||
801 | iopte_t *iopte; | ||
802 | |||
803 | iopte = iommu->page_table + | ||
804 | ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||
805 | ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; | ||
806 | } | ||
807 | |||
808 | /* Step 2: Kick data out of streaming buffers. */ | ||
809 | bus_addr = sglist[0].dma_address & IO_PAGE_MASK; | ||
810 | sgprv = NULL; | ||
811 | for_each_sg(sglist, sg, nelems, i) { | ||
812 | if (sg->dma_length == 0) | ||
813 | break; | ||
814 | sgprv = sg; | ||
815 | } | ||
816 | |||
817 | npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) | ||
818 | - bus_addr) >> IO_PAGE_SHIFT; | ||
819 | strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); | ||
820 | |||
821 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
822 | } | ||
823 | |||
/* DMA operations for sun4u (hardware IOMMU) systems.  sun4v platforms
 * install their own table over the default below.
 */
static const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

/* The active DMA ops table; defaults to the sun4u implementation. */
const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
837 | |||
838 | int dma_supported(struct device *dev, u64 device_mask) | ||
839 | { | ||
840 | struct iommu *iommu = dev->archdata.iommu; | ||
841 | u64 dma_addr_mask = iommu->dma_addr_mask; | ||
842 | |||
843 | if (device_mask >= (1UL << 32UL)) | ||
844 | return 0; | ||
845 | |||
846 | if ((device_mask & dma_addr_mask) == dma_addr_mask) | ||
847 | return 1; | ||
848 | |||
849 | #ifdef CONFIG_PCI | ||
850 | if (dev->bus == &pci_bus_type) | ||
851 | return pci_dma_supported(to_pci_dev(dev), device_mask); | ||
852 | #endif | ||
853 | |||
854 | return 0; | ||
855 | } | ||
856 | EXPORT_SYMBOL(dma_supported); | ||
857 | |||
858 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
859 | { | ||
860 | #ifdef CONFIG_PCI | ||
861 | if (dev->bus == &pci_bus_type) | ||
862 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
863 | #endif | ||
864 | return -EINVAL; | ||
865 | } | ||
866 | EXPORT_SYMBOL(dma_set_mask); | ||
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h new file mode 100644 index 000000000000..591f5879039c --- /dev/null +++ b/arch/sparc/kernel/iommu_common.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations. | ||
2 | * | ||
3 | * Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _IOMMU_COMMON_H | ||
7 | #define _IOMMU_COMMON_H | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/scatterlist.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/iommu-helper.h> | ||
16 | |||
17 | #include <asm/iommu.h> | ||
18 | #include <asm/scatterlist.h> | ||
19 | |||
/*
 * These give mapping size of each iommu pte/tlb.
 */
#define IO_PAGE_SHIFT			13
#define IO_PAGE_SIZE			(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK			(~(IO_PAGE_SIZE-1))
#define IO_PAGE_ALIGN(addr)		ALIGN(addr, IO_PAGE_SIZE)

/* IOMMU translation storage buffer: 128K entries, 8 bytes each. */
#define IO_TSB_ENTRIES			(128*1024)
#define IO_TSB_SIZE			(IO_TSB_ENTRIES * 8)

/*
 * This is the hardwired shift in the iotlb tag/data parts.
 */
#define IOMMU_PAGE_SHIFT		13

/* Physical address of the memory backing a scatterlist entry. */
#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))
37 | |||
38 | static inline int is_span_boundary(unsigned long entry, | ||
39 | unsigned long shift, | ||
40 | unsigned long boundary_size, | ||
41 | struct scatterlist *outs, | ||
42 | struct scatterlist *sg) | ||
43 | { | ||
44 | unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs); | ||
45 | int nr = iommu_num_pages(paddr, outs->dma_length + sg->length, | ||
46 | IO_PAGE_SIZE); | ||
47 | |||
48 | return iommu_is_span_boundary(entry, nr, shift, boundary_size); | ||
49 | } | ||
50 | |||
51 | extern unsigned long iommu_range_alloc(struct device *dev, | ||
52 | struct iommu *iommu, | ||
53 | unsigned long npages, | ||
54 | unsigned long *handle); | ||
55 | extern void iommu_range_free(struct iommu *iommu, | ||
56 | dma_addr_t dma_addr, | ||
57 | unsigned long npages); | ||
58 | |||
59 | #endif /* _IOMMU_COMMON_H */ | ||
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c new file mode 100644 index 000000000000..a3ea2bcb95de --- /dev/null +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -0,0 +1,1101 @@ | |||
1 | /* irq.c: UltraSparc IRQ handling/init/registry. | ||
2 | * | ||
3 | * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/linkage.h> | ||
11 | #include <linux/ptrace.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/kernel_stat.h> | ||
14 | #include <linux/signal.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/random.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/proc_fs.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/irq.h> | ||
25 | |||
26 | #include <asm/ptrace.h> | ||
27 | #include <asm/processor.h> | ||
28 | #include <asm/atomic.h> | ||
29 | #include <asm/system.h> | ||
30 | #include <asm/irq.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/iommu.h> | ||
33 | #include <asm/upa.h> | ||
34 | #include <asm/oplib.h> | ||
35 | #include <asm/prom.h> | ||
36 | #include <asm/timer.h> | ||
37 | #include <asm/smp.h> | ||
38 | #include <asm/starfire.h> | ||
39 | #include <asm/uaccess.h> | ||
40 | #include <asm/cache.h> | ||
41 | #include <asm/cpudata.h> | ||
42 | #include <asm/auxio.h> | ||
43 | #include <asm/head.h> | ||
44 | #include <asm/hypervisor.h> | ||
45 | #include <asm/cacheflush.h> | ||
46 | |||
47 | #include "entry.h" | ||
48 | |||
/* One ino_bucket per possible interrupt vector number. */
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;	/* physical address of the table */
53 | |||
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */

/* Read bucket->__irq_chain_pa via a physical-address (bypass) load. */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
71 | |||
/* Zero bucket->__irq_chain_pa via a physical-address (bypass) store. */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}
81 | |||
/* Read bucket->__virt_irq (32-bit) via a physical-address (bypass) load. */
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
95 | |||
/* Write bucket->__virt_irq via a physical-address (bypass) store. */
static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}
107 | |||
/* Physical address of this cpu's pending-IRQ worklist head. */
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

/* Maps each allocated virtual irq number back to the (dev_handle,
 * dev_ino) pair it was created for.  Slot 0 is reserved so that a
 * zero virt_irq can mean "unallocated" (see virt_irq_alloc()).
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
116 | |||
/* Allocate a free virtual irq number for (dev_handle, dev_ino).
 * Returns the allocated slot, or 0 when the table is exhausted
 * (slot 0 is never handed out).
 */
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	/* 'ent' is an unsigned char, so every irq number must fit in it. */
	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	/* Start at 1: slot 0 is the "no irq" marker. */
	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
144 | |||
#ifdef CONFIG_PCI_MSI
/* Release a virtual irq number back to the allocator.  Only MSI
 * teardown needs this, hence the config guard.
 */
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif
160 | |||
/*
 * /proc/interrupts printing:
 */

/* seq_file show routine for /proc/interrupts: a header row for the
 * first entry, then one row per active irq with per-cpu counts, the
 * chip name, and the names of all chained actions.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
202 | |||
203 | static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) | ||
204 | { | ||
205 | unsigned int tid; | ||
206 | |||
207 | if (this_is_starfire) { | ||
208 | tid = starfire_translate(imap, cpuid); | ||
209 | tid <<= IMAP_TID_SHIFT; | ||
210 | tid &= IMAP_TID_UPA; | ||
211 | } else { | ||
212 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
213 | unsigned long ver; | ||
214 | |||
215 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
216 | if ((ver >> 32UL) == __JALAPENO_ID || | ||
217 | (ver >> 32UL) == __SERRANO_ID) { | ||
218 | tid = cpuid << IMAP_TID_SHIFT; | ||
219 | tid &= IMAP_TID_JBUS; | ||
220 | } else { | ||
221 | unsigned int a = cpuid & 0x1f; | ||
222 | unsigned int n = (cpuid >> 5) & 0x1f; | ||
223 | |||
224 | tid = ((a << IMAP_AID_SHIFT) | | ||
225 | (n << IMAP_NID_SHIFT)); | ||
226 | tid &= (IMAP_AID_SAFARI | | ||
227 | IMAP_NID_SAFARI);; | ||
228 | } | ||
229 | } else { | ||
230 | tid = cpuid << IMAP_TID_SHIFT; | ||
231 | tid &= IMAP_TID_UPA; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | return tid; | ||
236 | } | ||
237 | |||
/* Per-virt-irq chip data: the IMAP/ICLR register addresses (sun4u;
 * set to ~0UL on sun4v where hypervisor calls are used instead) plus
 * an optional pre-handler invoked before the normal flow handler.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};
246 | |||
#ifdef CONFIG_SMP
/* Pick the cpu that should receive this interrupt.  With a fully-open
 * affinity mask the choice round-robins over online cpus; otherwise
 * the first online cpu in the mask wins (falling back to round-robin
 * if the mask contains no online cpu).
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		/* Advance the rover to the next online cpu for the
		 * next caller.
		 */
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP: everything goes to the one cpu we have. */
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif
292 | |||
/* Enable a sun4u interrupt: program the IMAP register with the chosen
 * target cpu's tid plus the VALID bit, then idle the ICLR register so
 * a pending interrupt can be delivered.
 */
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		/* Clear all possible tid encodings before inserting
		 * the new one.
		 */
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}
314 | |||
/* Retargeting a sun4u irq is just a re-enable, which re-computes the
 * target id from the current affinity mask.
 */
static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}
319 | |||
320 | static void sun4u_irq_disable(unsigned int virt_irq) | ||
321 | { | ||
322 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
323 | |||
324 | if (likely(data)) { | ||
325 | unsigned long imap = data->imap; | ||
326 | unsigned long tmp = upa_readq(imap); | ||
327 | |||
328 | tmp &= ~IMAP_VALID; | ||
329 | upa_writeq(tmp, imap); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | static void sun4u_irq_eoi(unsigned int virt_irq) | ||
334 | { | ||
335 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
336 | struct irq_desc *desc = irq_desc + virt_irq; | ||
337 | |||
338 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
339 | return; | ||
340 | |||
341 | if (likely(data)) | ||
342 | upa_writeq(ICLR_IDLE, data->iclr); | ||
343 | } | ||
344 | |||
/* Enable a sun4v interrupt via hypervisor calls: set the target cpu,
 * reset the interrupt state to idle, then enable delivery.  Failures
 * are logged but not propagated.
 */
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}
364 | |||
/* Retarget a sun4v interrupt to the cpu chosen from the current
 * affinity mask.  Failure is logged but not propagated.
 */
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}
376 | |||
377 | static void sun4v_irq_disable(unsigned int virt_irq) | ||
378 | { | ||
379 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | ||
380 | int err; | ||
381 | |||
382 | err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); | ||
383 | if (err != HV_EOK) | ||
384 | printk(KERN_ERR "sun4v_intr_setenabled(%x): " | ||
385 | "err(%d)\n", ino, err); | ||
386 | } | ||
387 | |||
388 | static void sun4v_irq_eoi(unsigned int virt_irq) | ||
389 | { | ||
390 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | ||
391 | struct irq_desc *desc = irq_desc + virt_irq; | ||
392 | int err; | ||
393 | |||
394 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
395 | return; | ||
396 | |||
397 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | ||
398 | if (err != HV_EOK) | ||
399 | printk(KERN_ERR "sun4v_intr_setstate(%x): " | ||
400 | "err(%d)\n", ino, err); | ||
401 | } | ||
402 | |||
403 | static void sun4v_virq_enable(unsigned int virt_irq) | ||
404 | { | ||
405 | unsigned long cpuid, dev_handle, dev_ino; | ||
406 | int err; | ||
407 | |||
408 | cpuid = irq_choose_cpu(virt_irq); | ||
409 | |||
410 | dev_handle = virt_irq_table[virt_irq].dev_handle; | ||
411 | dev_ino = virt_irq_table[virt_irq].dev_ino; | ||
412 | |||
413 | err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); | ||
414 | if (err != HV_EOK) | ||
415 | printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " | ||
416 | "err(%d)\n", | ||
417 | dev_handle, dev_ino, cpuid, err); | ||
418 | err = sun4v_vintr_set_state(dev_handle, dev_ino, | ||
419 | HV_INTR_STATE_IDLE); | ||
420 | if (err != HV_EOK) | ||
421 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
422 | "HV_INTR_STATE_IDLE): err(%d)\n", | ||
423 | dev_handle, dev_ino, err); | ||
424 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, | ||
425 | HV_INTR_ENABLED); | ||
426 | if (err != HV_EOK) | ||
427 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
428 | "HV_INTR_ENABLED): err(%d)\n", | ||
429 | dev_handle, dev_ino, err); | ||
430 | } | ||
431 | |||
/* Retarget a sun4v cookie-based virtual interrupt to the cpu chosen
 * from the current affinity mask.  Failure is logged but not
 * propagated.
 */
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}
448 | |||
449 | static void sun4v_virq_disable(unsigned int virt_irq) | ||
450 | { | ||
451 | unsigned long dev_handle, dev_ino; | ||
452 | int err; | ||
453 | |||
454 | dev_handle = virt_irq_table[virt_irq].dev_handle; | ||
455 | dev_ino = virt_irq_table[virt_irq].dev_ino; | ||
456 | |||
457 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, | ||
458 | HV_INTR_DISABLED); | ||
459 | if (err != HV_EOK) | ||
460 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
461 | "HV_INTR_DISABLED): err(%d)\n", | ||
462 | dev_handle, dev_ino, err); | ||
463 | } | ||
464 | |||
/* End-of-interrupt for a sun4v cookie-based virtual interrupt: return
 * it to the idle state via the hypervisor, unless the irq is disabled
 * or in progress.
 */
static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
484 | |||
/* irq_chip for sun4u hardware interrupts (direct IMAP/ICLR access). */
static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

/* irq_chip for sun4v sysino interrupts (sun4v_intr_* hypervisor calls). */
static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

/* irq_chip for sun4v cookie-based virtual interrupts (sun4v_vintr_*). */
static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};
508 | |||
509 | static void pre_flow_handler(unsigned int virt_irq, | ||
510 | struct irq_desc *desc) | ||
511 | { | ||
512 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
513 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | ||
514 | |||
515 | data->pre_handler(ino, data->arg1, data->arg2); | ||
516 | |||
517 | handle_fasteoi_irq(virt_irq, desc); | ||
518 | } | ||
519 | |||
520 | void irq_install_pre_handler(int virt_irq, | ||
521 | void (*func)(unsigned int, void *, void *), | ||
522 | void *arg1, void *arg2) | ||
523 | { | ||
524 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
525 | struct irq_desc *desc = irq_desc + virt_irq; | ||
526 | |||
527 | data->pre_handler = func; | ||
528 | data->arg1 = arg1; | ||
529 | data->arg2 = arg2; | ||
530 | |||
531 | desc->handle_irq = pre_flow_handler; | ||
532 | } | ||
533 | |||
/* Build (or look up) the virtual irq for a sun4u interrupt described
 * by its ICLR/IMAP register addresses.  The ino is read from the IMAP
 * register and adjusted by inofixup.  Allocates the chip data on first
 * use; halts via the PROM if that allocation fails.
 */
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		/* First time: allocate a virt irq and wire up the chip. */
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}
572 | |||
/* Shared setup for sun4v sysino-based interrupts: look up (or create)
 * the virt irq for 'sysino', wire up the given chip, and allocate the
 * chip data on first use.  Halts via the PROM if allocation fails.
 */
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}
613 | |||
614 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) | ||
615 | { | ||
616 | unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); | ||
617 | |||
618 | return sun4v_build_common(sysino, &sun4v_irq); | ||
619 | } | ||
620 | |||
621 | unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | ||
622 | { | ||
623 | struct irq_handler_data *data; | ||
624 | unsigned long hv_err, cookie; | ||
625 | struct ino_bucket *bucket; | ||
626 | struct irq_desc *desc; | ||
627 | unsigned int virt_irq; | ||
628 | |||
629 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); | ||
630 | if (unlikely(!bucket)) | ||
631 | return 0; | ||
632 | __flush_dcache_range((unsigned long) bucket, | ||
633 | ((unsigned long) bucket + | ||
634 | sizeof(struct ino_bucket))); | ||
635 | |||
636 | virt_irq = virt_irq_alloc(devhandle, devino); | ||
637 | bucket_set_virt_irq(__pa(bucket), virt_irq); | ||
638 | |||
639 | set_irq_chip_and_handler_name(virt_irq, &sun4v_virq, | ||
640 | handle_fasteoi_irq, | ||
641 | "IVEC"); | ||
642 | |||
643 | data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); | ||
644 | if (unlikely(!data)) | ||
645 | return 0; | ||
646 | |||
647 | /* In order to make the LDC channel startup sequence easier, | ||
648 | * especially wrt. locking, we do not let request_irq() enable | ||
649 | * the interrupt. | ||
650 | */ | ||
651 | desc = irq_desc + virt_irq; | ||
652 | desc->status |= IRQ_NOAUTOEN; | ||
653 | |||
654 | set_irq_chip_data(virt_irq, data); | ||
655 | |||
656 | /* Catch accidental accesses to these things. IMAP/ICLR handling | ||
657 | * is done by hypervisor calls on sun4v platforms, not by direct | ||
658 | * register accesses. | ||
659 | */ | ||
660 | data->imap = ~0UL; | ||
661 | data->iclr = ~0UL; | ||
662 | |||
663 | cookie = ~__pa(bucket); | ||
664 | hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); | ||
665 | if (hv_err) { | ||
666 | prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " | ||
667 | "err=%lu\n", devhandle, devino, hv_err); | ||
668 | prom_halt(); | ||
669 | } | ||
670 | |||
671 | return virt_irq; | ||
672 | } | ||
673 | |||
674 | void ack_bad_irq(unsigned int virt_irq) | ||
675 | { | ||
676 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | ||
677 | |||
678 | if (!ino) | ||
679 | ino = 0xdeadbeef; | ||
680 | |||
681 | printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n", | ||
682 | ino, virt_irq); | ||
683 | } | ||
684 | |||
/* Per-cpu alternate stacks for hard and soft irq processing. */
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

/* Switch %sp onto this cpu's hardirq stack, unless we are already
 * running on it (nested interrupt).  Returns the original %sp so the
 * caller can later hand it to restore_hardirq_stack().
 */
static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Leave room for the 192-byte register window save area
		 * and apply the sparc64 stack bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
/* Undo set_hardirq_stack(): restore the saved stack pointer. */
static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
705 | |||
/* Top-level device interrupt dispatcher.  Atomically detaches this
 * cpu's pending-IVEC chain, switches to the hardirq stack, and walks
 * the chain invoking each virt irq's flow handler.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  Interrupts are
	 * disabled around the load-and-clear of the worklist head so a
	 * concurrent vectored interrupt cannot be lost.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		/* Read everything we need from the bucket before
		 * clearing its chain link.
		 */
		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
751 | |||
/* Run any pending softirqs on the dedicated per-cpu softirq
 * stack.  No-op when called from interrupt context.
 */
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		/* Top of stack, minus a 192-byte register save area
		 * and the sparc64 stack bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;

		/* Save %sp, switch to the softirq stack, run the
		 * softirqs, then switch back.
		 */
		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
777 | |||
/* Default perf counter interrupt handler: log the counter state
 * and disable further counter interrupts by zeroing the PCR.
 */
static void unhandled_perf_irq(struct pt_regs *regs)
{
	unsigned long pcr, pic;

	read_pcr(pcr);
	read_pic(pic);

	/* Shut the counters off so this doesn't repeat. */
	write_pcr(0);

	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
	       smp_processor_id());
	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
	       smp_processor_id(), pcr, pic);
}
792 | |||
/* Almost a direct copy of the powerpc PMC code.
 * perf_irq is the currently registered handler; ownership is
 * serialized by perf_irq_lock.
 */
static DEFINE_SPINLOCK(perf_irq_lock);
static void *perf_irq_owner_caller; /* mostly for debugging */
static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
797 | |||
/* Invoked from level 15 PIL handler in trap table. */
void perfctr_irq(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	/* Dispatch to the registered handler (or the unhandled stub). */
	perf_irq(regs);
}
804 | |||
805 | int register_perfctr_intr(void (*handler)(struct pt_regs *)) | ||
806 | { | ||
807 | int ret; | ||
808 | |||
809 | if (!handler) | ||
810 | return -EINVAL; | ||
811 | |||
812 | spin_lock(&perf_irq_lock); | ||
813 | if (perf_irq != unhandled_perf_irq) { | ||
814 | printk(KERN_WARNING "register_perfctr_intr: " | ||
815 | "perf IRQ busy (reserved by caller %p)\n", | ||
816 | perf_irq_owner_caller); | ||
817 | ret = -EBUSY; | ||
818 | goto out; | ||
819 | } | ||
820 | |||
821 | perf_irq_owner_caller = __builtin_return_address(0); | ||
822 | perf_irq = handler; | ||
823 | |||
824 | ret = 0; | ||
825 | out: | ||
826 | spin_unlock(&perf_irq_lock); | ||
827 | |||
828 | return ret; | ||
829 | } | ||
830 | EXPORT_SYMBOL_GPL(register_perfctr_intr); | ||
831 | |||
832 | void release_perfctr_intr(void (*handler)(struct pt_regs *)) | ||
833 | { | ||
834 | spin_lock(&perf_irq_lock); | ||
835 | perf_irq_owner_caller = NULL; | ||
836 | perf_irq = unhandled_perf_irq; | ||
837 | spin_unlock(&perf_irq_lock); | ||
838 | } | ||
839 | EXPORT_SYMBOL_GPL(release_perfctr_intr); | ||
840 | |||
#ifdef CONFIG_HOTPLUG_CPU
/* Called when a cpu goes offline: re-run set_affinity on every
 * active non-per-cpu irq so it is steered away from this cpu,
 * then shut off this cpu's timer interrupt.
 */
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif
862 | |||
/* Register layout of the PROM's two count/limit timer pairs. */
struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

/* Mapped PROM timer registers; NULL when not present/mapped. */
static struct sun5_timer *prom_timers;
/* Limit values saved by kill_prom_timer() before clearing them. */
static u64 prom_limit0, prom_limit1;
872 | |||
873 | static void map_prom_timers(void) | ||
874 | { | ||
875 | struct device_node *dp; | ||
876 | const unsigned int *addr; | ||
877 | |||
878 | /* PROM timer node hangs out in the top level of device siblings... */ | ||
879 | dp = of_find_node_by_path("/"); | ||
880 | dp = dp->child; | ||
881 | while (dp) { | ||
882 | if (!strcmp(dp->name, "counter-timer")) | ||
883 | break; | ||
884 | dp = dp->sibling; | ||
885 | } | ||
886 | |||
887 | /* Assume if node is not present, PROM uses different tick mechanism | ||
888 | * which we should not care about. | ||
889 | */ | ||
890 | if (!dp) { | ||
891 | prom_timers = (struct sun5_timer *) 0; | ||
892 | return; | ||
893 | } | ||
894 | |||
895 | /* If PROM is really using this, it must be mapped by him. */ | ||
896 | addr = of_get_property(dp, "address", NULL); | ||
897 | if (!addr) { | ||
898 | prom_printf("PROM does not have timer mapped, trying to continue.\n"); | ||
899 | prom_timers = (struct sun5_timer *) 0; | ||
900 | return; | ||
901 | } | ||
902 | prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]); | ||
903 | } | ||
904 | |||
/* Disable the PROM's timer so it can no longer generate ticks,
 * saving the limit registers first, and drain any interrupt
 * packet it may already have posted.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
931 | |||
932 | void notrace init_irqwork_curcpu(void) | ||
933 | { | ||
934 | int cpu = hard_smp_processor_id(); | ||
935 | |||
936 | trap_block[cpu].irq_worklist_pa = 0UL; | ||
937 | } | ||
938 | |||
939 | /* Please be very careful with register_one_mondo() and | ||
940 | * sun4v_register_mondo_queues(). | ||
941 | * | ||
942 | * On SMP this gets invoked from the CPU trampoline before | ||
943 | * the cpu has fully taken over the trap table from OBP, | ||
 * and its kernel stack + %g6 thread register state is
945 | * not fully cooked yet. | ||
946 | * | ||
947 | * Therefore you cannot make any OBP calls, not even prom_printf, | ||
948 | * from these two routines. | ||
949 | */ | ||
950 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) | ||
951 | { | ||
952 | unsigned long num_entries = (qmask + 1) / 64; | ||
953 | unsigned long status; | ||
954 | |||
955 | status = sun4v_cpu_qconf(type, paddr, num_entries); | ||
956 | if (status != HV_EOK) { | ||
957 | prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " | ||
958 | "err %lu\n", type, paddr, num_entries, status); | ||
959 | prom_halt(); | ||
960 | } | ||
961 | } | ||
962 | |||
/* Register this cpu's four mondo/error queues with the
 * hypervisor.  See the warning comment above: no OBP calls may
 * be made from here.
 */
void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}
976 | |||
977 | static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask) | ||
978 | { | ||
979 | unsigned long size = PAGE_ALIGN(qmask + 1); | ||
980 | void *p = __alloc_bootmem(size, size, 0); | ||
981 | if (!p) { | ||
982 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); | ||
983 | prom_halt(); | ||
984 | } | ||
985 | |||
986 | *pa_ptr = __pa(p); | ||
987 | } | ||
988 | |||
989 | static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask) | ||
990 | { | ||
991 | unsigned long size = PAGE_ALIGN(qmask + 1); | ||
992 | void *p = __alloc_bootmem(size, size, 0); | ||
993 | |||
994 | if (!p) { | ||
995 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); | ||
996 | prom_halt(); | ||
997 | } | ||
998 | |||
999 | *pa_ptr = __pa(p); | ||
1000 | } | ||
1001 | |||
/* Set up the page used to send cpu mondos: the first 64 bytes
 * hold the mondo data block, the rest of the page holds the
 * target cpu list.
 */
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	void *page;

	/* The cpu list (one u16 per cpu) must fit in the page
	 * after the 64-byte mondo block.
	 */
	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = alloc_bootmem_pages(PAGE_SIZE);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}
1019 | |||
/* Allocate mondo and error queues for all possible cpus.
 * The resumable and non-resumable error queues additionally get
 * kernel-side buffers of the same size.
 */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
			       tb->nonresum_qmask);
	}
}
1037 | |||
1038 | static void __init init_send_mondo_info(void) | ||
1039 | { | ||
1040 | int cpu; | ||
1041 | |||
1042 | for_each_possible_cpu(cpu) { | ||
1043 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
1044 | |||
1045 | init_cpu_send_mondo_info(tb); | ||
1046 | } | ||
1047 | } | ||
1048 | |||
/* Placeholder irqaction for the timer; attached to irq_desc[0]
 * at the end of init_IRQ().
 */
static struct irqaction timer_irq_action = {
	.name = "timer",
};
1052 | |||
/* Only invoked on boot processor.
 *
 * Takes over interrupt delivery from the PROM: kills the PROM
 * timer, allocates the interrupt vector table (and, on sun4v,
 * the mondo/error queues), clears stale soft interrupts, and
 * finally enables interrupt reception (PSTATE_IE).
 */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = alloc_bootmem(size);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	/* The table is accessed by physical address from trap code,
	 * so make sure it is flushed out of the D-cache.
	 */
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}
diff --git a/arch/sparc/kernel/itlb_miss.S b/arch/sparc/kernel/itlb_miss.S new file mode 100644 index 000000000000..5a8377b54955 --- /dev/null +++ b/arch/sparc/kernel/itlb_miss.S | |||
@@ -0,0 +1,39 @@ | |||
/* itlb_miss.S: Instruction TLB miss handler, expanded into the
 * trap table.  Each block below must fit in one 32-byte I-cache
 * line (8 instructions), hence the nop padding in lines 3 and 4.
 */
/* ITLB ** ICACHE line 1: Context 0 check and TSB load	*/
	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
	ldxa	[%g0] ASI_IMMU, %g6		! Get TAG TARGET
	srlx	%g6, 48, %g5			! Get context
	sllx	%g6, 22, %g6			! Zero out context
	brz,pn	%g5, kvmap_itlb			! Context 0 processing
	 srlx	%g6, 22, %g6			! Delay slot
	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
	cmp	%g4, %g6			! Compare TAG

/* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
	bne,pn	%xcc, tsb_miss_itlb		! Miss
	 mov	FAULT_CODE_ITLB, %g3
	sethi	%hi(_PAGE_EXEC_4U), %g4
	andcc	%g5, %g4, %g0			! Executable?
	be,pn	%xcc, tsb_do_fault		! Refuse non-exec mappings
	 nop					! Delay slot, fill me
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
	retry					! Trap done

/* ITLB ** ICACHE line 3:				*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/* ITLB ** ICACHE line 4:				*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
diff --git a/arch/sparc/kernel/ivec.S b/arch/sparc/kernel/ivec.S new file mode 100644 index 000000000000..d29f92ebca5e --- /dev/null +++ b/arch/sparc/kernel/ivec.S | |||
@@ -0,0 +1,51 @@ | |||
1 | /* The registers for cross calls will be: | ||
2 | * | ||
3 | * DATA 0: [low 32-bits] Address of function to call, jmp to this | ||
4 | * [high 32-bits] MMU Context Argument 0, place in %g5 | ||
5 | * DATA 1: Address Argument 1, place in %g1 | ||
6 | * DATA 2: Address Argument 2, place in %g7 | ||
7 | * | ||
8 | * With this method we can do most of the cross-call tlb/cache | ||
9 | * flushing very quickly. | ||
10 | */ | ||
11 | .align 32 | ||
12 | .globl do_ivec | ||
13 | .type do_ivec,#function | ||
14 | do_ivec: | ||
15 | mov 0x40, %g3 | ||
16 | ldxa [%g3 + %g0] ASI_INTR_R, %g3 | ||
17 | sethi %hi(KERNBASE), %g4 | ||
18 | cmp %g3, %g4 | ||
19 | bgeu,pn %xcc, do_ivec_xcall | ||
20 | srlx %g3, 32, %g5 | ||
21 | stxa %g0, [%g0] ASI_INTR_RECEIVE | ||
22 | membar #Sync | ||
23 | |||
24 | sethi %hi(ivector_table_pa), %g2 | ||
25 | ldx [%g2 + %lo(ivector_table_pa)], %g2 | ||
26 | sllx %g3, 4, %g3 | ||
27 | add %g2, %g3, %g3 | ||
28 | |||
29 | TRAP_LOAD_IRQ_WORK_PA(%g6, %g1) | ||
30 | |||
31 | ldx [%g6], %g5 | ||
32 | stxa %g5, [%g3] ASI_PHYS_USE_EC | ||
33 | stx %g3, [%g6] | ||
34 | wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint | ||
35 | retry | ||
36 | do_ivec_xcall: | ||
37 | mov 0x50, %g1 | ||
38 | ldxa [%g1 + %g0] ASI_INTR_R, %g1 | ||
39 | srl %g3, 0, %g3 | ||
40 | |||
41 | mov 0x60, %g7 | ||
42 | ldxa [%g7 + %g0] ASI_INTR_R, %g7 | ||
43 | stxa %g0, [%g0] ASI_INTR_RECEIVE | ||
44 | membar #Sync | ||
45 | ba,pt %xcc, 1f | ||
46 | nop | ||
47 | |||
48 | .align 32 | ||
49 | 1: jmpl %g3, %g0 | ||
50 | nop | ||
51 | .size do_ivec,.-do_ivec | ||
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c new file mode 100644 index 000000000000..fefbe6dc51be --- /dev/null +++ b/arch/sparc/kernel/kgdb_64.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /* kgdb.c: KGDB support for 64-bit sparc. | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kgdb.h> | ||
7 | #include <linux/kdebug.h> | ||
8 | |||
9 | #include <asm/kdebug.h> | ||
10 | #include <asm/ptrace.h> | ||
11 | #include <asm/irq.h> | ||
12 | |||
13 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
14 | { | ||
15 | struct reg_window *win; | ||
16 | int i; | ||
17 | |||
18 | gdb_regs[GDB_G0] = 0; | ||
19 | for (i = 0; i < 15; i++) | ||
20 | gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i]; | ||
21 | |||
22 | win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); | ||
23 | for (i = 0; i < 8; i++) | ||
24 | gdb_regs[GDB_L0 + i] = win->locals[i]; | ||
25 | for (i = 0; i < 8; i++) | ||
26 | gdb_regs[GDB_I0 + i] = win->ins[i]; | ||
27 | |||
28 | for (i = GDB_F0; i <= GDB_F62; i++) | ||
29 | gdb_regs[i] = 0; | ||
30 | |||
31 | gdb_regs[GDB_PC] = regs->tpc; | ||
32 | gdb_regs[GDB_NPC] = regs->tnpc; | ||
33 | gdb_regs[GDB_STATE] = regs->tstate; | ||
34 | gdb_regs[GDB_FSR] = 0; | ||
35 | gdb_regs[GDB_FPRS] = 0; | ||
36 | gdb_regs[GDB_Y] = regs->y; | ||
37 | } | ||
38 | |||
/* Synthesize a gdb register set for a task that is not currently
 * running, from its saved thread state.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	extern unsigned int switch_to_pc;
	extern unsigned int ret_from_syscall;
	struct reg_window *win;
	unsigned long pc, cwp;
	int i;

	/* Globals were not saved; report only %g6/%g7 which by
	 * kernel convention hold thread_info and task_struct.
	 */
	for (i = GDB_G0; i < GDB_G6; i++)
		gdb_regs[i] = 0;
	gdb_regs[GDB_G6] = (unsigned long) t;
	gdb_regs[GDB_G7] = (unsigned long) p;
	for (i = GDB_O0; i < GDB_SP; i++)
		gdb_regs[i] = 0;
	gdb_regs[GDB_SP] = t->ksp;
	gdb_regs[GDB_O7] = 0;

	/* Locals/ins come from the register window at the saved
	 * kernel stack pointer.
	 */
	win = (struct reg_window *) (t->ksp + STACK_BIAS);
	for (i = 0; i < 8; i++)
		gdb_regs[GDB_L0 + i] = win->locals[i];
	for (i = 0; i < 8; i++)
		gdb_regs[GDB_I0 + i] = win->ins[i];

	/* Float state is not reported. */
	for (i = GDB_F0; i <= GDB_F62; i++)
		gdb_regs[i] = 0;

	/* A freshly-forked child resumes at ret_from_syscall,
	 * everything else at the switch_to() return point.
	 */
	if (t->new_child)
		pc = (unsigned long) &ret_from_syscall;
	else
		pc = (unsigned long) &switch_to_pc;

	gdb_regs[GDB_PC] = pc;
	gdb_regs[GDB_NPC] = pc + 4;

	cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP];

	gdb_regs[GDB_STATE] = (TSTATE_PRIV | TSTATE_IE | cwp);
	gdb_regs[GDB_FSR] = 0;
	gdb_regs[GDB_FPRS] = 0;
	gdb_regs[GDB_Y] = 0;
}
81 | |||
/* Write a gdb register set back into the trap-time pt_regs and
 * the on-stack register window.
 */
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	struct reg_window *win;
	int i;

	for (i = 0; i < 15; i++)
		regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];

	/* If the TSTATE register is changing, we have to preserve
	 * the CWP field, otherwise window save/restore explodes.
	 */
	if (regs->tstate != gdb_regs[GDB_STATE]) {
		unsigned long cwp = regs->tstate & TSTATE_CWP;

		regs->tstate = (gdb_regs[GDB_STATE] & ~TSTATE_CWP) | cwp;
	}

	regs->tpc = gdb_regs[GDB_PC];
	regs->tnpc = gdb_regs[GDB_NPC];
	regs->y = gdb_regs[GDB_Y];

	/* Locals/ins go back into the register window on the stack. */
	win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
	for (i = 0; i < 8; i++)
		win->locals[i] = gdb_regs[GDB_L0 + i];
	for (i = 0; i < 8; i++)
		win->ins[i] = gdb_regs[GDB_I0 + i];
}
109 | |||
#ifdef CONFIG_SMP
/* Cross-call target used to capture the other cpus while kgdb
 * is active: disable interrupt delivery (PSTATE_IE), flush the
 * register windows, and park in the kgdb core.
 */
void smp_kgdb_capture_client(struct pt_regs *regs)
{
	unsigned long flags;

	/* Save %pstate and clear PSTATE_IE. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (flags)
			     : "i" (PSTATE_IE));

	flushw_all();

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(raw_smp_processor_id(), regs);

	/* Restore the saved %pstate. */
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (flags));
}
#endif
129 | |||
130 | int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | ||
131 | char *remcomInBuffer, char *remcomOutBuffer, | ||
132 | struct pt_regs *linux_regs) | ||
133 | { | ||
134 | unsigned long addr; | ||
135 | char *ptr; | ||
136 | |||
137 | switch (remcomInBuffer[0]) { | ||
138 | case 'c': | ||
139 | /* try to read optional parameter, pc unchanged if no parm */ | ||
140 | ptr = &remcomInBuffer[1]; | ||
141 | if (kgdb_hex2long(&ptr, &addr)) { | ||
142 | linux_regs->tpc = addr; | ||
143 | linux_regs->tnpc = addr + 4; | ||
144 | } | ||
145 | /* fallthru */ | ||
146 | |||
147 | case 'D': | ||
148 | case 'k': | ||
149 | if (linux_regs->tpc == (unsigned long) arch_kgdb_breakpoint) { | ||
150 | linux_regs->tpc = linux_regs->tnpc; | ||
151 | linux_regs->tnpc += 4; | ||
152 | } | ||
153 | return 0; | ||
154 | } | ||
155 | return -1; | ||
156 | } | ||
157 | |||
/* Entry point from the trap table for the kgdb breakpoint trap.
 * User-mode hits are not ours; hand them to the generic bad-trap
 * path.  Kernel hits drop into the kgdb exception core with
 * interrupts disabled.
 */
asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
	unsigned long flags;

	if (user_mode(regs)) {
		bad_trap(regs, trap_level);
		return;
	}

	/* Spill all register windows so the stack is walkable. */
	flushw_all();

	local_irq_save(flags);
	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
173 | |||
/* No arch-specific kgdb setup is needed on sparc64. */
int kgdb_arch_init(void)
{
	return 0;
}
178 | |||
/* Nothing to tear down; see kgdb_arch_init(). */
void kgdb_arch_exit(void)
{
}
182 | |||
/* Arch hooks for the kgdb core; the core plants this 4-byte
 * opcode at breakpoint locations.
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: ta 0x72 */
	.gdb_bpt_instr		= { 0x91, 0xd0, 0x20, 0x72 },
};
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c new file mode 100644 index 000000000000..201a6e547e4a --- /dev/null +++ b/arch/sparc/kernel/kprobes.c | |||
@@ -0,0 +1,593 @@ | |||
1 | /* arch/sparc64/kernel/kprobes.c | ||
2 | * | ||
3 | * Copyright (C) 2004 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/kprobes.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/kdebug.h> | ||
10 | #include <asm/signal.h> | ||
11 | #include <asm/cacheflush.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | /* We do not have hardware single-stepping on sparc64. | ||
15 | * So we implement software single-stepping with breakpoint | ||
16 | * traps. The top-level scheme is similar to that used | ||
17 | * in the x86 kprobes implementation. | ||
18 | * | ||
19 | * In the kprobe->ainsn.insn[] array we store the original | ||
20 | * instruction at index zero and a break instruction at | ||
21 | * index one. | ||
22 | * | ||
23 | * When we hit a kprobe we: | ||
24 | * - Run the pre-handler | ||
25 | * - Remember "regs->tnpc" and interrupt level stored in | ||
26 | * "regs->tstate" so we can restore them later | ||
27 | * - Disable PIL interrupts | ||
28 | * - Set regs->tpc to point to kprobe->ainsn.insn[0] | ||
29 | * - Set regs->tnpc to point to kprobe->ainsn.insn[1] | ||
30 | * - Mark that we are actively in a kprobe | ||
31 | * | ||
32 | * At this point we wait for the second breakpoint at | ||
33 | * kprobe->ainsn.insn[1] to hit. When it does we: | ||
34 | * - Run the post-handler | ||
35 | * - Set regs->tpc to "remembered" regs->tnpc stored above, | ||
36 | * restore the PIL interrupt level in "regs->tstate" as well | ||
37 | * - Make any adjustments necessary to regs->tnpc in order | ||
38 | * to handle relative branches correctly. See below. | ||
39 | * - Mark that we are no longer actively in a kprobe. | ||
40 | */ | ||
41 | |||
/* Per-cpu currently-active kprobe and its control block. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* No kretprobe-blacklisted symbols on sparc64. */
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
46 | |||
/* Copy the probed instruction into slot 0 of the out-of-line
 * buffer and plant the second breakpoint in slot 1, per the
 * single-step scheme described at the top of this file.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	p->ainsn.insn[0] = *p->addr;
	flushi(&p->ainsn.insn[0]);	/* flush after writing instruction memory */

	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
	flushi(&p->ainsn.insn[1]);

	/* Remember the original opcode for disarm/resume. */
	p->opcode = *p->addr;
	return 0;
}
58 | |||
/* Arm the probe: overwrite the target with the breakpoint insn. */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flushi(p->addr);
}
64 | |||
/* Disarm the probe: put the saved original instruction back. */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flushi(p->addr);
}
70 | |||
/* Stash the active kprobe state so a nested probe hit can be
 * processed; undone by restore_previous_kprobe().
 */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
	kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}
78 | |||
/* Restore the kprobe state saved by save_previous_kprobe(). */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
	kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}
86 | |||
/* Make p the active kprobe, recording the trap-time %tnpc and
 * PIL so resume_execution() can restore them.
 */
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_orig_tnpc = regs->tnpc;
	kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
94 | |||
/* Arrange for the probed instruction to be single-stepped, with
 * PIL interrupts masked for the duration.
 */
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	regs->tstate |= TSTATE_PIL;

	/* Single step inline if it is a breakpoint instruction. */
	if (p->opcode == BREAKPOINT_INSTRUCTION) {
		regs->tpc = (unsigned long) p->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
	} else {
		/* Step the out-of-line copy; the breakpoint in
		 * insn[1] traps us back afterwards.
		 */
		regs->tpc = (unsigned long) &p->ainsn.insn[0];
		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
	}
}
109 | |||
/* Breakpoint trap entry point.  Returns 1 if this was a kprobe
 * breakpoint we handled, 0 to let the kernel process the trap
 * normally.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS) {
				/* Hit a probe while single-stepping:
				 * restore the saved interrupt level and
				 * bail out to normal trap handling.
				 */
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					kcb->kprobe_orig_tstate_pil);
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			prepare_singlestep(p, regs, kcb);
			return 1;
		} else {
			if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	/* Single-step the probed instruction out of line. */
	prepare_singlestep(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
190 | |||
/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
 *
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
					       struct pt_regs *regs)
{
	unsigned long real_pc = (unsigned long) p->addr;

	/* Branch not taken, no mods necessary. */
	if (regs->tnpc == regs->tpc + 0x4UL)
		return real_pc + 0x8UL;

	/* The three cases are call, branch w/prediction,
	 * and traditional branch.
	 */
	if ((insn & 0xc0000000) == 0x40000000 ||	/* call */
	    (insn & 0xc1c00000) == 0x00400000 ||	/* branch w/prediction */
	    (insn & 0xc1c00000) == 0x00800000) {	/* traditional branch */
		unsigned long ainsn_addr;

		ainsn_addr = (unsigned long) &p->ainsn.insn[0];

		/* The instruction did all the work for us
		 * already, just apply the offset to the correct
		 * instruction location.
		 */
		return (real_pc + (regs->tnpc - ainsn_addr));
	}

	/* It is jmpl or some other absolute PC modification instruction,
	 * leave NPC as-is.
	 */
	return regs->tnpc;
}
230 | |||
/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up so it records the
 * real probe address instead of the out-of-line slot.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
				  unsigned long real_pc)
{
	unsigned long *slot = NULL;

	/* Simplest case is 'call', which always uses %o7 */
	if ((insn & 0xc0000000) == 0x40000000) {
		slot = &regs->u_regs[UREG_I7];
	}

	/* 'jmpl' encodes the register inside of the opcode */
	if ((insn & 0xc1f80000) == 0x81c00000) {
		unsigned long rd = ((insn >> 25) & 0x1f);

		if (rd <= 15) {
			slot = &regs->u_regs[rd];
		} else {
			/* Hard case, it goes onto the stack.  rd >= 16
			 * lives in the register window saved there.
			 */
			flushw_all();

			rd -= 16;
			slot = (unsigned long *)
				(regs->u_regs[UREG_FP] + STACK_BIAS);
			slot += rd;
		}
	}
	if (slot != NULL)
		*slot = real_pc;
}
263 | |||
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	u32 insn = p->ainsn.insn[0];

	/* Fix up the NPC for relative branches first... */
	regs->tnpc = relbranch_fixup(insn, p, regs);

	/* This assignment must occur after relbranch_fixup() */
	regs->tpc = kcb->kprobe_orig_tnpc;

	/* ...and any register that captured the out-of-line PC. */
	retpc_fixup(regs, insn, (unsigned long) p->addr);

	/* Restore the PIL masked off by prepare_singlestep(). */
	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
			kcb->kprobe_orig_tstate_pil);
}
290 | |||
291 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) | ||
292 | { | ||
293 | struct kprobe *cur = kprobe_running(); | ||
294 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
295 | |||
296 | if (!cur) | ||
297 | return 0; | ||
298 | |||
299 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { | ||
300 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
301 | cur->post_handler(cur, regs, 0); | ||
302 | } | ||
303 | |||
304 | resume_execution(cur, regs, kcb); | ||
305 | |||
306 | /*Restore back the original saved kprobes variables and continue. */ | ||
307 | if (kcb->kprobe_status == KPROBE_REENTER) { | ||
308 | restore_previous_kprobe(kcb); | ||
309 | goto out; | ||
310 | } | ||
311 | reset_current_kprobe(); | ||
312 | out: | ||
313 | preempt_enable_no_resched(); | ||
314 | |||
315 | return 1; | ||
316 | } | ||
317 | |||
/* Handle a page fault taken while a kprobe is active.  Returns 1 when
 * the fault is fully dealt with here, 0 to let do_page_fault() run.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the tpc points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->tpc = (unsigned long)cur->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
		regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
				kcb->kprobe_orig_tstate_pil);
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}
386 | |||
387 | /* | ||
388 | * Wrapper routine to for handling exceptions. | ||
389 | */ | ||
390 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | ||
391 | unsigned long val, void *data) | ||
392 | { | ||
393 | struct die_args *args = (struct die_args *)data; | ||
394 | int ret = NOTIFY_DONE; | ||
395 | |||
396 | if (args->regs && user_mode(args->regs)) | ||
397 | return ret; | ||
398 | |||
399 | switch (val) { | ||
400 | case DIE_DEBUG: | ||
401 | if (kprobe_handler(args->regs)) | ||
402 | ret = NOTIFY_STOP; | ||
403 | break; | ||
404 | case DIE_DEBUG_2: | ||
405 | if (post_kprobe_handler(args->regs)) | ||
406 | ret = NOTIFY_STOP; | ||
407 | break; | ||
408 | default: | ||
409 | break; | ||
410 | } | ||
411 | return ret; | ||
412 | } | ||
413 | |||
/* Trap-table entry for the two software traps used by kprobes
 * ('ta 0x70' breakpoint, 'ta 0x71' post-single-step).  Kernel-mode
 * instances are routed through the die notifier chain; anything
 * unclaimed (including all user-mode hits) is a bad trap.
 */
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
				      struct pt_regs *regs)
{
	BUG_ON(trap_level != 0x170 && trap_level != 0x171);

	if (user_mode(regs)) {
		local_irq_enable();
		bad_trap(regs, trap_level);
		return;
	}

	/* trap_level == 0x170 --> ta 0x70
	 * trap_level == 0x171 --> ta 0x71
	 */
	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
		       (trap_level == 0x170) ? "debug" : "debug_2",
		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
		bad_trap(regs, trap_level);
}
433 | |||
434 | /* Jprobes support. */ | ||
435 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | ||
436 | { | ||
437 | struct jprobe *jp = container_of(p, struct jprobe, kp); | ||
438 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
439 | |||
440 | memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs)); | ||
441 | |||
442 | regs->tpc = (unsigned long) jp->entry; | ||
443 | regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; | ||
444 | regs->tstate |= TSTATE_PIL; | ||
445 | |||
446 | return 1; | ||
447 | } | ||
448 | |||
/* Called by the jprobe handler to return to the probed function.
 * Pops register windows with 'restore' until %sp climbs back to the
 * frame that was current when the probe fired, then executes
 * 'ta 0x70' so longjmp_break_handler() can restore the saved regs.
 */
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	/* Pinned to %g1 so it survives the window-restoring loop. */
	register unsigned long orig_fp asm("g1");

	orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
	__asm__ __volatile__("\n"
	"1: cmp %%sp, %0\n\t"
	"blu,a,pt %%xcc, 1b\n\t"
	" restore\n\t"
	".globl jprobe_return_trap_instruction\n"
	"jprobe_return_trap_instruction:\n\t"
	"ta 0x70"
	: /* no outputs */
	: "r" (orig_fp));
}
465 | |||
466 | extern void jprobe_return_trap_instruction(void); | ||
467 | |||
468 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | ||
469 | { | ||
470 | u32 *addr = (u32 *) regs->tpc; | ||
471 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
472 | |||
473 | if (addr == (u32 *) jprobe_return_trap_instruction) { | ||
474 | memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs)); | ||
475 | preempt_enable_no_resched(); | ||
476 | return 1; | ||
477 | } | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | /* The value stored in the return address register is actually 2 | ||
482 | * instructions before where the callee will return to. | ||
483 | * Sequences usually look something like this | ||
484 | * | ||
485 | * call some_function <--- return register points here | ||
486 | * nop <--- call delay slot | ||
487 | * whatever <--- where callee returns to | ||
488 | * | ||
489 | * To keep trampoline_probe_handler logic simpler, we normalize the | ||
490 | * value kept in ri->ret_addr so we don't need to keep adjusting it | ||
491 | * back and forth. | ||
492 | */ | ||
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	/* Save the normalized real return target (see comment above:
	 * %o7 points 8 bytes before where the callee actually returns).
	 */
	ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

	/* Replace the return addr with trampoline addr */
	regs->u_regs[UREG_RETPC] =
		((unsigned long)kretprobe_trampoline) - 8;
}
502 | |||
503 | /* | ||
504 | * Called when the probe at kretprobe trampoline is hit | ||
505 | */ | ||
/* Pre-handler for the probe planted on kretprobe_trampoline.  Finds
 * this task's pending kretprobe instances, runs their handlers, and
 * redirects execution to the real return address.  Returns 1 so
 * kprobe_handler() skips single-stepping and the post handler.
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	/* ret_addr was pre-normalized by arch_prepare_kretprobe(). */
	regs->tpc = orig_ret_address;
	regs->tnpc = orig_ret_address + 4;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* Free recycled instances outside the hash lock. */
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
569 | |||
/* Never called directly; exists only to emit the global
 * kretprobe_trampoline symbol that hijacked return addresses land on.
 * The probe registered on it (trampoline_p below) funnels execution
 * into trampoline_probe_handler().
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline:\n"
		     "\tnop\n"
		     "\tnop\n");
}
/* The kprobe planted on the kretprobe trampoline itself. */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
581 | |||
/* Boot-time arch hook: register the kretprobe trampoline probe. */
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
586 | |||
587 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | ||
588 | { | ||
589 | if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) | ||
590 | return 1; | ||
591 | |||
592 | return 0; | ||
593 | } | ||
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h new file mode 100644 index 000000000000..4248d969272f --- /dev/null +++ b/arch/sparc/kernel/kstack.h | |||
@@ -0,0 +1,60 @@ | |||
1 | #ifndef _KSTACK_H | ||
2 | #define _KSTACK_H | ||
3 | |||
4 | #include <linux/thread_info.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <asm/ptrace.h> | ||
7 | #include <asm/irq.h> | ||
8 | |||
9 | /* SP must be STACK_BIAS adjusted already. */ | ||
static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
	unsigned long base = (unsigned long) tp;

	/* Within the task's own stack?  Leave room for the thread_info
	 * at the bottom and one full stack frame at the top.
	 */
	if (sp >= (base + sizeof(struct thread_info)) &&
	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
		return true;

	/* Otherwise it may be on this cpu's hard- or soft-IRQ stack. */
	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
	}
	return false;
}
30 | |||
31 | /* Does "regs" point to a valid pt_regs trap frame? */ | ||
static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
{
	unsigned long base = (unsigned long) tp;
	unsigned long addr = (unsigned long) regs;

	/* The frame must lie entirely within the task stack or one of
	 * this cpu's IRQ stacks...
	 */
	if (addr >= base &&
	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
		goto check_magic;

	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
	}
	return false;

check_magic:
	/* ...and carry the PT_REGS_MAGIC cookie (the low bits hold the
	 * trap type, so mask them off before comparing).
	 */
	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
		return true;
	return false;

}
59 | |||
60 | #endif /* _KSTACK_H */ | ||
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S new file mode 100644 index 000000000000..cef8defcd7a9 --- /dev/null +++ b/arch/sparc/kernel/ktlb.S | |||
@@ -0,0 +1,304 @@ | |||
1 | /* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net> | ||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) | ||
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | ||
6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
7 | */ | ||
8 | |||
9 | #include <asm/head.h> | ||
10 | #include <asm/asi.h> | ||
11 | #include <asm/page.h> | ||
12 | #include <asm/pgtable.h> | ||
13 | #include <asm/tsb.h> | ||
14 | |||
15 | .text | ||
16 | .align 32 | ||
17 | |||
kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  This is an *instruction*
	 * miss, so on failure we must take the ITLB long path; the
	 * DTLB long path would read the data-side TLB_TAG_ACCESS and
	 * report FAULT_CODE_DTLB for what is really a text fault.
	 */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	/* Below LOW_OBP_ADDRESS: vmalloc/module text.
	 * [LOW_OBP_ADDRESS, 4GB): firmware (OBP) translations.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 KTSB_STORE(%g1, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr		%tpc, %g5
	ba,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop
111 | |||
kvmap_dtlb_obp:
	/* Resolve a firmware (OBP) data translation, cache it in the
	 * kernel TSB, then load the DTLB.
	 */
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	/* Install a 4MB linear-mapping PTE into the 4M TSB, then load it. */
	KTSB_LOCK_TAG(%g1, %g2, %g7)
	KTSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop
128 | |||
kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	/* Negative virtual addresses are the kernel linear mapping. */
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Clear the PAGE_OFFSET top virtual bits, then shift
	 * down to get a 256MB physical address index.
	 */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7

	/* Divide by 64 to get the offset into the bitmask.  */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

	/* Patched at boot; %g2 xor vaddr forms the linear PTE. */
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 KTSB_STORE(%g1, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3
228 | |||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	/* Index vmemmap_table[] by ((vaddr - VMEMMAP_BASE) >> 22) and
	 * load the precomputed mapping PTE.
	 */
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	/* Only [MODULES_VADDR, VMALLOC_END) is resolvable here;
	 * anything outside that range is a genuine fault.
	 */
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	/* [LOW_OBP_ADDRESS, 4GB) holds firmware translations; the rest
	 * of the range is vmalloc/modules and needs a page-table walk.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr		%tl, %g3
	cmp		%g3, 1

661:	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	ldx		[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_DTLB, %g4
	ba,pt		%xcc, winfix_trampoline
	 nop
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c new file mode 100644 index 000000000000..d68982330f66 --- /dev/null +++ b/arch/sparc/kernel/ldc.c | |||
@@ -0,0 +1,2378 @@ | |||
1 | /* ldc.c: Logical Domain Channel link-layer protocol driver. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/scatterlist.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/init.h> | ||
17 | |||
18 | #include <asm/hypervisor.h> | ||
19 | #include <asm/iommu.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/ldc.h> | ||
22 | #include <asm/mdesc.h> | ||
23 | |||
24 | #define DRV_MODULE_NAME "ldc" | ||
25 | #define PFX DRV_MODULE_NAME ": " | ||
26 | #define DRV_MODULE_VERSION "1.1" | ||
27 | #define DRV_MODULE_RELDATE "July 22, 2008" | ||
28 | |||
/* Driver banner printed once at module init. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
/* Every LDC frame, in any mode, is exactly 64 bytes on the wire. */
#define LDC_PACKET_SIZE 64
32 | |||
33 | /* Packet header layout for unreliable and reliable mode frames. | ||
34 | * When in RAW mode, packets are simply straight 64-byte payloads | ||
35 | * with no headers. | ||
36 | */ | ||
struct ldc_packet {
	u8			type;	/* LDC_CTRL, LDC_DATA or LDC_ERR */
#define LDC_CTRL	0x01
#define LDC_DATA	0x02
#define LDC_ERR		0x10

	u8			stype;	/* sub-type of a control packet */
#define LDC_INFO	0x01
#define LDC_ACK		0x02
#define LDC_NACK	0x04

	u8			ctrl;	/* handshake operation, low 4 bits */
#define LDC_VERS	0x01 /* Link Version */
#define LDC_RTS		0x02 /* Request To Send */
#define LDC_RTR		0x03 /* Ready To Receive */
#define LDC_RDX		0x04 /* Ready for Data eXchange */
#define LDC_CTRL_MSK	0x0f

	u8			env;	/* payload length + fragment flags */
#define LDC_LEN		0x3f
#define LDC_FRAG_MASK	0xc0
#define LDC_START	0x40
#define LDC_STOP	0x80

	u32			seqid;	/* sender's sequence number */

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];	/* unreliable-mode payload */
		struct {
			u32	pad;
			u32	ackid;	/* highest seqid ACKed by peer */
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];	/* reliable/stream payload */
		} r;
	} u;
};
72 | |||
/* A link protocol version, negotiated during the handshake. */
struct ldc_version {
	u16 major;
	u16 minor;
};

/* Ordered from largest major to lowest. */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};
82 | |||
/* Defaults applied when the channel config leaves MTU/queue size zero. */
#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)

struct ldc_channel;

/* Per-mode read/write implementations (RAW, UNRELIABLE, STREAM). */
struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

/* Non-zero when logical domaining is enabled on this system. */
int ldom_domaining_enabled;

struct ldc_iommu {
	/* Protects arena alloc/free.  */
	spinlock_t			lock;
	struct iommu_arena		arena;
	struct ldc_mtable_entry		*page_table;
};
105 | |||
struct ldc_channel {
	/* Protects all operations that depend upon channel state.  */
	spinlock_t			lock;

	unsigned long			id;	/* hypervisor channel id */

	/* Reassembly buffer for fragmented (multi-packet) messages. */
	u8				*mssbuf;
	u32				mssbuf_len;
	u32				mssbuf_off;

	/* TX ring: base address, head/tail byte offsets, size, real addr. */
	struct ldc_packet		*tx_base;
	unsigned long			tx_head;
	unsigned long			tx_tail;
	unsigned long			tx_num_entries;
	unsigned long			tx_ra;

	/* Offset of the oldest TX packet not yet ACKed by the peer. */
	unsigned long			tx_acked;

	/* RX ring, same layout as TX. */
	struct ldc_packet		*rx_base;
	unsigned long			rx_head;
	unsigned long			rx_tail;
	unsigned long			rx_num_entries;
	unsigned long			rx_ra;

	/* Next expected receive seqid / next transmit seqid. */
	u32				rcv_nxt;
	u32				snd_nxt;

	unsigned long			chan_state;

	struct ldc_channel_config	cfg;
	void				*event_arg;

	const struct ldc_mode_ops	*mops;

	struct ldc_iommu		iommu;

	/* Protocol version agreed upon during the handshake. */
	struct ldc_version		ver;

	u8				hs_state;	/* handshake progress */
#define LDC_HS_CLOSED			0x00
#define LDC_HS_OPEN			0x01
#define LDC_HS_GOTVERS			0x02
#define LDC_HS_SENTRTR			0x03
#define LDC_HS_GOTRTR			0x04
#define LDC_HS_COMPLETE			0x10

	u8				flags;
#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

	u8				mss;	/* payload bytes per packet */
	u8				state;

#define LDC_IRQ_NAME_MAX		32
	char				rx_irq_name[LDC_IRQ_NAME_MAX];
	char				tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head		mh_list;

	struct hlist_node		list;
};

/* Per-channel debug printk, gated by the LDC_DEBUG_<TYPE> bits in the
 * channel config.  Expects a variable named 'lp' in scope.
 */
#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
174 | |||
175 | static const char *state_to_str(u8 state) | ||
176 | { | ||
177 | switch (state) { | ||
178 | case LDC_STATE_INVALID: | ||
179 | return "INVALID"; | ||
180 | case LDC_STATE_INIT: | ||
181 | return "INIT"; | ||
182 | case LDC_STATE_BOUND: | ||
183 | return "BOUND"; | ||
184 | case LDC_STATE_READY: | ||
185 | return "READY"; | ||
186 | case LDC_STATE_CONNECTED: | ||
187 | return "CONNECTED"; | ||
188 | default: | ||
189 | return "<UNKNOWN>"; | ||
190 | } | ||
191 | } | ||
192 | |||
/* Transition the channel to @state, logging the change when STATE
 * debugging is enabled for this channel.
 */
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}
201 | |||
202 | static unsigned long __advance(unsigned long off, unsigned long num_entries) | ||
203 | { | ||
204 | off += LDC_PACKET_SIZE; | ||
205 | if (off == (num_entries * LDC_PACKET_SIZE)) | ||
206 | off = 0; | ||
207 | |||
208 | return off; | ||
209 | } | ||
210 | |||
/* Advance an RX queue offset by one packet, with wrap-around. */
static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}
215 | |||
/* Advance a TX queue offset by one packet, with wrap-around. */
static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}
220 | |||
/* Return a pointer to the next free TX slot and report, via @new_tail,
 * the tail value to commit once the packet is filled in.  Returns NULL
 * when advancing would collide with the head (TX queue full).
 */
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
236 | |||
237 | /* When we are in reliable or stream mode, have to track the next packet | ||
238 | * we haven't gotten an ACK for in the TX queue using tx_acked. We have | ||
239 | * to be careful not to stomp over the queue past that point. During | ||
240 | * the handshake, we don't have TX data packets pending in the queue | ||
241 | * and that's why handshake_get_tx_packet() need not be mindful of | ||
242 | * lp->tx_acked. | ||
243 | */ | ||
244 | static unsigned long head_for_data(struct ldc_channel *lp) | ||
245 | { | ||
246 | if (lp->cfg.mode == LDC_MODE_STREAM) | ||
247 | return lp->tx_acked; | ||
248 | return lp->tx_head; | ||
249 | } | ||
250 | |||
/* Return non-zero if the TX queue has room for @size payload bytes,
 * i.e. enough free packet slots at lp->mss payload bytes each.
 */
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	/* Bytes free between new_tail and the limit, accounting for
	 * ring wrap-around.
	 */
	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}
275 | |||
/* Like handshake_get_tx_packet(), but bounded by head_for_data() so
 * that un-ACKed packets are never overwritten in stream mode.
 */
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
292 | |||
/* Publish a new TX queue tail to the hypervisor, retrying briefly on
 * HV_EWOULDBLOCK.  On failure the software tail is rolled back so the
 * channel state stays consistent with the hypervisor's view.
 */
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}
316 | |||
317 | /* This just updates the head value in the hypervisor using | ||
318 | * a polling loop with a timeout. The caller takes care of | ||
319 | * upating software state representing the head change, if any. | ||
320 | */ | ||
321 | static int __set_rx_head(struct ldc_channel *lp, unsigned long head) | ||
322 | { | ||
323 | int limit = 1000; | ||
324 | |||
325 | while (limit-- > 0) { | ||
326 | unsigned long err; | ||
327 | |||
328 | err = sun4v_ldc_rx_set_qhead(lp->id, head); | ||
329 | if (!err) | ||
330 | return 0; | ||
331 | |||
332 | if (err != HV_EWOULDBLOCK) | ||
333 | return -EINVAL; | ||
334 | |||
335 | udelay(1); | ||
336 | } | ||
337 | |||
338 | return -EBUSY; | ||
339 | } | ||
340 | |||
/* Commit packet 'p' by advancing the TX tail to 'new_tail'.
 * 'p' must be exactly the slot handed out by the most recent
 * *_get_tx_packet() call, i.e. the slot at the current tx_tail.
 */
static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}
349 | |||
350 | static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp, | ||
351 | u8 stype, u8 ctrl, | ||
352 | void *data, int dlen, | ||
353 | unsigned long *new_tail) | ||
354 | { | ||
355 | struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail); | ||
356 | |||
357 | if (p) { | ||
358 | memset(p, 0, sizeof(*p)); | ||
359 | p->type = LDC_CTRL; | ||
360 | p->stype = stype; | ||
361 | p->ctrl = ctrl; | ||
362 | if (data) | ||
363 | memcpy(p->u.u_data, data, dlen); | ||
364 | } | ||
365 | return p; | ||
366 | } | ||
367 | |||
368 | static int start_handshake(struct ldc_channel *lp) | ||
369 | { | ||
370 | struct ldc_packet *p; | ||
371 | struct ldc_version *ver; | ||
372 | unsigned long new_tail; | ||
373 | |||
374 | ver = &ver_arr[0]; | ||
375 | |||
376 | ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n", | ||
377 | ver->major, ver->minor); | ||
378 | |||
379 | p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS, | ||
380 | ver, sizeof(*ver), &new_tail); | ||
381 | if (p) { | ||
382 | int err = send_tx_packet(lp, p, new_tail); | ||
383 | if (!err) | ||
384 | lp->flags &= ~LDC_FLAG_RESET; | ||
385 | return err; | ||
386 | } | ||
387 | return -EBUSY; | ||
388 | } | ||
389 | |||
390 | static int send_version_nack(struct ldc_channel *lp, | ||
391 | u16 major, u16 minor) | ||
392 | { | ||
393 | struct ldc_packet *p; | ||
394 | struct ldc_version ver; | ||
395 | unsigned long new_tail; | ||
396 | |||
397 | ver.major = major; | ||
398 | ver.minor = minor; | ||
399 | |||
400 | p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS, | ||
401 | &ver, sizeof(ver), &new_tail); | ||
402 | if (p) { | ||
403 | ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n", | ||
404 | ver.major, ver.minor); | ||
405 | |||
406 | return send_tx_packet(lp, p, new_tail); | ||
407 | } | ||
408 | return -EBUSY; | ||
409 | } | ||
410 | |||
411 | static int send_version_ack(struct ldc_channel *lp, | ||
412 | struct ldc_version *vp) | ||
413 | { | ||
414 | struct ldc_packet *p; | ||
415 | unsigned long new_tail; | ||
416 | |||
417 | p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS, | ||
418 | vp, sizeof(*vp), &new_tail); | ||
419 | if (p) { | ||
420 | ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n", | ||
421 | vp->major, vp->minor); | ||
422 | |||
423 | return send_tx_packet(lp, p, new_tail); | ||
424 | } | ||
425 | return -EBUSY; | ||
426 | } | ||
427 | |||
428 | static int send_rts(struct ldc_channel *lp) | ||
429 | { | ||
430 | struct ldc_packet *p; | ||
431 | unsigned long new_tail; | ||
432 | |||
433 | p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0, | ||
434 | &new_tail); | ||
435 | if (p) { | ||
436 | p->env = lp->cfg.mode; | ||
437 | p->seqid = 0; | ||
438 | lp->rcv_nxt = 0; | ||
439 | |||
440 | ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n", | ||
441 | p->env, p->seqid); | ||
442 | |||
443 | return send_tx_packet(lp, p, new_tail); | ||
444 | } | ||
445 | return -EBUSY; | ||
446 | } | ||
447 | |||
448 | static int send_rtr(struct ldc_channel *lp) | ||
449 | { | ||
450 | struct ldc_packet *p; | ||
451 | unsigned long new_tail; | ||
452 | |||
453 | p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0, | ||
454 | &new_tail); | ||
455 | if (p) { | ||
456 | p->env = lp->cfg.mode; | ||
457 | p->seqid = 0; | ||
458 | |||
459 | ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n", | ||
460 | p->env, p->seqid); | ||
461 | |||
462 | return send_tx_packet(lp, p, new_tail); | ||
463 | } | ||
464 | return -EBUSY; | ||
465 | } | ||
466 | |||
467 | static int send_rdx(struct ldc_channel *lp) | ||
468 | { | ||
469 | struct ldc_packet *p; | ||
470 | unsigned long new_tail; | ||
471 | |||
472 | p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0, | ||
473 | &new_tail); | ||
474 | if (p) { | ||
475 | p->env = 0; | ||
476 | p->seqid = ++lp->snd_nxt; | ||
477 | p->u.r.ackid = lp->rcv_nxt; | ||
478 | |||
479 | ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n", | ||
480 | p->env, p->seqid, p->u.r.ackid); | ||
481 | |||
482 | return send_tx_packet(lp, p, new_tail); | ||
483 | } | ||
484 | return -EBUSY; | ||
485 | } | ||
486 | |||
/* NACK a received data packet, echoing its type and control bits back
 * to the peer.  lp->snd_nxt is only advanced once the NACK has actually
 * been queued to the hypervisor.
 */
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;
	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	/* Use snd_nxt + 1 here; the increment is committed below only if
	 * the packet was successfully queued.
	 */
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}
512 | |||
/* Reset the channel after a protocol error: re-program both queues
 * with the hypervisor (which discards their contents) and refetch the
 * head/tail/state snapshots.  Always returns -ECONNRESET so callers
 * can simply "return ldc_abort(lp);".
 */
static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}
557 | |||
558 | static struct ldc_version *find_by_major(u16 major) | ||
559 | { | ||
560 | struct ldc_version *ret = NULL; | ||
561 | int i; | ||
562 | |||
563 | for (i = 0; i < ARRAY_SIZE(ver_arr); i++) { | ||
564 | struct ldc_version *v = &ver_arr[i]; | ||
565 | if (v->major <= major) { | ||
566 | ret = v; | ||
567 | break; | ||
568 | } | ||
569 | } | ||
570 | return ret; | ||
571 | } | ||
572 | |||
573 | static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp) | ||
574 | { | ||
575 | struct ldc_version *vap; | ||
576 | int err; | ||
577 | |||
578 | ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n", | ||
579 | vp->major, vp->minor); | ||
580 | |||
581 | if (lp->hs_state == LDC_HS_GOTVERS) { | ||
582 | lp->hs_state = LDC_HS_OPEN; | ||
583 | memset(&lp->ver, 0, sizeof(lp->ver)); | ||
584 | } | ||
585 | |||
586 | vap = find_by_major(vp->major); | ||
587 | if (!vap) { | ||
588 | err = send_version_nack(lp, 0, 0); | ||
589 | } else if (vap->major != vp->major) { | ||
590 | err = send_version_nack(lp, vap->major, vap->minor); | ||
591 | } else { | ||
592 | struct ldc_version ver = *vp; | ||
593 | if (ver.minor > vap->minor) | ||
594 | ver.minor = vap->minor; | ||
595 | err = send_version_ack(lp, &ver); | ||
596 | if (!err) { | ||
597 | lp->ver = ver; | ||
598 | lp->hs_state = LDC_HS_GOTVERS; | ||
599 | } | ||
600 | } | ||
601 | if (err) | ||
602 | return ldc_abort(lp); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp) | ||
608 | { | ||
609 | ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n", | ||
610 | vp->major, vp->minor); | ||
611 | |||
612 | if (lp->hs_state == LDC_HS_GOTVERS) { | ||
613 | if (lp->ver.major != vp->major || | ||
614 | lp->ver.minor != vp->minor) | ||
615 | return ldc_abort(lp); | ||
616 | } else { | ||
617 | lp->ver = *vp; | ||
618 | lp->hs_state = LDC_HS_GOTVERS; | ||
619 | } | ||
620 | if (send_rts(lp)) | ||
621 | return ldc_abort(lp); | ||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp) | ||
626 | { | ||
627 | struct ldc_version *vap; | ||
628 | |||
629 | if ((vp->major == 0 && vp->minor == 0) || | ||
630 | !(vap = find_by_major(vp->major))) { | ||
631 | return ldc_abort(lp); | ||
632 | } else { | ||
633 | struct ldc_packet *p; | ||
634 | unsigned long new_tail; | ||
635 | |||
636 | p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS, | ||
637 | vap, sizeof(*vap), | ||
638 | &new_tail); | ||
639 | if (p) | ||
640 | return send_tx_packet(lp, p, new_tail); | ||
641 | else | ||
642 | return ldc_abort(lp); | ||
643 | } | ||
644 | } | ||
645 | |||
646 | static int process_version(struct ldc_channel *lp, | ||
647 | struct ldc_packet *p) | ||
648 | { | ||
649 | struct ldc_version *vp; | ||
650 | |||
651 | vp = (struct ldc_version *) p->u.u_data; | ||
652 | |||
653 | switch (p->stype) { | ||
654 | case LDC_INFO: | ||
655 | return process_ver_info(lp, vp); | ||
656 | |||
657 | case LDC_ACK: | ||
658 | return process_ver_ack(lp, vp); | ||
659 | |||
660 | case LDC_NACK: | ||
661 | return process_ver_nack(lp, vp); | ||
662 | |||
663 | default: | ||
664 | return ldc_abort(lp); | ||
665 | } | ||
666 | } | ||
667 | |||
668 | static int process_rts(struct ldc_channel *lp, | ||
669 | struct ldc_packet *p) | ||
670 | { | ||
671 | ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n", | ||
672 | p->stype, p->seqid, p->env); | ||
673 | |||
674 | if (p->stype != LDC_INFO || | ||
675 | lp->hs_state != LDC_HS_GOTVERS || | ||
676 | p->env != lp->cfg.mode) | ||
677 | return ldc_abort(lp); | ||
678 | |||
679 | lp->snd_nxt = p->seqid; | ||
680 | lp->rcv_nxt = p->seqid; | ||
681 | lp->hs_state = LDC_HS_SENTRTR; | ||
682 | if (send_rtr(lp)) | ||
683 | return ldc_abort(lp); | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | static int process_rtr(struct ldc_channel *lp, | ||
689 | struct ldc_packet *p) | ||
690 | { | ||
691 | ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n", | ||
692 | p->stype, p->seqid, p->env); | ||
693 | |||
694 | if (p->stype != LDC_INFO || | ||
695 | p->env != lp->cfg.mode) | ||
696 | return ldc_abort(lp); | ||
697 | |||
698 | lp->snd_nxt = p->seqid; | ||
699 | lp->hs_state = LDC_HS_COMPLETE; | ||
700 | ldc_set_state(lp, LDC_STATE_CONNECTED); | ||
701 | send_rdx(lp); | ||
702 | |||
703 | return LDC_EVENT_UP; | ||
704 | } | ||
705 | |||
706 | static int rx_seq_ok(struct ldc_channel *lp, u32 seqid) | ||
707 | { | ||
708 | return lp->rcv_nxt + 1 == seqid; | ||
709 | } | ||
710 | |||
711 | static int process_rdx(struct ldc_channel *lp, | ||
712 | struct ldc_packet *p) | ||
713 | { | ||
714 | ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n", | ||
715 | p->stype, p->seqid, p->env, p->u.r.ackid); | ||
716 | |||
717 | if (p->stype != LDC_INFO || | ||
718 | !(rx_seq_ok(lp, p->seqid))) | ||
719 | return ldc_abort(lp); | ||
720 | |||
721 | lp->rcv_nxt = p->seqid; | ||
722 | |||
723 | lp->hs_state = LDC_HS_COMPLETE; | ||
724 | ldc_set_state(lp, LDC_STATE_CONNECTED); | ||
725 | |||
726 | return LDC_EVENT_UP; | ||
727 | } | ||
728 | |||
729 | static int process_control_frame(struct ldc_channel *lp, | ||
730 | struct ldc_packet *p) | ||
731 | { | ||
732 | switch (p->ctrl) { | ||
733 | case LDC_VERS: | ||
734 | return process_version(lp, p); | ||
735 | |||
736 | case LDC_RTS: | ||
737 | return process_rts(lp, p); | ||
738 | |||
739 | case LDC_RTR: | ||
740 | return process_rtr(lp, p); | ||
741 | |||
742 | case LDC_RDX: | ||
743 | return process_rdx(lp, p); | ||
744 | |||
745 | default: | ||
746 | return ldc_abort(lp); | ||
747 | } | ||
748 | } | ||
749 | |||
/* Any error frame from the peer forces a full channel reset. */
static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}
755 | |||
756 | static int process_data_ack(struct ldc_channel *lp, | ||
757 | struct ldc_packet *ack) | ||
758 | { | ||
759 | unsigned long head = lp->tx_acked; | ||
760 | u32 ackid = ack->u.r.ackid; | ||
761 | |||
762 | while (1) { | ||
763 | struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE); | ||
764 | |||
765 | head = tx_advance(lp, head); | ||
766 | |||
767 | if (p->seqid == ackid) { | ||
768 | lp->tx_acked = head; | ||
769 | return 0; | ||
770 | } | ||
771 | if (head == lp->tx_tail) | ||
772 | return ldc_abort(lp); | ||
773 | } | ||
774 | |||
775 | return 0; | ||
776 | } | ||
777 | |||
778 | static void send_events(struct ldc_channel *lp, unsigned int event_mask) | ||
779 | { | ||
780 | if (event_mask & LDC_EVENT_RESET) | ||
781 | lp->cfg.event(lp->event_arg, LDC_EVENT_RESET); | ||
782 | if (event_mask & LDC_EVENT_UP) | ||
783 | lp->cfg.event(lp->event_arg, LDC_EVENT_UP); | ||
784 | if (event_mask & LDC_EVENT_DATA_READY) | ||
785 | lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY); | ||
786 | } | ||
787 | |||
/* RX interrupt handler.  Snapshots the queue state from the hypervisor
 * and, depending on the handshake phase, either just accumulates events
 * for the client callback or walks the queue processing handshake
 * control frames.  Events are delivered after the lock is dropped.
 */
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, hv_err, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	/* RAW mode has no handshake: the channel coming UP at the
	 * hypervisor level is all it takes to be connected.
	 */
	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	/* Still mid-handshake: consume queued frames one at a time,
	 * advancing the hypervisor RX head after each.
	 */
	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			/* Positive returns are LDC_EVENT_* bits. */
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		/* A control frame may have just completed the handshake;
		 * switch to the event-only reporting path above.
		 */
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	/* Callback runs without the channel lock held. */
	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
901 | |||
/* TX interrupt handler.  Refreshes the TX queue snapshot from the
 * hypervisor; in RAW mode the channel going UP here also completes the
 * (non-existent) handshake and reports LDC_EVENT_UP.
 */
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, hv_err, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	/* Callback runs without the channel lock held. */
	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
933 | |||
934 | /* XXX ldc_alloc() and ldc_free() needs to run under a mutex so | ||
935 | * XXX that addition and removal from the ldc_channel_list has | ||
936 | * XXX atomicity, otherwise the __ldc_channel_exists() check is | ||
937 | * XXX totally pointless as another thread can slip into ldc_alloc() | ||
938 | * XXX and add a channel with the same ID. There also needs to be | ||
939 | * XXX a spinlock for ldc_channel_list. | ||
940 | */ | ||
941 | static HLIST_HEAD(ldc_channel_list); | ||
942 | |||
943 | static int __ldc_channel_exists(unsigned long id) | ||
944 | { | ||
945 | struct ldc_channel *lp; | ||
946 | struct hlist_node *n; | ||
947 | |||
948 | hlist_for_each_entry(lp, n, &ldc_channel_list, list) { | ||
949 | if (lp->id == id) | ||
950 | return 1; | ||
951 | } | ||
952 | return 0; | ||
953 | } | ||
954 | |||
955 | static int alloc_queue(const char *name, unsigned long num_entries, | ||
956 | struct ldc_packet **base, unsigned long *ra) | ||
957 | { | ||
958 | unsigned long size, order; | ||
959 | void *q; | ||
960 | |||
961 | size = num_entries * LDC_PACKET_SIZE; | ||
962 | order = get_order(size); | ||
963 | |||
964 | q = (void *) __get_free_pages(GFP_KERNEL, order); | ||
965 | if (!q) { | ||
966 | printk(KERN_ERR PFX "Alloc of %s queue failed with " | ||
967 | "size=%lu order=%lu\n", name, size, order); | ||
968 | return -ENOMEM; | ||
969 | } | ||
970 | |||
971 | memset(q, 0, PAGE_SIZE << order); | ||
972 | |||
973 | *base = q; | ||
974 | *ra = __pa(q); | ||
975 | |||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | static void free_queue(unsigned long num_entries, struct ldc_packet *q) | ||
980 | { | ||
981 | unsigned long size, order; | ||
982 | |||
983 | if (!q) | ||
984 | return; | ||
985 | |||
986 | size = num_entries * LDC_PACKET_SIZE; | ||
987 | order = get_order(size); | ||
988 | |||
989 | free_pages((unsigned long)q, order); | ||
990 | } | ||
991 | |||
992 | /* XXX Make this configurable... XXX */ | ||
993 | #define LDC_IOTABLE_SIZE (8 * 1024) | ||
994 | |||
995 | static int ldc_iommu_init(struct ldc_channel *lp) | ||
996 | { | ||
997 | unsigned long sz, num_tsb_entries, tsbsize, order; | ||
998 | struct ldc_iommu *iommu = &lp->iommu; | ||
999 | struct ldc_mtable_entry *table; | ||
1000 | unsigned long hv_err; | ||
1001 | int err; | ||
1002 | |||
1003 | num_tsb_entries = LDC_IOTABLE_SIZE; | ||
1004 | tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry); | ||
1005 | |||
1006 | spin_lock_init(&iommu->lock); | ||
1007 | |||
1008 | sz = num_tsb_entries / 8; | ||
1009 | sz = (sz + 7UL) & ~7UL; | ||
1010 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); | ||
1011 | if (!iommu->arena.map) { | ||
1012 | printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz); | ||
1013 | return -ENOMEM; | ||
1014 | } | ||
1015 | |||
1016 | iommu->arena.limit = num_tsb_entries; | ||
1017 | |||
1018 | order = get_order(tsbsize); | ||
1019 | |||
1020 | table = (struct ldc_mtable_entry *) | ||
1021 | __get_free_pages(GFP_KERNEL, order); | ||
1022 | err = -ENOMEM; | ||
1023 | if (!table) { | ||
1024 | printk(KERN_ERR PFX "Alloc of MTE table failed, " | ||
1025 | "size=%lu order=%lu\n", tsbsize, order); | ||
1026 | goto out_free_map; | ||
1027 | } | ||
1028 | |||
1029 | memset(table, 0, PAGE_SIZE << order); | ||
1030 | |||
1031 | iommu->page_table = table; | ||
1032 | |||
1033 | hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table), | ||
1034 | num_tsb_entries); | ||
1035 | err = -EINVAL; | ||
1036 | if (hv_err) | ||
1037 | goto out_free_table; | ||
1038 | |||
1039 | return 0; | ||
1040 | |||
1041 | out_free_table: | ||
1042 | free_pages((unsigned long) table, order); | ||
1043 | iommu->page_table = NULL; | ||
1044 | |||
1045 | out_free_map: | ||
1046 | kfree(iommu->arena.map); | ||
1047 | iommu->arena.map = NULL; | ||
1048 | |||
1049 | return err; | ||
1050 | } | ||
1051 | |||
1052 | static void ldc_iommu_release(struct ldc_channel *lp) | ||
1053 | { | ||
1054 | struct ldc_iommu *iommu = &lp->iommu; | ||
1055 | unsigned long num_tsb_entries, tsbsize, order; | ||
1056 | |||
1057 | (void) sun4v_ldc_set_map_table(lp->id, 0, 0); | ||
1058 | |||
1059 | num_tsb_entries = iommu->arena.limit; | ||
1060 | tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry); | ||
1061 | order = get_order(tsbsize); | ||
1062 | |||
1063 | free_pages((unsigned long) iommu->page_table, order); | ||
1064 | iommu->page_table = NULL; | ||
1065 | |||
1066 | kfree(iommu->arena.map); | ||
1067 | iommu->arena.map = NULL; | ||
1068 | } | ||
1069 | |||
1070 | struct ldc_channel *ldc_alloc(unsigned long id, | ||
1071 | const struct ldc_channel_config *cfgp, | ||
1072 | void *event_arg) | ||
1073 | { | ||
1074 | struct ldc_channel *lp; | ||
1075 | const struct ldc_mode_ops *mops; | ||
1076 | unsigned long dummy1, dummy2, hv_err; | ||
1077 | u8 mss, *mssbuf; | ||
1078 | int err; | ||
1079 | |||
1080 | err = -ENODEV; | ||
1081 | if (!ldom_domaining_enabled) | ||
1082 | goto out_err; | ||
1083 | |||
1084 | err = -EINVAL; | ||
1085 | if (!cfgp) | ||
1086 | goto out_err; | ||
1087 | |||
1088 | switch (cfgp->mode) { | ||
1089 | case LDC_MODE_RAW: | ||
1090 | mops = &raw_ops; | ||
1091 | mss = LDC_PACKET_SIZE; | ||
1092 | break; | ||
1093 | |||
1094 | case LDC_MODE_UNRELIABLE: | ||
1095 | mops = &nonraw_ops; | ||
1096 | mss = LDC_PACKET_SIZE - 8; | ||
1097 | break; | ||
1098 | |||
1099 | case LDC_MODE_STREAM: | ||
1100 | mops = &stream_ops; | ||
1101 | mss = LDC_PACKET_SIZE - 8 - 8; | ||
1102 | break; | ||
1103 | |||
1104 | default: | ||
1105 | goto out_err; | ||
1106 | } | ||
1107 | |||
1108 | if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq) | ||
1109 | goto out_err; | ||
1110 | |||
1111 | hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2); | ||
1112 | err = -ENODEV; | ||
1113 | if (hv_err == HV_ECHANNEL) | ||
1114 | goto out_err; | ||
1115 | |||
1116 | err = -EEXIST; | ||
1117 | if (__ldc_channel_exists(id)) | ||
1118 | goto out_err; | ||
1119 | |||
1120 | mssbuf = NULL; | ||
1121 | |||
1122 | lp = kzalloc(sizeof(*lp), GFP_KERNEL); | ||
1123 | err = -ENOMEM; | ||
1124 | if (!lp) | ||
1125 | goto out_err; | ||
1126 | |||
1127 | spin_lock_init(&lp->lock); | ||
1128 | |||
1129 | lp->id = id; | ||
1130 | |||
1131 | err = ldc_iommu_init(lp); | ||
1132 | if (err) | ||
1133 | goto out_free_ldc; | ||
1134 | |||
1135 | lp->mops = mops; | ||
1136 | lp->mss = mss; | ||
1137 | |||
1138 | lp->cfg = *cfgp; | ||
1139 | if (!lp->cfg.mtu) | ||
1140 | lp->cfg.mtu = LDC_DEFAULT_MTU; | ||
1141 | |||
1142 | if (lp->cfg.mode == LDC_MODE_STREAM) { | ||
1143 | mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL); | ||
1144 | if (!mssbuf) { | ||
1145 | err = -ENOMEM; | ||
1146 | goto out_free_iommu; | ||
1147 | } | ||
1148 | lp->mssbuf = mssbuf; | ||
1149 | } | ||
1150 | |||
1151 | lp->event_arg = event_arg; | ||
1152 | |||
1153 | /* XXX allow setting via ldc_channel_config to override defaults | ||
1154 | * XXX or use some formula based upon mtu | ||
1155 | */ | ||
1156 | lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES; | ||
1157 | lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES; | ||
1158 | |||
1159 | err = alloc_queue("TX", lp->tx_num_entries, | ||
1160 | &lp->tx_base, &lp->tx_ra); | ||
1161 | if (err) | ||
1162 | goto out_free_mssbuf; | ||
1163 | |||
1164 | err = alloc_queue("RX", lp->rx_num_entries, | ||
1165 | &lp->rx_base, &lp->rx_ra); | ||
1166 | if (err) | ||
1167 | goto out_free_txq; | ||
1168 | |||
1169 | lp->flags |= LDC_FLAG_ALLOCED_QUEUES; | ||
1170 | |||
1171 | lp->hs_state = LDC_HS_CLOSED; | ||
1172 | ldc_set_state(lp, LDC_STATE_INIT); | ||
1173 | |||
1174 | INIT_HLIST_NODE(&lp->list); | ||
1175 | hlist_add_head(&lp->list, &ldc_channel_list); | ||
1176 | |||
1177 | INIT_HLIST_HEAD(&lp->mh_list); | ||
1178 | |||
1179 | return lp; | ||
1180 | |||
1181 | out_free_txq: | ||
1182 | free_queue(lp->tx_num_entries, lp->tx_base); | ||
1183 | |||
1184 | out_free_mssbuf: | ||
1185 | if (mssbuf) | ||
1186 | kfree(mssbuf); | ||
1187 | |||
1188 | out_free_iommu: | ||
1189 | ldc_iommu_release(lp); | ||
1190 | |||
1191 | out_free_ldc: | ||
1192 | kfree(lp); | ||
1193 | |||
1194 | out_err: | ||
1195 | return ERR_PTR(err); | ||
1196 | } | ||
1197 | EXPORT_SYMBOL(ldc_alloc); | ||
1198 | |||
1199 | void ldc_free(struct ldc_channel *lp) | ||
1200 | { | ||
1201 | if (lp->flags & LDC_FLAG_REGISTERED_IRQS) { | ||
1202 | free_irq(lp->cfg.rx_irq, lp); | ||
1203 | free_irq(lp->cfg.tx_irq, lp); | ||
1204 | } | ||
1205 | |||
1206 | if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) { | ||
1207 | sun4v_ldc_tx_qconf(lp->id, 0, 0); | ||
1208 | sun4v_ldc_rx_qconf(lp->id, 0, 0); | ||
1209 | lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES; | ||
1210 | } | ||
1211 | if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) { | ||
1212 | free_queue(lp->tx_num_entries, lp->tx_base); | ||
1213 | free_queue(lp->rx_num_entries, lp->rx_base); | ||
1214 | lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES; | ||
1215 | } | ||
1216 | |||
1217 | hlist_del(&lp->list); | ||
1218 | |||
1219 | if (lp->mssbuf) | ||
1220 | kfree(lp->mssbuf); | ||
1221 | |||
1222 | ldc_iommu_release(lp); | ||
1223 | |||
1224 | kfree(lp); | ||
1225 | } | ||
1226 | EXPORT_SYMBOL(ldc_free); | ||
1227 | |||
1228 | /* Bind the channel. This registers the LDC queues with | ||
1229 | * the hypervisor and puts the channel into a pseudo-listening | ||
1230 | * state. This does not initiate a handshake, ldc_connect() does | ||
1231 | * that. | ||
1232 | */ | ||
int ldc_bind(struct ldc_channel *lp, const char *name)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (!name ||
	    (lp->state != LDC_STATE_INIT))
		return -EINVAL;

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  lp->rx_irq_name, lp);
	if (err)
		return err;

	err = request_irq(lp->cfg.tx_irq, ldc_tx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		return err;
	}


	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	/* qconf with (0, 0) presumably unconfigures any stale queue
	 * before programming the real one — confirm against the sun4v
	 * hypervisor LDC API.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	/* Nothing is in flight yet, so everything up to the current
	 * head counts as acknowledged.
	 */
	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);
1319 | EXPORT_SYMBOL(ldc_bind); | ||
1320 | |||
1321 | int ldc_connect(struct ldc_channel *lp) | ||
1322 | { | ||
1323 | unsigned long flags; | ||
1324 | int err; | ||
1325 | |||
1326 | if (lp->cfg.mode == LDC_MODE_RAW) | ||
1327 | return -EINVAL; | ||
1328 | |||
1329 | spin_lock_irqsave(&lp->lock, flags); | ||
1330 | |||
1331 | if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || | ||
1332 | !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || | ||
1333 | lp->hs_state != LDC_HS_OPEN) | ||
1334 | err = -EINVAL; | ||
1335 | else | ||
1336 | err = start_handshake(lp); | ||
1337 | |||
1338 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1339 | |||
1340 | return err; | ||
1341 | } | ||
1342 | EXPORT_SYMBOL(ldc_connect); | ||
1343 | |||
/* Force an active channel back down to the BOUND state by tearing
 * down and re-registering both queues with the hypervisor.
 *
 * Not valid for RAW mode channels; the queues must already be
 * allocated and registered.  On any hypervisor failure the queues
 * are unconfigured, the IRQs released, and the channel dropped to
 * INIT state with -ENODEV returned.
 */
int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	/* Unconfigure then reconfigure the TX queue; this resets the
	 * hypervisor-side head/tail pointers.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	/* Same dance for the RX queue. */
	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	/* Back to BOUND: a fresh handshake is required, and RESET is
	 * flagged so in-flight users notice the disruption.
	 */
	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);
1397 | |||
/* Return the channel's current LDC_STATE_* value.  This is a
 * lockless snapshot; the state may change as soon as we return.
 */
int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);
1403 | |||
1404 | static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) | ||
1405 | { | ||
1406 | struct ldc_packet *p; | ||
1407 | unsigned long new_tail; | ||
1408 | int err; | ||
1409 | |||
1410 | if (size > LDC_PACKET_SIZE) | ||
1411 | return -EMSGSIZE; | ||
1412 | |||
1413 | p = data_get_tx_packet(lp, &new_tail); | ||
1414 | if (!p) | ||
1415 | return -EAGAIN; | ||
1416 | |||
1417 | memcpy(p, buf, size); | ||
1418 | |||
1419 | err = send_tx_packet(lp, p, new_tail); | ||
1420 | if (!err) | ||
1421 | err = size; | ||
1422 | |||
1423 | return err; | ||
1424 | } | ||
1425 | |||
/* RAW mode read: consume at most one LDC packet verbatim into 'buf'.
 *
 * Returns LDC_PACKET_SIZE on success, 0 if the RX queue is empty,
 * -EINVAL if the buffer cannot hold a full packet, -ECONNRESET if
 * the channel went down, or ldc_abort()'s result on hypervisor
 * errors.  Caller holds lp->lock.
 */
static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	/* Refresh our cached view of the RX queue pointers. */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	/* head == tail means the queue is empty. */
	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	/* Publish the new head so the hypervisor can reuse the slot. */
	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}
1463 | |||
/* Mode ops for LDC_MODE_RAW: one packet in, one packet out, no
 * sequencing, acking, or fragmentation.
 */
static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};
1468 | |||
/* Send 'size' bytes as a sequence of DATA/INFO packets, used by both
 * UNRELIABLE and STREAM modes.  The payload is split into MSS-sized
 * fragments; the first carries LDC_START, the last LDC_STOP.
 *
 * Returns 'size' on success, -EAGAIN if the TX queue cannot hold the
 * whole message, -EBUSY if the TX state read fails, or ldc_abort()'s
 * result when the channel is not UP.  Caller holds lp->lock.
 */
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	/* All-or-nothing: don't start writing unless the entire
	 * message fits in the queue right now.
	 */
	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		/* UNRELIABLE packets have a larger payload area since
		 * they carry no ackid field.
		 */
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		/* Encode length plus START on the first fragment and
		 * STOP on the last.
		 */
		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	/* Only after the tail is published does snd_nxt advance. */
	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}
1536 | |||
1537 | static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p, | ||
1538 | struct ldc_packet *first_frag) | ||
1539 | { | ||
1540 | int err; | ||
1541 | |||
1542 | if (first_frag) | ||
1543 | lp->rcv_nxt = first_frag->seqid - 1; | ||
1544 | |||
1545 | err = send_data_nack(lp, p); | ||
1546 | if (err) | ||
1547 | return err; | ||
1548 | |||
1549 | err = __set_rx_head(lp, lp->rx_tail); | ||
1550 | if (err < 0) | ||
1551 | return ldc_abort(lp); | ||
1552 | |||
1553 | return 0; | ||
1554 | } | ||
1555 | |||
1556 | static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p) | ||
1557 | { | ||
1558 | if (p->stype & LDC_ACK) { | ||
1559 | int err = process_data_ack(lp, p); | ||
1560 | if (err) | ||
1561 | return err; | ||
1562 | } | ||
1563 | if (p->stype & LDC_NACK) | ||
1564 | return ldc_abort(lp); | ||
1565 | |||
1566 | return 0; | ||
1567 | } | ||
1568 | |||
/* Busy-wait (up to ~1000 iterations of udelay(1)) for more RX data
 * to arrive while mid-way through reassembling a fragmented packet.
 *
 * Returns 0 once the tail moves past 'cur_head', -ECONNRESET if the
 * channel drops, -EAGAIN on timeout, or ldc_abort()'s result on a
 * hypervisor error.
 */
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		/* New data has been queued behind our resync point. */
		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}
1601 | |||
1602 | static int rx_set_head(struct ldc_channel *lp, unsigned long head) | ||
1603 | { | ||
1604 | int err = __set_rx_head(lp, head); | ||
1605 | |||
1606 | if (err < 0) | ||
1607 | return ldc_abort(lp); | ||
1608 | |||
1609 | lp->rx_head = head; | ||
1610 | return 0; | ||
1611 | } | ||
1612 | |||
1613 | static void send_data_ack(struct ldc_channel *lp) | ||
1614 | { | ||
1615 | unsigned long new_tail; | ||
1616 | struct ldc_packet *p; | ||
1617 | |||
1618 | p = data_get_tx_packet(lp, &new_tail); | ||
1619 | if (likely(p)) { | ||
1620 | int err; | ||
1621 | |||
1622 | memset(p, 0, sizeof(*p)); | ||
1623 | p->type = LDC_DATA; | ||
1624 | p->stype = LDC_ACK; | ||
1625 | p->ctrl = 0; | ||
1626 | p->seqid = lp->snd_nxt + 1; | ||
1627 | p->u.r.ackid = lp->rcv_nxt; | ||
1628 | |||
1629 | err = send_tx_packet(lp, p, new_tail); | ||
1630 | if (!err) | ||
1631 | lp->snd_nxt++; | ||
1632 | } | ||
1633 | } | ||
1634 | |||
/* Non-raw read: reassemble one complete (possibly fragmented) DATA
 * packet into 'buf', processing control frames and ACK/NACK packets
 * encountered along the way.
 *
 * Returns the number of payload bytes copied, 0 if the RX queue is
 * empty, -ECONNRESET if the link dropped, -EMSGSIZE if 'buf' is too
 * small for the packet (RX ring left untouched so the caller can
 * retry with a bigger buffer), or a negative error from a helper.
 * In reliable modes a DATA/ACK is sent after a successful read.
 * Caller holds lp->lock.
 */
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid,
		       p->u.r.ackid,
		       lp->rcv_nxt);

		/* Out-of-sequence packet: NACK it and resynchronize. */
		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		/* Pure control packets carry no payload; skip them. */
		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL && (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;

			if (!first_frag)
				goto no_data;
		}
		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one. */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	/* On error mid-reassembly, rewind rcv_nxt so the peer's
	 * retransmission is accepted.
	 */
	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err) {
		err = copied;
		if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
			send_data_ack(lp);
	}

	return err;
}
1788 | |||
/* Mode ops for LDC_MODE_UNRELIABLE: sequenced, fragmented packets
 * without the stream-mode MSS reassembly buffer.
 */
static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};
1793 | |||
1794 | static int write_stream(struct ldc_channel *lp, const void *buf, | ||
1795 | unsigned int size) | ||
1796 | { | ||
1797 | if (size > lp->cfg.mtu) | ||
1798 | size = lp->cfg.mtu; | ||
1799 | return write_nonraw(lp, buf, size); | ||
1800 | } | ||
1801 | |||
/* STREAM mode read: received packets are staged in lp->mssbuf so a
 * caller may drain one large packet with several small reads.
 *
 * The staging buffer is refilled via read_nonraw() only when empty;
 * then at most 'size' bytes are copied out.  Returns the number of
 * bytes copied (possibly 0) or a negative error from read_nonraw().
 */
static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	/* Clamp to what is actually buffered. */
	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}
1822 | |||
/* Mode ops for LDC_MODE_STREAM: reliable byte-stream semantics with
 * MTU clamping on write and partial-read buffering on read.
 */
static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};
1827 | |||
1828 | int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size) | ||
1829 | { | ||
1830 | unsigned long flags; | ||
1831 | int err; | ||
1832 | |||
1833 | if (!buf) | ||
1834 | return -EINVAL; | ||
1835 | |||
1836 | if (!size) | ||
1837 | return 0; | ||
1838 | |||
1839 | spin_lock_irqsave(&lp->lock, flags); | ||
1840 | |||
1841 | if (lp->hs_state != LDC_HS_COMPLETE) | ||
1842 | err = -ENOTCONN; | ||
1843 | else | ||
1844 | err = lp->mops->write(lp, buf, size); | ||
1845 | |||
1846 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1847 | |||
1848 | return err; | ||
1849 | } | ||
1850 | EXPORT_SYMBOL(ldc_write); | ||
1851 | |||
1852 | int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size) | ||
1853 | { | ||
1854 | unsigned long flags; | ||
1855 | int err; | ||
1856 | |||
1857 | if (!buf) | ||
1858 | return -EINVAL; | ||
1859 | |||
1860 | if (!size) | ||
1861 | return 0; | ||
1862 | |||
1863 | spin_lock_irqsave(&lp->lock, flags); | ||
1864 | |||
1865 | if (lp->hs_state != LDC_HS_COMPLETE) | ||
1866 | err = -ENOTCONN; | ||
1867 | else | ||
1868 | err = lp->mops->read(lp, buf, size); | ||
1869 | |||
1870 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1871 | |||
1872 | return err; | ||
1873 | } | ||
1874 | EXPORT_SYMBOL(ldc_read); | ||
1875 | |||
/* Allocate 'npages' contiguous entries from the IOMMU arena bitmap.
 * The search starts at the rotating hint and makes one wrap-around
 * pass from 0 back up to the hint before giving up.
 *
 * Returns the starting entry index, or -1 if no free run exists.
 * Caller holds iommu->lock.
 */
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			/* Wrap: rescan from 0 up to the original hint. */
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	/* Verify the whole candidate run is free; on a collision,
	 * resume searching just past the busy bit.
	 */
	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
1915 | |||
1916 | #define COOKIE_PGSZ_CODE 0xf000000000000000ULL | ||
1917 | #define COOKIE_PGSZ_CODE_SHIFT 60ULL | ||
1918 | |||
1919 | static u64 pagesize_code(void) | ||
1920 | { | ||
1921 | switch (PAGE_SIZE) { | ||
1922 | default: | ||
1923 | case (8ULL * 1024ULL): | ||
1924 | return 0; | ||
1925 | case (64ULL * 1024ULL): | ||
1926 | return 1; | ||
1927 | case (512ULL * 1024ULL): | ||
1928 | return 2; | ||
1929 | case (4ULL * 1024ULL * 1024ULL): | ||
1930 | return 3; | ||
1931 | case (32ULL * 1024ULL * 1024ULL): | ||
1932 | return 4; | ||
1933 | case (256ULL * 1024ULL * 1024ULL): | ||
1934 | return 5; | ||
1935 | } | ||
1936 | } | ||
1937 | |||
1938 | static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset) | ||
1939 | { | ||
1940 | return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) | | ||
1941 | (index << PAGE_SHIFT) | | ||
1942 | page_offset); | ||
1943 | } | ||
1944 | |||
/* Recover the mtable index from a transfer cookie, stripping the
 * page-size code nibble first.  The index is extracted by shifting
 * out 13 bits (log2 of the 8K base page) plus 3 bits per page-size
 * code step.
 *
 * NOTE(review): '*shift' is set to szcode * 3 only, without the base
 * 13 -- confirm that the caller (free_npages) intends a shift
 * relative to the 8K page granularity rather than a full per-page
 * byte shift.
 */
static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
1955 | |||
1956 | static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu, | ||
1957 | unsigned long npages) | ||
1958 | { | ||
1959 | long entry; | ||
1960 | |||
1961 | entry = arena_alloc(iommu, npages); | ||
1962 | if (unlikely(entry < 0)) | ||
1963 | return NULL; | ||
1964 | |||
1965 | return iommu->page_table + entry; | ||
1966 | } | ||
1967 | |||
/* Translate LDC_MAP_* permission flags into an mtable-entry template:
 * the page-size code in the low bits plus the LDC_MTE_* permission
 * bits for each requested access class (shadow copy, direct map,
 * IOMMU).
 */
static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	/* Start with the page-size code. */
	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}
1997 | |||
1998 | static int pages_in_region(unsigned long base, long len) | ||
1999 | { | ||
2000 | int count = 0; | ||
2001 | |||
2002 | do { | ||
2003 | unsigned long new = (base + PAGE_SIZE) & PAGE_MASK; | ||
2004 | |||
2005 | len -= (new - base); | ||
2006 | base = new; | ||
2007 | count++; | ||
2008 | } while (len > 0); | ||
2009 | |||
2010 | return count; | ||
2011 | } | ||
2012 | |||
/* Scratch state threaded through fill_cookies() while building a
 * cookie list:
 *   page_table  - base of the channel's mtable
 *   cookies     - output cookie array being filled
 *   mte_base    - permission/page-size template for each mte
 *   prev_cookie - ending cookie of the previous range, for coalescing
 *   pte_idx     - next mtable entry index to fill
 *   nc          - number of cookies emitted so far
 */
struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};
2021 | |||
/* Populate mtable entries and export cookies covering the physical
 * range [pa + off, pa + off + len).  Ranges whose cookies turn out
 * to be contiguous with the previous one are coalesced by extending
 * that cookie instead of emitting a new one.  Entries starting at
 * sp->pte_idx were pre-reserved by alloc_npages().
 */
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		/* Bytes of this page actually covered: clipped by the
		 * initial offset and by the remaining length.
		 */
		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		/* Only the first page can begin mid-page. */
		off = 0;

		if (this_cookie == sp->prev_cookie) {
			/* Contiguous with the last cookie: grow it. */
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}
2057 | |||
2058 | static int sg_count_one(struct scatterlist *sg) | ||
2059 | { | ||
2060 | unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT; | ||
2061 | long len = sg->length; | ||
2062 | |||
2063 | if ((sg->offset | len) & (8UL - 1)) | ||
2064 | return -EFAULT; | ||
2065 | |||
2066 | return pages_in_region(base + sg->offset, len); | ||
2067 | } | ||
2068 | |||
2069 | static int sg_count_pages(struct scatterlist *sg, int num_sg) | ||
2070 | { | ||
2071 | int count; | ||
2072 | int i; | ||
2073 | |||
2074 | count = 0; | ||
2075 | for (i = 0; i < num_sg; i++) { | ||
2076 | int err = sg_count_one(sg + i); | ||
2077 | if (err < 0) | ||
2078 | return err; | ||
2079 | count += err; | ||
2080 | } | ||
2081 | |||
2082 | return count; | ||
2083 | } | ||
2084 | |||
/* Map a scatterlist into the channel's private IOMMU and fill in up
 * to 'ncookies' transfer cookies describing the mapping for the
 * remote end.
 *
 * Returns the number of cookies produced, or -EINVAL on bad
 * permission bits, -EFAULT on unaligned segments, -EMSGSIZE when
 * more cookies would be needed than supplied, or -ENOMEM when the
 * IOMMU arena is exhausted.
 */
int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	/* Worst case is one cookie per page; bail early if the
	 * caller's cookie array cannot hold that many.
	 */
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
2130 | |||
/* Map a single physically-contiguous kernel buffer into the
 * channel's IOMMU.  The buffer address and length must be 8-byte
 * aligned.  Because the region is contiguous, fill_cookies() always
 * coalesces it into exactly one cookie (enforced by the BUG_ON).
 *
 * Returns 1 (the cookie count) on success, -EINVAL on bad
 * permissions or ncookies < 1, -EFAULT on misalignment, or -ENOMEM
 * when the arena is exhausted.
 */
int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc != 1);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
2171 | |||
/* Release the mtable entries backing one transfer cookie: revoke any
 * mapping the remote side still holds and clear the arena bits so
 * the entries can be reused.  Caller holds iommu->lock.
 *
 * NOTE(review): 'base' is never advanced inside the loop, so only
 * the first mtable entry's cookie is checked/revoked and only its
 * mte is zeroed, while all npages arena bits are cleared -- confirm
 * whether base[i] was intended here.
 */
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	/* Page count covering the (possibly unaligned) byte range. */
	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		__clear_bit(index + i, arena->map);
	}
}
2194 | |||
2195 | void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies, | ||
2196 | int ncookies) | ||
2197 | { | ||
2198 | struct ldc_iommu *iommu = &lp->iommu; | ||
2199 | unsigned long flags; | ||
2200 | int i; | ||
2201 | |||
2202 | spin_lock_irqsave(&iommu->lock, flags); | ||
2203 | for (i = 0; i < ncookies; i++) { | ||
2204 | u64 addr = cookies[i].cookie_addr; | ||
2205 | u64 size = cookies[i].cookie_size; | ||
2206 | |||
2207 | free_npages(lp->id, iommu, addr, size); | ||
2208 | } | ||
2209 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
2210 | } | ||
2211 | EXPORT_SYMBOL(ldc_unmap); | ||
2212 | |||
/* Copy 'len' bytes between the local buffer 'buf' and the remote
 * memory described by 'cookies', in the direction 'copy_dir'
 * (LDC_COPY_IN or LDC_COPY_OUT).  'offset' skips that many bytes
 * into the cookie ranges before copying begins.
 *
 * The buffer, length, and offset must all be 8-byte aligned and the
 * channel handshake must be complete.  Returns the number of bytes
 * actually transferred (possibly short -- see the comment at the
 * bottom), or -EINVAL/-EFAULT/-ECONNRESET on a bad direction,
 * misalignment/HV fault, or a downed link.
 */
int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		/* Consume the caller's starting offset against this
		 * cookie; skip the cookie entirely if the offset
		 * swallows all of it.
		 */
		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		/* The hypervisor may transfer fewer bytes than asked;
		 * keep retrying until this cookie's span is done.
		 */
		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
2302 | |||
2303 | void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len, | ||
2304 | struct ldc_trans_cookie *cookies, int *ncookies, | ||
2305 | unsigned int map_perm) | ||
2306 | { | ||
2307 | void *buf; | ||
2308 | int err; | ||
2309 | |||
2310 | if (len & (8UL - 1)) | ||
2311 | return ERR_PTR(-EINVAL); | ||
2312 | |||
2313 | buf = kzalloc(len, GFP_KERNEL); | ||
2314 | if (!buf) | ||
2315 | return ERR_PTR(-ENOMEM); | ||
2316 | |||
2317 | err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm); | ||
2318 | if (err < 0) { | ||
2319 | kfree(buf); | ||
2320 | return ERR_PTR(err); | ||
2321 | } | ||
2322 | *ncookies = err; | ||
2323 | |||
2324 | return buf; | ||
2325 | } | ||
2326 | EXPORT_SYMBOL(ldc_alloc_exp_dring); | ||
2327 | |||
/* Unmap and free a descriptor ring previously created with
 * ldc_alloc_exp_dring().  'len' is unused but kept for API symmetry.
 */
void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
2335 | |||
/* LDC subsystem init: verify the machine description exposes a
 * platform node with "domaining-enabled", register the LDOM
 * hypervisor API group (v1.0), and if domaining is actually turned
 * on, set ldom_domaining_enabled.
 *
 * Returns 0 on full success, -ENODEV if any prerequisite is absent.
 * NOTE(review): the "Domaining disabled" path also returns -ENODEV
 * even though the hvapi group was registered -- confirm this is the
 * intended failure mode.
 */
static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	err = -ENODEV;
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}

core_initcall(ldc_init);
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c new file mode 100644 index 000000000000..dde52bcf5c64 --- /dev/null +++ b/arch/sparc/kernel/mdesc.c | |||
@@ -0,0 +1,916 @@ | |||
1 | /* mdesc.c: Sun4V machine description handling. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/lmb.h> | ||
8 | #include <linux/log2.h> | ||
9 | #include <linux/list.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | |||
14 | #include <asm/hypervisor.h> | ||
15 | #include <asm/mdesc.h> | ||
16 | #include <asm/prom.h> | ||
17 | #include <asm/oplib.h> | ||
18 | #include <asm/smp.h> | ||
19 | |||
20 | /* Unlike the OBP device tree, the machine description is a full-on | ||
21 | * DAG. An arbitrary number of ARCs are possible from one | ||
22 | * node to other nodes and thus we can't use the OBP device_node | ||
23 | * data structure to represent these nodes inside of the kernel. | ||
24 | * | ||
25 | * Actually, it isn't even a DAG, because there are back pointers | ||
26 | * which create cycles in the graph. | ||
27 | * | ||
28 | * mdesc_hdr and mdesc_elem describe the layout of the data structure | ||
29 | * we get from the Hypervisor. | ||
30 | */ | ||
/* Header of the machine description image as returned by the
 * sun4v_mach_desc() hypervisor call.  The node, name and data blocks
 * follow this 16-byte-aligned header, in that order (see the
 * node_block()/name_block()/data_block() accessors below).
 */
struct mdesc_hdr {
	u32	version;	/* Transport version */
	u32	node_sz;	/* node block size */
	u32	name_sz;	/* name block size */
	u32	data_sz;	/* data block size */
} __attribute__((aligned(16)));
37 | |||
/* One 16-byte element of the node block.  Elements are addressed by
 * index (node_sz / 16 gives the element count); the node/arc values
 * stored in d.val are element indices, not byte offsets.
 */
struct mdesc_elem {
	u8	tag;		/* element type, one of the MD_* tags below */
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;	/* byte offset of the name in the name block */
	union {
		struct {
			u32	data_len;	/* property length in bytes */
			u32	data_offset;	/* byte offset in the data block */
		} data;
		u64	val;	/* value, next-node index, or arc target */
	} d;
};
59 | |||
/* Allocation strategy for an mdesc_handle: lmb-backed during early
 * boot (lmb_mdesc_ops) and kmalloc-backed afterwards
 * (kmalloc_mdesc_memops).
 */
struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};
64 | |||
/* Refcounted wrapper around one MD image.  The raw MD bytes start at
 * 'mdesc' and extend past the end of the struct; handle_size covers
 * the bookkeeping fields plus the full MD.
 */
struct mdesc_handle {
	struct list_head	list;		/* on mdesc_zombie_list when retired */
	struct mdesc_mem_ops	*mops;		/* how to free this handle */
	void			*self_base;	/* pointer the allocator returned */
	atomic_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;		/* must stay last; 16-byte aligned */
};
73 | |||
74 | static void mdesc_handle_init(struct mdesc_handle *hp, | ||
75 | unsigned int handle_size, | ||
76 | void *base) | ||
77 | { | ||
78 | BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1)); | ||
79 | |||
80 | memset(hp, 0, handle_size); | ||
81 | INIT_LIST_HEAD(&hp->list); | ||
82 | hp->self_base = base; | ||
83 | atomic_set(&hp->refcnt, 1); | ||
84 | hp->handle_size = handle_size; | ||
85 | } | ||
86 | |||
87 | static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) | ||
88 | { | ||
89 | unsigned int handle_size, alloc_size; | ||
90 | struct mdesc_handle *hp; | ||
91 | unsigned long paddr; | ||
92 | |||
93 | handle_size = (sizeof(struct mdesc_handle) - | ||
94 | sizeof(struct mdesc_hdr) + | ||
95 | mdesc_size); | ||
96 | alloc_size = PAGE_ALIGN(handle_size); | ||
97 | |||
98 | paddr = lmb_alloc(alloc_size, PAGE_SIZE); | ||
99 | |||
100 | hp = NULL; | ||
101 | if (paddr) { | ||
102 | hp = __va(paddr); | ||
103 | mdesc_handle_init(hp, handle_size, hp); | ||
104 | } | ||
105 | return hp; | ||
106 | } | ||
107 | |||
108 | static void mdesc_lmb_free(struct mdesc_handle *hp) | ||
109 | { | ||
110 | unsigned int alloc_size, handle_size = hp->handle_size; | ||
111 | unsigned long start, end; | ||
112 | |||
113 | BUG_ON(atomic_read(&hp->refcnt) != 0); | ||
114 | BUG_ON(!list_empty(&hp->list)); | ||
115 | |||
116 | alloc_size = PAGE_ALIGN(handle_size); | ||
117 | |||
118 | start = (unsigned long) hp; | ||
119 | end = start + alloc_size; | ||
120 | |||
121 | while (start < end) { | ||
122 | struct page *p; | ||
123 | |||
124 | p = virt_to_page(start); | ||
125 | ClearPageReserved(p); | ||
126 | __free_page(p); | ||
127 | start += PAGE_SIZE; | ||
128 | } | ||
129 | } | ||
130 | |||
/* Memory operations for the boot-time (lmb) allocation strategy. */
static struct mdesc_mem_ops lmb_mdesc_ops = {
	.alloc = mdesc_lmb_alloc,
	.free  = mdesc_lmb_free,
};
135 | |||
136 | static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) | ||
137 | { | ||
138 | unsigned int handle_size; | ||
139 | void *base; | ||
140 | |||
141 | handle_size = (sizeof(struct mdesc_handle) - | ||
142 | sizeof(struct mdesc_hdr) + | ||
143 | mdesc_size); | ||
144 | |||
145 | base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL); | ||
146 | if (base) { | ||
147 | struct mdesc_handle *hp; | ||
148 | unsigned long addr; | ||
149 | |||
150 | addr = (unsigned long)base; | ||
151 | addr = (addr + 15UL) & ~15UL; | ||
152 | hp = (struct mdesc_handle *) addr; | ||
153 | |||
154 | mdesc_handle_init(hp, handle_size, base); | ||
155 | return hp; | ||
156 | } | ||
157 | |||
158 | return NULL; | ||
159 | } | ||
160 | |||
/* Release a kmalloc-backed handle.  self_base is the unaligned
 * pointer kmalloc originally returned, which is what must be freed.
 */
static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}
168 | |||
/* Memory operations for the post-boot (kmalloc) allocation strategy. */
static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};
173 | |||
174 | static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size, | ||
175 | struct mdesc_mem_ops *mops) | ||
176 | { | ||
177 | struct mdesc_handle *hp = mops->alloc(mdesc_size); | ||
178 | |||
179 | if (hp) | ||
180 | hp->mops = mops; | ||
181 | |||
182 | return hp; | ||
183 | } | ||
184 | |||
/* Hand the handle back to whichever allocator created it. */
static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}
189 | |||
/* The currently installed MD, plus retired handles that still have
 * outstanding references.  Both, and the handle refcounts, are
 * manipulated under mdesc_lock.
 */
static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
193 | |||
/* Take a reference on the currently installed MD handle.  Returns
 * NULL if no MD has been installed yet.  Every successful call must
 * be paired with mdesc_release().
 */
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		atomic_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);
208 | |||
/* Drop a reference obtained via mdesc_grab().  When the last
 * reference goes, the handle is unlinked (list_del_init is a no-op
 * unless the handle was parked on mdesc_zombie_list by mdesc_update)
 * and freed while still holding the spinlock.
 */
void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
221 | |||
/* Serializes MD updates with notifier registration, and protects the
 * singly-linked notifier client_list below.
 */
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;
224 | |||
/* Register a client interested in MD nodes named client->node_name.
 * The client is pushed onto client_list (there is no unregister) and
 * its ->add() callback fires immediately for every matching node in
 * the current MD.
 * NOTE(review): assumes cur_mdesc is already installed by
 * sun4v_mdesc_init() -- confirm no caller runs earlier.
 */
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	u64 node;

	mutex_lock(&mdesc_mutex);
	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node);

	mutex_unlock(&mdesc_mutex);
}
238 | |||
239 | static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node) | ||
240 | { | ||
241 | const u64 *id; | ||
242 | u64 a; | ||
243 | |||
244 | id = NULL; | ||
245 | mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { | ||
246 | u64 target; | ||
247 | |||
248 | target = mdesc_arc_target(hp, a); | ||
249 | id = mdesc_get_property(hp, target, | ||
250 | "cfg-handle", NULL); | ||
251 | if (id) | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | return id; | ||
256 | } | ||
257 | |||
258 | /* Run 'func' on nodes which are in A but not in B. */ | ||
259 | static void invoke_on_missing(const char *name, | ||
260 | struct mdesc_handle *a, | ||
261 | struct mdesc_handle *b, | ||
262 | void (*func)(struct mdesc_handle *, u64)) | ||
263 | { | ||
264 | u64 node; | ||
265 | |||
266 | mdesc_for_each_node_by_name(a, node, name) { | ||
267 | int found = 0, is_vdc_port = 0; | ||
268 | const char *name_prop; | ||
269 | const u64 *id; | ||
270 | u64 fnode; | ||
271 | |||
272 | name_prop = mdesc_get_property(a, node, "name", NULL); | ||
273 | if (name_prop && !strcmp(name_prop, "vdc-port")) { | ||
274 | is_vdc_port = 1; | ||
275 | id = parent_cfg_handle(a, node); | ||
276 | } else | ||
277 | id = mdesc_get_property(a, node, "id", NULL); | ||
278 | |||
279 | if (!id) { | ||
280 | printk(KERN_ERR "MD: Cannot find ID for %s node.\n", | ||
281 | (name_prop ? name_prop : name)); | ||
282 | continue; | ||
283 | } | ||
284 | |||
285 | mdesc_for_each_node_by_name(b, fnode, name) { | ||
286 | const u64 *fid; | ||
287 | |||
288 | if (is_vdc_port) { | ||
289 | name_prop = mdesc_get_property(b, fnode, | ||
290 | "name", NULL); | ||
291 | if (!name_prop || | ||
292 | strcmp(name_prop, "vdc-port")) | ||
293 | continue; | ||
294 | fid = parent_cfg_handle(b, fnode); | ||
295 | if (!fid) { | ||
296 | printk(KERN_ERR "MD: Cannot find ID " | ||
297 | "for vdc-port node.\n"); | ||
298 | continue; | ||
299 | } | ||
300 | } else | ||
301 | fid = mdesc_get_property(b, fnode, | ||
302 | "id", NULL); | ||
303 | |||
304 | if (*id == *fid) { | ||
305 | found = 1; | ||
306 | break; | ||
307 | } | ||
308 | } | ||
309 | if (!found) | ||
310 | func(a, node); | ||
311 | } | ||
312 | } | ||
313 | |||
/* Diff one client's nodes between the old and new MD: nodes present
 * only in the old MD get ->remove(), nodes present only in the new
 * MD get ->add().
 */
static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}
321 | |||
322 | static void mdesc_notify_clients(struct mdesc_handle *old_hp, | ||
323 | struct mdesc_handle *new_hp) | ||
324 | { | ||
325 | struct mdesc_notifier_client *p = client_list; | ||
326 | |||
327 | while (p) { | ||
328 | notify_one(p, old_hp, new_hp); | ||
329 | p = p->next; | ||
330 | } | ||
331 | } | ||
332 | |||
/* Re-fetch the machine description from the hypervisor, swap it in as
 * the current MD, and notify registered clients about nodes that
 * appeared or disappeared.
 *
 * The old handle is freed right away if this was its last reference,
 * otherwise it is parked on mdesc_zombie_list until mdesc_release()
 * drops the final reference.
 */
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	/* First call only queries the required buffer length. */
	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	/* The MD may grow between the two hypervisor calls; the
	 * real_len > len test catches that and aborts the update.
	 */
	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		atomic_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	/* Client diffing runs outside the spinlock; concurrent updates
	 * are serialized by mdesc_mutex instead.
	 */
	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}
375 | |||
/* The node block starts immediately after the 16-byte-aligned header. */
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}
380 | |||
/* The name block follows the node block. */
static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}
385 | |||
/* The data block follows the name block. */
static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
390 | |||
391 | u64 mdesc_node_by_name(struct mdesc_handle *hp, | ||
392 | u64 from_node, const char *name) | ||
393 | { | ||
394 | struct mdesc_elem *ep = node_block(&hp->mdesc); | ||
395 | const char *names = name_block(&hp->mdesc); | ||
396 | u64 last_node = hp->mdesc.node_sz / 16; | ||
397 | u64 ret; | ||
398 | |||
399 | if (from_node == MDESC_NODE_NULL) { | ||
400 | ret = from_node = 0; | ||
401 | } else if (from_node >= last_node) { | ||
402 | return MDESC_NODE_NULL; | ||
403 | } else { | ||
404 | ret = ep[from_node].d.val; | ||
405 | } | ||
406 | |||
407 | while (ret < last_node) { | ||
408 | if (ep[ret].tag != MD_NODE) | ||
409 | return MDESC_NODE_NULL; | ||
410 | if (!strcmp(names + ep[ret].name_offset, name)) | ||
411 | break; | ||
412 | ret = ep[ret].d.val; | ||
413 | } | ||
414 | if (ret >= last_node) | ||
415 | ret = MDESC_NODE_NULL; | ||
416 | return ret; | ||
417 | } | ||
418 | EXPORT_SYMBOL(mdesc_node_by_name); | ||
419 | |||
420 | const void *mdesc_get_property(struct mdesc_handle *hp, u64 node, | ||
421 | const char *name, int *lenp) | ||
422 | { | ||
423 | const char *names = name_block(&hp->mdesc); | ||
424 | u64 last_node = hp->mdesc.node_sz / 16; | ||
425 | void *data = data_block(&hp->mdesc); | ||
426 | struct mdesc_elem *ep; | ||
427 | |||
428 | if (node == MDESC_NODE_NULL || node >= last_node) | ||
429 | return NULL; | ||
430 | |||
431 | ep = node_block(&hp->mdesc) + node; | ||
432 | ep++; | ||
433 | for (; ep->tag != MD_NODE_END; ep++) { | ||
434 | void *val = NULL; | ||
435 | int len = 0; | ||
436 | |||
437 | switch (ep->tag) { | ||
438 | case MD_PROP_VAL: | ||
439 | val = &ep->d.val; | ||
440 | len = 8; | ||
441 | break; | ||
442 | |||
443 | case MD_PROP_STR: | ||
444 | case MD_PROP_DATA: | ||
445 | val = data + ep->d.data.data_offset; | ||
446 | len = ep->d.data.data_len; | ||
447 | break; | ||
448 | |||
449 | default: | ||
450 | break; | ||
451 | } | ||
452 | if (!val) | ||
453 | continue; | ||
454 | |||
455 | if (!strcmp(names + ep->name_offset, name)) { | ||
456 | if (lenp) | ||
457 | *lenp = len; | ||
458 | return val; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | return NULL; | ||
463 | } | ||
464 | EXPORT_SYMBOL(mdesc_get_property); | ||
465 | |||
466 | u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type) | ||
467 | { | ||
468 | struct mdesc_elem *ep, *base = node_block(&hp->mdesc); | ||
469 | const char *names = name_block(&hp->mdesc); | ||
470 | u64 last_node = hp->mdesc.node_sz / 16; | ||
471 | |||
472 | if (from == MDESC_NODE_NULL || from >= last_node) | ||
473 | return MDESC_NODE_NULL; | ||
474 | |||
475 | ep = base + from; | ||
476 | |||
477 | ep++; | ||
478 | for (; ep->tag != MD_NODE_END; ep++) { | ||
479 | if (ep->tag != MD_PROP_ARC) | ||
480 | continue; | ||
481 | |||
482 | if (strcmp(names + ep->name_offset, arc_type)) | ||
483 | continue; | ||
484 | |||
485 | return ep - base; | ||
486 | } | ||
487 | |||
488 | return MDESC_NODE_NULL; | ||
489 | } | ||
490 | EXPORT_SYMBOL(mdesc_next_arc); | ||
491 | |||
/* Return the node index an arc element points at.  @arc must be an
 * element index obtained from mdesc_next_arc(); no bounds checking
 * is performed here.
 */
u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);
501 | |||
502 | const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) | ||
503 | { | ||
504 | struct mdesc_elem *ep, *base = node_block(&hp->mdesc); | ||
505 | const char *names = name_block(&hp->mdesc); | ||
506 | u64 last_node = hp->mdesc.node_sz / 16; | ||
507 | |||
508 | if (node == MDESC_NODE_NULL || node >= last_node) | ||
509 | return NULL; | ||
510 | |||
511 | ep = base + node; | ||
512 | if (ep->tag != MD_NODE) | ||
513 | return NULL; | ||
514 | |||
515 | return names + ep->name_offset; | ||
516 | } | ||
517 | EXPORT_SYMBOL(mdesc_node_name); | ||
518 | |||
519 | static void __init report_platform_properties(void) | ||
520 | { | ||
521 | struct mdesc_handle *hp = mdesc_grab(); | ||
522 | u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); | ||
523 | const char *s; | ||
524 | const u64 *v; | ||
525 | |||
526 | if (pn == MDESC_NODE_NULL) { | ||
527 | prom_printf("No platform node in machine-description.\n"); | ||
528 | prom_halt(); | ||
529 | } | ||
530 | |||
531 | s = mdesc_get_property(hp, pn, "banner-name", NULL); | ||
532 | printk("PLATFORM: banner-name [%s]\n", s); | ||
533 | s = mdesc_get_property(hp, pn, "name", NULL); | ||
534 | printk("PLATFORM: name [%s]\n", s); | ||
535 | |||
536 | v = mdesc_get_property(hp, pn, "hostid", NULL); | ||
537 | if (v) | ||
538 | printk("PLATFORM: hostid [%08lx]\n", *v); | ||
539 | v = mdesc_get_property(hp, pn, "serial#", NULL); | ||
540 | if (v) | ||
541 | printk("PLATFORM: serial# [%08lx]\n", *v); | ||
542 | v = mdesc_get_property(hp, pn, "stick-frequency", NULL); | ||
543 | printk("PLATFORM: stick-frequency [%08lx]\n", *v); | ||
544 | v = mdesc_get_property(hp, pn, "mac-address", NULL); | ||
545 | if (v) | ||
546 | printk("PLATFORM: mac-address [%lx]\n", *v); | ||
547 | v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL); | ||
548 | if (v) | ||
549 | printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); | ||
550 | v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL); | ||
551 | if (v) | ||
552 | printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); | ||
553 | v = mdesc_get_property(hp, pn, "max-cpus", NULL); | ||
554 | if (v) | ||
555 | printk("PLATFORM: max-cpus [%lu]\n", *v); | ||
556 | |||
557 | #ifdef CONFIG_SMP | ||
558 | { | ||
559 | int max_cpu, i; | ||
560 | |||
561 | if (v) { | ||
562 | max_cpu = *v; | ||
563 | if (max_cpu > NR_CPUS) | ||
564 | max_cpu = NR_CPUS; | ||
565 | } else { | ||
566 | max_cpu = NR_CPUS; | ||
567 | } | ||
568 | for (i = 0; i < max_cpu; i++) | ||
569 | cpu_set(i, cpu_possible_map); | ||
570 | } | ||
571 | #endif | ||
572 | |||
573 | mdesc_release(hp); | ||
574 | } | ||
575 | |||
/* Record one cache node's geometry into @c: level-1 caches fill the
 * I/D cache fields depending on the "type" property, level-2 fills
 * the external cache fields.  For an L1 cache, forward arcs are then
 * followed so the L2 cache behind it is picked up recursively.
 *
 * NOTE(review): "level", "size" and "line-size" are dereferenced
 * without NULL checks -- assumes every cache node carries them.
 */
static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
					struct mdesc_handle *hp,
					u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		/* "type" is a property list; an L1 cache can be
		 * instruction, data, or both.
		 */
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		/* Descend one level: the cache behind an L1 cache. */
		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}
620 | |||
/* Propagate @core_id to every cpu reachable from @mp (an L1 icache
 * node) via back-arcs.  A back-arc either points directly at a cpu
 * node, or at an intermediate node whose own back-arcs are scanned
 * one level deep for cpus.
 */
static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
				    int core_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (!strcmp(name, "cpu")) {
			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < NR_CPUS)
				cpu_data(*id).core_id = core_id;
		} else {
			u64 j;

			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
				u64 n = mdesc_arc_target(hp, j);
				const char *n_name;

				n_name = mdesc_node_name(hp, n);
				if (strcmp(n_name, "cpu"))
					continue;

				id = mdesc_get_property(hp, n, "id", NULL);
				if (*id < NR_CPUS)
					cpu_data(*id).core_id = core_id;
			}
		}
	}
}
654 | |||
655 | static void __devinit set_core_ids(struct mdesc_handle *hp) | ||
656 | { | ||
657 | int idx; | ||
658 | u64 mp; | ||
659 | |||
660 | idx = 1; | ||
661 | mdesc_for_each_node_by_name(hp, mp, "cache") { | ||
662 | const u64 *level; | ||
663 | const char *type; | ||
664 | int len; | ||
665 | |||
666 | level = mdesc_get_property(hp, mp, "level", NULL); | ||
667 | if (*level != 1) | ||
668 | continue; | ||
669 | |||
670 | type = mdesc_get_property(hp, mp, "type", &len); | ||
671 | if (!of_find_in_proplist(type, "instn", len)) | ||
672 | continue; | ||
673 | |||
674 | mark_core_ids(hp, mp, idx); | ||
675 | |||
676 | idx++; | ||
677 | } | ||
678 | } | ||
679 | |||
680 | static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, | ||
681 | int proc_id) | ||
682 | { | ||
683 | u64 a; | ||
684 | |||
685 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { | ||
686 | u64 t = mdesc_arc_target(hp, a); | ||
687 | const char *name; | ||
688 | const u64 *id; | ||
689 | |||
690 | name = mdesc_node_name(hp, t); | ||
691 | if (strcmp(name, "cpu")) | ||
692 | continue; | ||
693 | |||
694 | id = mdesc_get_property(hp, t, "id", NULL); | ||
695 | if (*id < NR_CPUS) | ||
696 | cpu_data(*id).proc_id = proc_id; | ||
697 | } | ||
698 | } | ||
699 | |||
700 | static void __devinit __set_proc_ids(struct mdesc_handle *hp, | ||
701 | const char *exec_unit_name) | ||
702 | { | ||
703 | int idx; | ||
704 | u64 mp; | ||
705 | |||
706 | idx = 0; | ||
707 | mdesc_for_each_node_by_name(hp, mp, exec_unit_name) { | ||
708 | const char *type; | ||
709 | int len; | ||
710 | |||
711 | type = mdesc_get_property(hp, mp, "type", &len); | ||
712 | if (!of_find_in_proplist(type, "int", len) && | ||
713 | !of_find_in_proplist(type, "integer", len)) | ||
714 | continue; | ||
715 | |||
716 | mark_proc_ids(hp, mp, idx); | ||
717 | |||
718 | idx++; | ||
719 | } | ||
720 | } | ||
721 | |||
/* Both spellings of the execution unit node name occur in the wild. */
static void __devinit set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}
727 | |||
/* Turn a "...-#bits" MD property into a mondo queue mask: a queue of
 * 2^val entries of 64 bytes each, i.e. mask = (1 << val) * 64 - 1.
 * Falls back to 2^def entries when the property is missing or its
 * value is zero or >= 64.
 *
 * Bug fix: the shift is done in 64 bits.  The old "1U << val" was
 * undefined behavior for val in [32, 63] (shift count >= width of
 * unsigned int).  For every val whose result fits the 32-bit mask the
 * value is unchanged; oversized vals now saturate to an all-ones mask
 * via truncation instead of invoking UB.
 */
static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
					 unsigned char def)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	*mask = (unsigned int)((((u64)1 << val) * 64) - 1);
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}
746 | |||
747 | static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, | ||
748 | struct trap_per_cpu *tb) | ||
749 | { | ||
750 | const u64 *val; | ||
751 | |||
752 | val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL); | ||
753 | get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); | ||
754 | |||
755 | val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL); | ||
756 | get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); | ||
757 | |||
758 | val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL); | ||
759 | get_one_mondo_bits(val, &tb->resum_qmask, 6); | ||
760 | |||
761 | val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL); | ||
762 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); | ||
763 | } | ||
764 | |||
/* Populate cpu_data() for every cpu node in the current MD whose id
 * is in @mask: clock tick, cache geometry (via forward arcs to
 * "cache" nodes, directly or one level removed), and the per-cpu
 * trap-block queue masks.  Core and proc ids are then recomputed for
 * all cpus and the sibling/core maps refreshed.
 */
void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 mp;

	ncpus_probed = 0;
	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		/* NOTE(review): "id" and "clock-frequency" are
		 * dereferenced without NULL checks -- assumes every
		 * cpu node carries both.
		 */
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
		struct trap_per_cpu *tb;
		cpuinfo_sparc *c;
		int cpuid;
		u64 a;

		ncpus_probed++;

		cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpu_isset(cpuid, mask))
			continue;
#else
		/* On uniprocessor we only want the values for the
		 * real physical cpu the kernel booted onto, however
		 * cpu_data() only has one entry at index 0.
		 */
		if (cpuid != real_hard_smp_processor_id())
			continue;
		cpuid = 0;
#endif

		c = &cpu_data(cpuid);
		c->clock_tick = *cfreq;

		tb = &trap_block[cpuid];
		get_mondo_data(hp, mp, tb);

		/* Cache nodes hang off the cpu either directly or via
		 * one intermediate node (e.g. an execution unit), so
		 * follow forward arcs up to two levels deep.
		 */
		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 j, t = mdesc_arc_target(hp, a);
			const char *t_name;

			t_name = mdesc_node_name(hp, t);
			if (!strcmp(t_name, "cache")) {
				fill_in_one_cache(c, hp, t);
				continue;
			}

			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
				u64 n = mdesc_arc_target(hp, j);
				const char *n_name;

				n_name = mdesc_node_name(hp, n);
				if (!strcmp(n_name, "cache"))
					fill_in_one_cache(c, hp, n);
			}
		}

#ifdef CONFIG_SMP
		cpu_set(cpuid, cpu_present_map);
#endif

		/* Defaults; overwritten below by set_core_ids() and
		 * set_proc_ids() where the MD provides topology.
		 */
		c->core_id = 0;
		c->proc_id = -1;
	}

#ifdef CONFIG_SMP
	sparc64_multi_core = 1;
#endif

	set_core_ids(hp);
	set_proc_ids(hp);

	smp_fill_in_sib_core_maps();

	mdesc_release(hp);
}
847 | |||
848 | static ssize_t mdesc_read(struct file *file, char __user *buf, | ||
849 | size_t len, loff_t *offp) | ||
850 | { | ||
851 | struct mdesc_handle *hp = mdesc_grab(); | ||
852 | int err; | ||
853 | |||
854 | if (!hp) | ||
855 | return -ENODEV; | ||
856 | |||
857 | err = hp->handle_size; | ||
858 | if (len < hp->handle_size) | ||
859 | err = -EMSGSIZE; | ||
860 | else if (copy_to_user(buf, &hp->mdesc, hp->handle_size)) | ||
861 | err = -EFAULT; | ||
862 | mdesc_release(hp); | ||
863 | |||
864 | return err; | ||
865 | } | ||
866 | |||
/* Userspace interface: /dev/mdesc, a read-only view of the raw MD. */
static const struct file_operations mdesc_fops = {
	.read	= mdesc_read,
	.owner	= THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};
877 | |||
/* Register the /dev/mdesc misc device once the driver core is up. */
static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);
884 | |||
/* Boot-time entry point: ask the hypervisor for the MD size, allocate
 * a handle from lmb (the only allocator available this early), fetch
 * the MD into it, and install it as cur_mdesc.  Any failure is fatal
 * and stops in the PROM.  Finally, report platform properties and
 * fill in cpu data for all possible cpus.
 */
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;
	cpumask_t mask;

	/* First call only queries the required buffer length. */
	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &lmb_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();

	cpus_setall(mask);
	mdesc_fill_in_cpu_data(mask);
}
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S new file mode 100644 index 000000000000..753b4f031bfb --- /dev/null +++ b/arch/sparc/kernel/misctrap.S | |||
@@ -0,0 +1,97 @@ | |||
#ifdef CONFIG_KGDB
	/* KGDB software breakpoint: trap into the debugger stub via
	 * trap type 0x72, then return to the caller.
	 */
	.globl		arch_kgdb_breakpoint
	.type		arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
	ta		0x72
	retl
	 nop			! delay slot
	.size		arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif
10 | |||
	/* Privileged-action trap entry: clear the D-MMU fault status
	 * latch, build a trap frame via etrap (%g7 holds the return
	 * point, label 109), and call do_privact() with the pt_regs
	 * pointer before resuming through rtrap.
	 */
	.type		__do_privact,#function
__do_privact:
	mov		TLB_SFSR, %g3
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	call		do_privact
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
	.size		__do_privact,.-__do_privact
24 | |||
	/* Memory-address-not-aligned trap entry.
	 *
	 * The trap level is compared against 1 up front; the bgu after
	 * the MMU registers are read redirects TL > 1 faults (taken
	 * inside register window spill/fill handling) to winfix_mna.
	 * Otherwise fall through to etrap and call
	 * mem_address_unaligned().
	 */
	.type		do_mna,#function
do_mna:
	rdpr		%tl, %g3
	cmp		%g3, 1

	/* Setup %g4/%g5 now as they are used in the
	 * winfixup code.
	 */
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address
	ldxa		[%g3] ASI_DMMU, %g5	! %g5 = fault status
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	bgu,pn		%icc, winfix_mna
	 rdpr		%tpc, %g3

1:	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		mem_address_unaligned
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
	.size		do_mna,.-do_mna
52 | |||
	/* LDDF alignment trap: capture the D-MMU fault status (%g5)
	 * and fault address (%g4), clear the fault latch, then etrap
	 * and hand off to handle_lddfmna().
	 */
	.type		do_lddfmna,#function
do_lddfmna:
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		handle_lddfmna
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
	.size		do_lddfmna,.-do_lddfmna
71 | |||
	/* STDF alignment trap: same sequence as do_lddfmna above, but
	 * the C handler is handle_stdfmna().
	 */
	.type		do_stdfmna,#function
do_stdfmna:
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1
	mov		%l5, %o2
	call		handle_stdfmna
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
	.size		do_stdfmna,.-do_stdfmna
90 | |||
	/* Breakpoint trap: pass the saved register frame to
	 * sparc_breakpoint() and resume via rtrap.
	 */
	.type		breakpoint_trap,#function
breakpoint_trap:
	call		sparc_breakpoint
	 add		%sp, PTREGS_OFF, %o0
	ba,pt		%xcc, rtrap
	 nop
	.size		breakpoint_trap,.-breakpoint_trap
diff --git a/arch/sparc/kernel/module_64.c b/arch/sparc/kernel/module_64.c new file mode 100644 index 000000000000..158484bf5999 --- /dev/null +++ b/arch/sparc/kernel/module_64.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /* Kernel module help for sparc64. | ||
2 | * | ||
3 | * Copyright (C) 2001 Rusty Russell. | ||
4 | * Copyright (C) 2002 David S. Miller. | ||
5 | */ | ||
6 | |||
7 | #include <linux/moduleloader.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/elf.h> | ||
10 | #include <linux/vmalloc.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/mm.h> | ||
15 | |||
16 | #include <asm/processor.h> | ||
17 | #include <asm/spitfire.h> | ||
18 | |||
19 | static void *module_map(unsigned long size) | ||
20 | { | ||
21 | struct vm_struct *area; | ||
22 | |||
23 | size = PAGE_ALIGN(size); | ||
24 | if (!size || size > MODULES_LEN) | ||
25 | return NULL; | ||
26 | |||
27 | area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); | ||
28 | if (!area) | ||
29 | return NULL; | ||
30 | |||
31 | return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); | ||
32 | } | ||
33 | |||
34 | void *module_alloc(unsigned long size) | ||
35 | { | ||
36 | void *ret; | ||
37 | |||
38 | /* We handle the zero case fine, unlike vmalloc */ | ||
39 | if (size == 0) | ||
40 | return NULL; | ||
41 | |||
42 | ret = module_map(size); | ||
43 | if (!ret) | ||
44 | ret = ERR_PTR(-ENOMEM); | ||
45 | else | ||
46 | memset(ret, 0, size); | ||
47 | |||
48 | return ret; | ||
49 | } | ||
50 | |||
/* Free memory returned from module_core_alloc/module_init_alloc.
 * Safe on NULL (vfree ignores it).
 */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
           table entries. */
}
58 | |||
59 | /* Make generic code ignore STT_REGISTER dummy undefined symbols. */ | ||
60 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
61 | Elf_Shdr *sechdrs, | ||
62 | char *secstrings, | ||
63 | struct module *mod) | ||
64 | { | ||
65 | unsigned int symidx; | ||
66 | Elf64_Sym *sym; | ||
67 | const char *strtab; | ||
68 | int i; | ||
69 | |||
70 | for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) { | ||
71 | if (symidx == hdr->e_shnum-1) { | ||
72 | printk("%s: no symtab found.\n", mod->name); | ||
73 | return -ENOEXEC; | ||
74 | } | ||
75 | } | ||
76 | sym = (Elf64_Sym *)sechdrs[symidx].sh_addr; | ||
77 | strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr; | ||
78 | |||
79 | for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) { | ||
80 | if (sym[i].st_shndx == SHN_UNDEF && | ||
81 | ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER) | ||
82 | sym[i].st_shndx = SHN_ABS; | ||
83 | } | ||
84 | return 0; | ||
85 | } | ||
86 | |||
/* SHT_REL (non-addend) relocation sections are never produced for
 * sparc64 modules, so this entry point only reports the malformed
 * image and rejects it.  All real work happens in
 * apply_relocate_add() below.
 */
int apply_relocate(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
97 | |||
98 | int apply_relocate_add(Elf64_Shdr *sechdrs, | ||
99 | const char *strtab, | ||
100 | unsigned int symindex, | ||
101 | unsigned int relsec, | ||
102 | struct module *me) | ||
103 | { | ||
104 | unsigned int i; | ||
105 | Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
106 | Elf64_Sym *sym; | ||
107 | u8 *location; | ||
108 | u32 *loc32; | ||
109 | |||
110 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
111 | Elf64_Addr v; | ||
112 | |||
113 | /* This is where to make the change */ | ||
114 | location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
115 | + rel[i].r_offset; | ||
116 | loc32 = (u32 *) location; | ||
117 | |||
118 | BUG_ON(((u64)location >> (u64)32) != (u64)0); | ||
119 | |||
120 | /* This is the symbol it is referring to. Note that all | ||
121 | undefined symbols have been resolved. */ | ||
122 | sym = (Elf64_Sym *)sechdrs[symindex].sh_addr | ||
123 | + ELF64_R_SYM(rel[i].r_info); | ||
124 | v = sym->st_value + rel[i].r_addend; | ||
125 | |||
126 | switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) { | ||
127 | case R_SPARC_64: | ||
128 | location[0] = v >> 56; | ||
129 | location[1] = v >> 48; | ||
130 | location[2] = v >> 40; | ||
131 | location[3] = v >> 32; | ||
132 | location[4] = v >> 24; | ||
133 | location[5] = v >> 16; | ||
134 | location[6] = v >> 8; | ||
135 | location[7] = v >> 0; | ||
136 | break; | ||
137 | |||
138 | case R_SPARC_32: | ||
139 | location[0] = v >> 24; | ||
140 | location[1] = v >> 16; | ||
141 | location[2] = v >> 8; | ||
142 | location[3] = v >> 0; | ||
143 | break; | ||
144 | |||
145 | case R_SPARC_DISP32: | ||
146 | v -= (Elf64_Addr) location; | ||
147 | *loc32 = v; | ||
148 | break; | ||
149 | |||
150 | case R_SPARC_WDISP30: | ||
151 | v -= (Elf64_Addr) location; | ||
152 | *loc32 = (*loc32 & ~0x3fffffff) | | ||
153 | ((v >> 2) & 0x3fffffff); | ||
154 | break; | ||
155 | |||
156 | case R_SPARC_WDISP22: | ||
157 | v -= (Elf64_Addr) location; | ||
158 | *loc32 = (*loc32 & ~0x3fffff) | | ||
159 | ((v >> 2) & 0x3fffff); | ||
160 | break; | ||
161 | |||
162 | case R_SPARC_WDISP19: | ||
163 | v -= (Elf64_Addr) location; | ||
164 | *loc32 = (*loc32 & ~0x7ffff) | | ||
165 | ((v >> 2) & 0x7ffff); | ||
166 | break; | ||
167 | |||
168 | case R_SPARC_LO10: | ||
169 | *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff); | ||
170 | break; | ||
171 | |||
172 | case R_SPARC_HI22: | ||
173 | *loc32 = (*loc32 & ~0x3fffff) | | ||
174 | ((v >> 10) & 0x3fffff); | ||
175 | break; | ||
176 | |||
177 | case R_SPARC_OLO10: | ||
178 | *loc32 = (*loc32 & ~0x1fff) | | ||
179 | (((v & 0x3ff) + | ||
180 | (ELF64_R_TYPE(rel[i].r_info) >> 8)) | ||
181 | & 0x1fff); | ||
182 | break; | ||
183 | |||
184 | default: | ||
185 | printk(KERN_ERR "module %s: Unknown relocation: %x\n", | ||
186 | me->name, | ||
187 | (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff)); | ||
188 | return -ENOEXEC; | ||
189 | }; | ||
190 | } | ||
191 | return 0; | ||
192 | } | ||
193 | |||
/* Post-link fixup: make sure the freshly written module text is
 * visible to the instruction fetch path.
 *
 * Always returns 0.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* Cheetah's I-cache is fully coherent.  */
	if (tlb_type == spitfire) {
		unsigned long va;

		/* Spill register windows before poking the I-cache. */
		flushw_all();
		/* Invalidate every I-cache tag.
		 * NOTE(review): the bound (PAGE_SIZE << 1) and 32-byte
		 * step presumably match Spitfire's 16K/32B-line
		 * I-cache geometry -- confirm against the CPU manual.
		 */
		for (va =  0; va < (PAGE_SIZE << 1); va += 32)
			spitfire_put_icache_tag(va, 0x0);
		/* Flush the pipeline so no stale instructions remain. */
		__asm__ __volatile__("flush %g6");
	}

	return 0;
}
210 | |||
/* No sparc64-specific state to tear down when a module is unloaded. */
void module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c new file mode 100644 index 000000000000..0f616ae3246c --- /dev/null +++ b/arch/sparc/kernel/of_device_64.c | |||
@@ -0,0 +1,898 @@ | |||
1 | #include <linux/string.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/of.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/mod_devicetable.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/irq.h> | ||
10 | #include <linux/of_device.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | |||
13 | void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) | ||
14 | { | ||
15 | unsigned long ret = res->start + offset; | ||
16 | struct resource *r; | ||
17 | |||
18 | if (res->flags & IORESOURCE_MEM) | ||
19 | r = request_mem_region(ret, size, name); | ||
20 | else | ||
21 | r = request_region(ret, size, name); | ||
22 | if (!r) | ||
23 | ret = 0; | ||
24 | |||
25 | return (void __iomem *) ret; | ||
26 | } | ||
27 | EXPORT_SYMBOL(of_ioremap); | ||
28 | |||
29 | void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) | ||
30 | { | ||
31 | if (res->flags & IORESOURCE_MEM) | ||
32 | release_mem_region((unsigned long) base, size); | ||
33 | else | ||
34 | release_region((unsigned long) base, size); | ||
35 | } | ||
36 | EXPORT_SYMBOL(of_iounmap); | ||
37 | |||
38 | static int node_match(struct device *dev, void *data) | ||
39 | { | ||
40 | struct of_device *op = to_of_device(dev); | ||
41 | struct device_node *dp = data; | ||
42 | |||
43 | return (op->node == dp); | ||
44 | } | ||
45 | |||
46 | struct of_device *of_find_device_by_node(struct device_node *dp) | ||
47 | { | ||
48 | struct device *dev = bus_find_device(&of_platform_bus_type, NULL, | ||
49 | dp, node_match); | ||
50 | |||
51 | if (dev) | ||
52 | return to_of_device(dev); | ||
53 | |||
54 | return NULL; | ||
55 | } | ||
56 | EXPORT_SYMBOL(of_find_device_by_node); | ||
57 | |||
58 | unsigned int irq_of_parse_and_map(struct device_node *node, int index) | ||
59 | { | ||
60 | struct of_device *op = of_find_device_by_node(node); | ||
61 | |||
62 | if (!op || index >= op->num_irqs) | ||
63 | return 0; | ||
64 | |||
65 | return op->irqs[index]; | ||
66 | } | ||
67 | EXPORT_SYMBOL(irq_of_parse_and_map); | ||
68 | |||
69 | /* Take the archdata values for IOMMU, STC, and HOSTDATA found in | ||
70 | * BUS and propagate to all child of_device objects. | ||
71 | */ | ||
72 | void of_propagate_archdata(struct of_device *bus) | ||
73 | { | ||
74 | struct dev_archdata *bus_sd = &bus->dev.archdata; | ||
75 | struct device_node *bus_dp = bus->node; | ||
76 | struct device_node *dp; | ||
77 | |||
78 | for (dp = bus_dp->child; dp; dp = dp->sibling) { | ||
79 | struct of_device *op = of_find_device_by_node(dp); | ||
80 | |||
81 | op->dev.archdata.iommu = bus_sd->iommu; | ||
82 | op->dev.archdata.stc = bus_sd->stc; | ||
83 | op->dev.archdata.host_controller = bus_sd->host_controller; | ||
84 | op->dev.archdata.numa_node = bus_sd->numa_node; | ||
85 | |||
86 | if (dp->child) | ||
87 | of_propagate_archdata(op); | ||
88 | } | ||
89 | } | ||
90 | |||
/* The OF platform bus; initialized in of_bus_driver_init() below. */
struct bus_type of_platform_bus_type;
EXPORT_SYMBOL(of_platform_bus_type);
93 | |||
94 | static inline u64 of_read_addr(const u32 *cell, int size) | ||
95 | { | ||
96 | u64 r = 0; | ||
97 | while (size--) | ||
98 | r = (r << 32) | *(cell++); | ||
99 | return r; | ||
100 | } | ||
101 | |||
102 | static void __init get_cells(struct device_node *dp, | ||
103 | int *addrc, int *sizec) | ||
104 | { | ||
105 | if (addrc) | ||
106 | *addrc = of_n_addr_cells(dp); | ||
107 | if (sizec) | ||
108 | *sizec = of_n_size_cells(dp); | ||
109 | } | ||
110 | |||
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4

/* Per-bus-type address/interrupt translation hooks. */
struct of_bus {
	const char	*name;		/* human-readable bus name */
	const char	*addr_prop_name; /* property holding addresses
					  * ("reg" or "assigned-addresses") */
	int		(*match)(struct device_node *parent); /* NULL = match all */
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	/* Translate ADDR through one "ranges" entry; 0 on success. */
	int		(*map)(u32 *addr, const u32 *range,
			       int na, int ns, int pna);
	/* Derive IORESOURCE_* flags from an address, given the flags
	 * accumulated so far.
	 */
	unsigned long	(*get_flags)(const u32 *addr, unsigned long);
};
124 | |||
125 | /* | ||
126 | * Default translator (generic bus) | ||
127 | */ | ||
128 | |||
/* Generic bus: cell counts come straight from of_n_addr_cells() /
 * of_n_size_cells() via get_cells().
 */
static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	get_cells(dev, addrc, sizec);
}
134 | |||
135 | /* Make sure the least significant 64-bits are in-range. Even | ||
136 | * for 3 or 4 cell values it is a good enough approximation. | ||
137 | */ | ||
138 | static int of_out_of_range(const u32 *addr, const u32 *base, | ||
139 | const u32 *size, int na, int ns) | ||
140 | { | ||
141 | u64 a = of_read_addr(addr, na); | ||
142 | u64 b = of_read_addr(base, na); | ||
143 | |||
144 | if (a < b) | ||
145 | return 1; | ||
146 | |||
147 | b += of_read_addr(size, ns); | ||
148 | if (a >= b) | ||
149 | return 1; | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
/* Generic "ranges" translation: if ADDR (NA cells) falls inside this
 * range entry, rewrite it in place as a parent-bus address (PNA
 * cells).  Returns 0 on success, -EINVAL when the entry does not
 * cover ADDR or the size-cell count is unsupported.
 */
static int of_bus_default_map(u32 *addr, const u32 *range,
			      int na, int ns, int pna)
{
	u32 result[OF_MAX_ADDR_CELLS];
	int i;

	if (ns > 2) {
		printk("of_device: Cannot handle size cells (%d) > 2.", ns);
		return -EINVAL;
	}

	/* Range layout: child-addr (na) | parent-addr (pna) | size (ns). */
	if (of_out_of_range(addr, range, range + na + pna, na, ns))
		return -EINVAL;

	/* Start with the parent range base. */
	memcpy(result, range + na, pna * 4);

	/* Add in the child address offset. */
	for (i = 0; i < na; i++)
		result[pna - 1 - i] +=
			(addr[na - 1 - i] -
			 range[na - 1 - i]);

	memcpy(addr, result, pna * 4);

	return 0;
}
181 | |||
182 | static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) | ||
183 | { | ||
184 | if (flags) | ||
185 | return flags; | ||
186 | return IORESOURCE_MEM; | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * PCI bus specific translator | ||
191 | */ | ||
192 | |||
193 | static int of_bus_pci_match(struct device_node *np) | ||
194 | { | ||
195 | if (!strcmp(np->name, "pci")) { | ||
196 | const char *model = of_get_property(np, "model", NULL); | ||
197 | |||
198 | if (model && !strcmp(model, "SUNW,simba")) | ||
199 | return 0; | ||
200 | |||
201 | /* Do not do PCI specific frobbing if the | ||
202 | * PCI bridge lacks a ranges property. We | ||
203 | * want to pass it through up to the next | ||
204 | * parent as-is, not with the PCI translate | ||
205 | * method which chops off the top address cell. | ||
206 | */ | ||
207 | if (!of_find_property(np, "ranges", NULL)) | ||
208 | return 0; | ||
209 | |||
210 | return 1; | ||
211 | } | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static int of_bus_simba_match(struct device_node *np) | ||
217 | { | ||
218 | const char *model = of_get_property(np, "model", NULL); | ||
219 | |||
220 | if (model && !strcmp(model, "SUNW,simba")) | ||
221 | return 1; | ||
222 | |||
223 | /* Treat PCI busses lacking ranges property just like | ||
224 | * simba. | ||
225 | */ | ||
226 | if (!strcmp(np->name, "pci")) { | ||
227 | if (!of_find_property(np, "ranges", NULL)) | ||
228 | return 1; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
/* Simba bridges perform no address translation: report success
 * without touching ADDR so the value passes through unchanged.
 */
static int of_bus_simba_map(u32 *addr, const u32 *range,
			    int na, int ns, int pna)
{
	return 0;
}
239 | |||
240 | static void of_bus_pci_count_cells(struct device_node *np, | ||
241 | int *addrc, int *sizec) | ||
242 | { | ||
243 | if (addrc) | ||
244 | *addrc = 3; | ||
245 | if (sizec) | ||
246 | *sizec = 2; | ||
247 | } | ||
248 | |||
/* PCI "ranges" translation.  Cell 0 of a PCI address encodes the
 * space type (config/IO/mem32/mem64); it must match the range entry
 * and is dropped from the arithmetic, which operates on the
 * remaining na-1 cells.  Returns 0 on success, -EINVAL otherwise.
 */
static int of_bus_pci_map(u32 *addr, const u32 *range,
			  int na, int ns, int pna)
{
	u32 result[OF_MAX_ADDR_CELLS];
	int i;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & 0x03000000)
		return -EINVAL;

	if (of_out_of_range(addr + 1, range + 1, range + na + pna,
			    na - 1, ns))
		return -EINVAL;

	/* Start with the parent range base. */
	memcpy(result, range + na, pna * 4);

	/* Add in the child address offset, skipping high cell. */
	for (i = 0; i < na - 1; i++)
		result[pna - 1 - i] +=
			(addr[na - 1 - i] -
			 range[na - 1 - i]);

	memcpy(addr, result, pna * 4);

	return 0;
}
276 | |||
/* Decode resource flags from the PCI phys.hi cell: bits 24-25 give
 * the address space, bit 30 marks prefetchable memory.  Config
 * space (0x00) yields no flags.
 */
static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
{
	u32 w = addr[0];

	/* For PCI, we override whatever child busses may have used.  */
	flags = 0;
	switch((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;

	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}
297 | |||
298 | /* | ||
299 | * SBUS bus specific translator | ||
300 | */ | ||
301 | |||
302 | static int of_bus_sbus_match(struct device_node *np) | ||
303 | { | ||
304 | return !strcmp(np->name, "sbus") || | ||
305 | !strcmp(np->name, "sbi"); | ||
306 | } | ||
307 | |||
308 | static void of_bus_sbus_count_cells(struct device_node *child, | ||
309 | int *addrc, int *sizec) | ||
310 | { | ||
311 | if (addrc) | ||
312 | *addrc = 2; | ||
313 | if (sizec) | ||
314 | *sizec = 1; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * FHC/Central bus specific translator. | ||
319 | * | ||
320 | * This is just needed to hard-code the address and size cell | ||
321 | * counts. 'fhc' and 'central' nodes lack the #address-cells and | ||
322 | * #size-cells properties, and if you walk to the root on such | ||
323 | * Enterprise boxes all you'll get is a #size-cells of 2 which is | ||
324 | * not what we want to use. | ||
325 | */ | ||
326 | static int of_bus_fhc_match(struct device_node *np) | ||
327 | { | ||
328 | return !strcmp(np->name, "fhc") || | ||
329 | !strcmp(np->name, "central"); | ||
330 | } | ||
331 | |||
332 | #define of_bus_fhc_count_cells of_bus_sbus_count_cells | ||
333 | |||
334 | /* | ||
335 | * Array of bus specific translators | ||
336 | */ | ||
337 | |||
/* Ordered bus-translator table; of_match_bus() returns the first
 * entry whose match() accepts the node.  The final entry has a NULL
 * match and therefore catches everything, so lookup cannot fail.
 */
static struct of_bus of_busses[] = {
	/* PCI */
	{
		.name = "pci",
		.addr_prop_name = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.get_flags = of_bus_pci_get_flags,
	},
	/* SIMBA */
	{
		.name = "simba",
		.addr_prop_name = "assigned-addresses",
		.match = of_bus_simba_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_simba_map,	/* pass-through, no translation */
		.get_flags = of_bus_pci_get_flags,
	},
	/* SBUS */
	{
		.name = "sbus",
		.addr_prop_name = "reg",
		.match = of_bus_sbus_match,
		.count_cells = of_bus_sbus_count_cells,
		.map = of_bus_default_map,
		.get_flags = of_bus_default_get_flags,
	},
	/* FHC */
	{
		.name = "fhc",
		.addr_prop_name = "reg",
		.match = of_bus_fhc_match,
		.count_cells = of_bus_fhc_count_cells,
		.map = of_bus_default_map,
		.get_flags = of_bus_default_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addr_prop_name = "reg",
		.match = NULL,		/* matches any node */
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.get_flags = of_bus_default_get_flags,
	},
};
385 | |||
386 | static struct of_bus *of_match_bus(struct device_node *np) | ||
387 | { | ||
388 | int i; | ||
389 | |||
390 | for (i = 0; i < ARRAY_SIZE(of_busses); i ++) | ||
391 | if (!of_busses[i].match || of_busses[i].match(np)) | ||
392 | return &of_busses[i]; | ||
393 | BUG(); | ||
394 | return NULL; | ||
395 | } | ||
396 | |||
/* Translate ADDR one level up, from the child bus (BUS, NA/NS cells)
 * into PARENT's address space (PBUS, PNA cells), by scanning
 * PARENT's "ranges" property.  Returns 0 when ADDR was rewritten
 * (or passed through), 1 when no range entry matched.
 */
static int __init build_one_resource(struct device_node *parent,
				     struct of_bus *bus,
				     struct of_bus *pbus,
				     u32 *addr,
				     int na, int ns, int pna)
{
	const u32 *ranges;
	int rone, rlen;

	ranges = of_get_property(parent, "ranges", &rlen);
	if (ranges == NULL || rlen == 0) {
		/* No ranges: identity mapping.  Widen ADDR from NA
		 * to PNA cells, zero-filling the high cells.
		 */
		u32 result[OF_MAX_ADDR_CELLS];
		int i;

		memset(result, 0, pna * 4);
		for (i = 0; i < na; i++)
			result[pna - 1 - i] =
				addr[na - 1 - i];

		memcpy(addr, result, pna * 4);
		return 0;
	}

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;	/* cells per range entry */
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		if (!bus->map(addr, ranges, na, ns, pna))
			return 0;
	}

	/* When we miss an I/O space match on PCI, just pass it up
	 * to the next PCI bridge and/or controller.
	 */
	if (!strcmp(bus->name, "pci") &&
	    (addr[0] & 0x03000000) == 0x01000000)
		return 0;

	return 1;
}
437 | |||
438 | static int __init use_1to1_mapping(struct device_node *pp) | ||
439 | { | ||
440 | /* If we have a ranges property in the parent, use it. */ | ||
441 | if (of_find_property(pp, "ranges", NULL) != NULL) | ||
442 | return 0; | ||
443 | |||
444 | /* If the parent is the dma node of an ISA bus, pass | ||
445 | * the translation up to the root. | ||
446 | * | ||
447 | * Some SBUS devices use intermediate nodes to express | ||
448 | * hierarchy within the device itself. These aren't | ||
449 | * real bus nodes, and don't have a 'ranges' property. | ||
450 | * But, we should still pass the translation work up | ||
451 | * to the SBUS itself. | ||
452 | */ | ||
453 | if (!strcmp(pp->name, "dma") || | ||
454 | !strcmp(pp->name, "espdma") || | ||
455 | !strcmp(pp->name, "ledma") || | ||
456 | !strcmp(pp->name, "lebuffer")) | ||
457 | return 0; | ||
458 | |||
459 | /* Similarly for all PCI bridges, if we get this far | ||
460 | * it lacks a ranges property, and this will include | ||
461 | * cases like Simba. | ||
462 | */ | ||
463 | if (!strcmp(pp->name, "pci")) | ||
464 | return 0; | ||
465 | |||
466 | return 1; | ||
467 | } | ||
468 | |||
469 | static int of_resource_verbose; | ||
470 | |||
/* Fill op->resource[] from the node's "reg"/"assigned-addresses"
 * entries, translating each address up through every parent bus's
 * "ranges" until the root is reached (or a bus demands the address
 * be used as-is).  Entries that fail translation keep OF_BAD_ADDR
 * and are left as zeroed resources with only a name.
 */
static void __init build_device_resources(struct of_device *op,
					  struct device *parent)
{
	struct of_device *p_op;
	struct of_bus *bus;
	int na, ns;
	int index, num_reg;
	const void *preg;

	if (!parent)
		return;

	p_op = to_of_device(parent);
	bus = of_match_bus(p_op->node);
	bus->count_cells(op->node, &na, &ns);

	preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
	if (!preg || num_reg == 0)
		return;

	/* Convert to num-cells.  */
	num_reg /= 4;

	/* Convert to num-entries.  */
	num_reg /= na + ns;

	/* Prevent overrunning the op->resources[] array.  */
	if (num_reg > PROMREG_MAX) {
		printk(KERN_WARNING "%s: Too many regs (%d), "
		       "limiting to %d.\n",
		       op->node->full_name, num_reg, PROMREG_MAX);
		num_reg = PROMREG_MAX;
	}

	for (index = 0; index < num_reg; index++) {
		struct resource *r = &op->resource[index];
		u32 addr[OF_MAX_ADDR_CELLS];
		const u32 *reg = (preg + (index * ((na + ns) * 4)));
		struct device_node *dp = op->node;
		struct device_node *pp = p_op->node;
		struct of_bus *pbus, *dbus;
		u64 size, result = OF_BAD_ADDR;
		unsigned long flags;
		int dna, dns;
		int pna, pns;

		size = of_read_addr(reg + na, ns);
		/* Work on a local copy; translation rewrites it in place. */
		memcpy(addr, reg, na * 4);

		flags = bus->get_flags(addr, 0);

		if (use_1to1_mapping(pp)) {
			result = of_read_addr(addr, na);
			goto build_res;
		}

		dna = na;
		dns = ns;
		dbus = bus;

		/* Walk towards the root, translating ADDR from each
		 * child bus into its parent's address space.
		 */
		while (1) {
			dp = pp;
			pp = dp->parent;
			if (!pp) {
				/* Reached the root: ADDR is absolute now. */
				result = of_read_addr(addr, dna);
				break;
			}

			pbus = of_match_bus(pp);
			pbus->count_cells(dp, &pna, &pns);

			/* Non-zero means translation failed; RESULT
			 * stays OF_BAD_ADDR.
			 */
			if (build_one_resource(dp, dbus, pbus, addr,
					       dna, dns, pna))
				break;

			flags = pbus->get_flags(addr, flags);

			dna = pna;
			dns = pns;
			dbus = pbus;
		}

	build_res:
		memset(r, 0, sizeof(*r));

		if (of_resource_verbose)
			printk("%s reg[%d] -> %lx\n",
			       op->node->full_name, index,
			       result);

		if (result != OF_BAD_ADDR) {
			/* NOTE(review): on hypervisor (sun4v) the top
			 * nibble of the physical address is masked off
			 * here -- confirm rationale against sun4v docs.
			 */
			if (tlb_type == hypervisor)
				result &= 0x0fffffffffffffffUL;

			r->start = result;
			r->end = result + size - 1;
			r->flags = flags;
		}
		r->name = op->node->name;
	}
}
572 | |||
/* Apply parent PP's "interrupt-map" to child DP's interrupt *IRQ_P.
 * Each map entry is (child-unit-addr[na], child-irq, parent-phandle,
 * parent-irq).  On a match, *IRQ_P is rewritten to the parent IRQ
 * and the parent interrupt controller node is returned.  Returns
 * NULL when the child cannot be matched, or PP itself when PP is an
 * IRQ translator and the map simply lacks this entry (on-board
 * devices of Psycho/Sabre, see below).
 */
static struct device_node * __init
apply_interrupt_map(struct device_node *dp, struct device_node *pp,
		    const u32 *imap, int imlen, const u32 *imask,
		    unsigned int *irq_p)
{
	struct device_node *cp;
	unsigned int irq = *irq_p;
	struct of_bus *bus;
	phandle handle;
	const u32 *reg;
	int na, num_reg, i;

	bus = of_match_bus(pp);
	bus->count_cells(dp, &na, NULL);

	reg = of_get_property(dp, "reg", &num_reg);
	if (!reg || !num_reg)
		return NULL;

	imlen /= ((na + 3) * 4);	/* bytes -> map entries */
	handle = 0;
	for (i = 0; i < imlen; i++) {
		int j;

		/* Compare the masked unit address cells first... */
		for (j = 0; j < na; j++) {
			if ((reg[j] & imask[j]) != imap[j])
				goto next;
		}
		/* ...then the interrupt number itself. */
		if (imap[na] == irq) {
			handle = imap[na + 1];
			irq = imap[na + 2];
			break;
		}

	next:
		imap += (na + 3);
	}
	if (i == imlen) {
		/* Psycho and Sabre PCI controllers can have 'interrupt-map'
		 * properties that do not include the on-board device
		 * interrupts.  Instead, the device's 'interrupts' property
		 * is already a fully specified INO value.
		 *
		 * Handle this by deciding that, if we didn't get a
		 * match in the parent's 'interrupt-map', and the
		 * parent is an IRQ translater, then use the parent as
		 * our IRQ controller.
		 */
		if (pp->irq_trans)
			return pp;

		return NULL;
	}

	*irq_p = irq;
	cp = of_find_node_by_phandle(handle);

	return cp;
}
632 | |||
/* Swizzle INTx line IRQ (1-4) for device DP behind PCI parent PP
 * when no interrupt-map is available.  Non-INTx values and devices
 * without a "reg" property pass through unchanged.
 */
static unsigned int __init pci_irq_swizzle(struct device_node *dp,
					   struct device_node *pp,
					   unsigned int irq)
{
	const struct linux_prom_pci_registers *regs;
	unsigned int bus, devfn, slot, ret;

	if (irq < 1 || irq > 4)
		return irq;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return irq;

	/* Decode bus/devfn from the config-space phys.hi cell. */
	bus = (regs->phys_hi >> 16) & 0xff;
	devfn = (regs->phys_hi >> 8) & 0xff;
	slot = (devfn >> 3) & 0x1f;

	if (pp->irq_trans) {
		/* Derived from Table 8-3, U2P User's Manual.  This branch
		 * is handling a PCI controller that lacks a proper set of
		 * interrupt-map and interrupt-map-mask properties.  The
		 * Ultra-E450 is one example.
		 *
		 * The bit layout is BSSLL, where:
		 *	B: 0 on bus A, 1 on bus B
		 *	SS: 2-bit slot number, derived from PCI device
		 *	    number as (dev - 1) for bus A, or (dev - 2)
		 *	    for bus B
		 *	LL: 2-bit line number
		 */
		if (bus & 0x80) {
			/* PBM-A */
			bus  = 0x00;
			slot = (slot - 1) << 2;
		} else {
			/* PBM-B */
			bus  = 0x10;
			slot = (slot - 2) << 2;
		}
		irq -= 1;	/* INTx 1-4 -> 0-3 line number */

		ret = (bus | slot | irq);
	} else {
		/* Going through a PCI-PCI bridge that lacks a set of
		 * interrupt-map and interrupt-map-mask properties.
		 */
		ret = ((irq - 1 + (slot & 3)) & 3) + 1;
	}

	return ret;
}
684 | |||
685 | static int of_irq_verbose; | ||
686 | |||
/* Resolve one raw "interrupts" value from OP's node into a final
 * Linux IRQ number by walking the tree toward the root, applying
 * interrupt-maps and PCI swizzles, until an IRQ translator node is
 * found.  0xffffffff (no interrupt) passes through untouched; if no
 * translator is found the original value is returned unchanged.
 */
static unsigned int __init build_one_device_irq(struct of_device *op,
						struct device *parent,
						unsigned int irq)
{
	struct device_node *dp = op->node;
	struct device_node *pp, *ip;
	unsigned int orig_irq = irq;
	int nid;

	if (irq == 0xffffffff)
		return irq;

	/* Fast path: the node itself knows how to translate. */
	if (dp->irq_trans) {
		irq = dp->irq_trans->irq_build(dp, irq,
					       dp->irq_trans->data);

		if (of_irq_verbose)
			printk("%s: direct translate %x --> %x\n",
			       dp->full_name, orig_irq, irq);

		goto out;
	}

	/* Something more complicated.  Walk up to the root, applying
	 * interrupt-map or bus specific translations, until we hit
	 * an IRQ translator.
	 *
	 * If we hit a bus type or situation we cannot handle, we
	 * stop and assume that the original IRQ number was in a
	 * format which has special meaning to it's immediate parent.
	 */
	pp = dp->parent;
	ip = NULL;
	while (pp) {
		const void *imap, *imsk;
		int imlen;

		imap = of_get_property(pp, "interrupt-map", &imlen);
		imsk = of_get_property(pp, "interrupt-map-mask", NULL);
		if (imap && imsk) {
			struct device_node *iret;
			int this_orig_irq = irq;

			iret = apply_interrupt_map(dp, pp,
						   imap, imlen, imsk,
						   &irq);

			if (of_irq_verbose)
				printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
				       op->node->full_name,
				       pp->full_name, this_orig_irq,
				       (iret ? iret->full_name : "NULL"), irq);

			if (!iret)
				break;	/* unmatchable: keep orig_irq */

			if (iret->irq_trans) {
				ip = iret;	/* found the translator */
				break;
			}
		} else {
			/* No interrupt-map: PCI INTx lines get the
			 * standard bridge swizzle instead.
			 */
			if (!strcmp(pp->name, "pci")) {
				unsigned int this_orig_irq = irq;

				irq = pci_irq_swizzle(dp, pp, irq);
				if (of_irq_verbose)
					printk("%s: PCI swizzle [%s] "
					       "%x --> %x\n",
					       op->node->full_name,
					       pp->full_name, this_orig_irq,
					       irq);

			}

			if (pp->irq_trans) {
				ip = pp;
				break;
			}
		}
		dp = pp;
		pp = pp->parent;
	}
	if (!ip)
		return orig_irq;

	irq = ip->irq_trans->irq_build(op->node, irq,
				       ip->irq_trans->data);
	if (of_irq_verbose)
		printk("%s: Apply IRQ trans [%s] %x --> %x\n",
		       op->node->full_name, ip->full_name, orig_irq, irq);

out:
	/* Bind the IRQ to CPUs on the device's NUMA node, if known. */
	nid = of_node_to_nid(dp);
	if (nid != -1) {
		cpumask_t numa_mask = node_to_cpumask(nid);

		irq_set_affinity(irq, numa_mask);
	}

	return irq;
}
788 | |||
789 | static struct of_device * __init scan_one_device(struct device_node *dp, | ||
790 | struct device *parent) | ||
791 | { | ||
792 | struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); | ||
793 | const unsigned int *irq; | ||
794 | struct dev_archdata *sd; | ||
795 | int len, i; | ||
796 | |||
797 | if (!op) | ||
798 | return NULL; | ||
799 | |||
800 | sd = &op->dev.archdata; | ||
801 | sd->prom_node = dp; | ||
802 | sd->op = op; | ||
803 | |||
804 | op->node = dp; | ||
805 | |||
806 | op->clock_freq = of_getintprop_default(dp, "clock-frequency", | ||
807 | (25*1000*1000)); | ||
808 | op->portid = of_getintprop_default(dp, "upa-portid", -1); | ||
809 | if (op->portid == -1) | ||
810 | op->portid = of_getintprop_default(dp, "portid", -1); | ||
811 | |||
812 | irq = of_get_property(dp, "interrupts", &len); | ||
813 | if (irq) { | ||
814 | memcpy(op->irqs, irq, len); | ||
815 | op->num_irqs = len / 4; | ||
816 | } else { | ||
817 | op->num_irqs = 0; | ||
818 | } | ||
819 | |||
820 | /* Prevent overrunning the op->irqs[] array. */ | ||
821 | if (op->num_irqs > PROMINTR_MAX) { | ||
822 | printk(KERN_WARNING "%s: Too many irqs (%d), " | ||
823 | "limiting to %d.\n", | ||
824 | dp->full_name, op->num_irqs, PROMINTR_MAX); | ||
825 | op->num_irqs = PROMINTR_MAX; | ||
826 | } | ||
827 | |||
828 | build_device_resources(op, parent); | ||
829 | for (i = 0; i < op->num_irqs; i++) | ||
830 | op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]); | ||
831 | |||
832 | op->dev.parent = parent; | ||
833 | op->dev.bus = &of_platform_bus_type; | ||
834 | if (!parent) | ||
835 | dev_set_name(&op->dev, "root"); | ||
836 | else | ||
837 | dev_set_name(&op->dev, "%08x", dp->node); | ||
838 | |||
839 | if (of_device_register(op)) { | ||
840 | printk("%s: Could not register of device.\n", | ||
841 | dp->full_name); | ||
842 | kfree(op); | ||
843 | op = NULL; | ||
844 | } | ||
845 | |||
846 | return op; | ||
847 | } | ||
848 | |||
849 | static void __init scan_tree(struct device_node *dp, struct device *parent) | ||
850 | { | ||
851 | while (dp) { | ||
852 | struct of_device *op = scan_one_device(dp, parent); | ||
853 | |||
854 | if (op) | ||
855 | scan_tree(dp->child, &op->dev); | ||
856 | |||
857 | dp = dp->sibling; | ||
858 | } | ||
859 | } | ||
860 | |||
861 | static void __init scan_of_devices(void) | ||
862 | { | ||
863 | struct device_node *root = of_find_node_by_path("/"); | ||
864 | struct of_device *parent; | ||
865 | |||
866 | parent = scan_one_device(root, NULL); | ||
867 | if (!parent) | ||
868 | return; | ||
869 | |||
870 | scan_tree(root->child, &parent->dev); | ||
871 | } | ||
872 | |||
873 | static int __init of_bus_driver_init(void) | ||
874 | { | ||
875 | int err; | ||
876 | |||
877 | err = of_bus_type_init(&of_platform_bus_type, "of"); | ||
878 | if (!err) | ||
879 | scan_of_devices(); | ||
880 | |||
881 | return err; | ||
882 | } | ||
883 | |||
884 | postcore_initcall(of_bus_driver_init); | ||
885 | |||
886 | static int __init of_debug(char *str) | ||
887 | { | ||
888 | int val = 0; | ||
889 | |||
890 | get_option(&str, &val); | ||
891 | if (val & 1) | ||
892 | of_resource_verbose = 1; | ||
893 | if (val & 2) | ||
894 | of_irq_verbose = 1; | ||
895 | return 1; | ||
896 | } | ||
897 | |||
898 | __setup("of_debug=", of_debug); | ||
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c new file mode 100644 index 000000000000..bdb7c0a6d83d --- /dev/null +++ b/arch/sparc/kernel/pci.c | |||
@@ -0,0 +1,1095 @@ | |||
1 | /* pci.c: UltraSparc PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) | ||
4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | * | ||
7 | * OF tree based PCI bus probing taken from the PowerPC port | ||
8 | * with minor modifications, see there for credits. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/capability.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/msi.h> | ||
19 | #include <linux/irq.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/of_device.h> | ||
23 | |||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/irq.h> | ||
27 | #include <asm/prom.h> | ||
28 | #include <asm/apb.h> | ||
29 | |||
30 | #include "pci_impl.h" | ||
31 | |||
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;

/* Config-space "poke" handshake state.  The accessors below set
 * pci_poke_in_progress/pci_poke_cpu around each raw access and test
 * pci_poke_faulted afterwards; the flag is presumably raised by the
 * trap path when the access faults (not visible in this file).
 */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

static DEFINE_SPINLOCK(pci_poke_lock);
43 | |||
/* Read one byte of PCI config space with a physical-bypass load.
 * Under pci_poke_lock: advertise the poking CPU, clear the fault flag,
 * do the access bracketed by membar #Sync, and only store the result
 * if no fault was recorded.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* lduba via ASI_PHYS_BYPASS_EC_E_L: little-endian physical load. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	/* Leave *ret untouched if the access faulted. */
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
65 | |||
/* Read a 16-bit config-space value; same poke protocol as
 * pci_config_read8, using a halfword load (lduha).
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	/* Leave *ret untouched if the access faulted. */
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
87 | |||
/* Read a 32-bit config-space value; same poke protocol as
 * pci_config_read8, using a word load (lduwa).
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	/* Leave *ret untouched if the access faulted. */
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
109 | |||
/* Write one byte of PCI config space (stba, physical bypass).  Faults
 * are absorbed by the poke protocol but the caller is not told; writes
 * are fire-and-forget.
 */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
128 | |||
/* 16-bit config-space write (stha); see pci_config_write8. */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
147 | |||
/* 32-bit config-space write (stwa); see pci_config_write8. */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
166 | |||
167 | static int ofpci_verbose; | ||
168 | |||
169 | static int __init ofpci_debug(char *str) | ||
170 | { | ||
171 | int val = 0; | ||
172 | |||
173 | get_option(&str, &val); | ||
174 | if (val) | ||
175 | ofpci_verbose = 1; | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | __setup("ofpci_debug=", ofpci_debug); | ||
180 | |||
181 | static unsigned long pci_parse_of_flags(u32 addr0) | ||
182 | { | ||
183 | unsigned long flags = 0; | ||
184 | |||
185 | if (addr0 & 0x02000000) { | ||
186 | flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; | ||
187 | flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
188 | flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; | ||
189 | if (addr0 & 0x40000000) | ||
190 | flags |= IORESOURCE_PREFETCH | ||
191 | | PCI_BASE_ADDRESS_MEM_PREFETCH; | ||
192 | } else if (addr0 & 0x01000000) | ||
193 | flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; | ||
194 | return flags; | ||
195 | } | ||
196 | |||
/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 */
static void pci_parse_of_addrs(struct of_device *op,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct resource *op_res;
	const u32 *addrs;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	if (ofpci_verbose)
		printk(" parse addresses (%d bytes) @ %p\n",
		       proplen, addrs);
	op_res = &op->resource[0];
	/* Each entry is five u32 cells (20 bytes); the of_device
	 * resources were built in the same order, so op_res advances
	 * in lockstep with the property.
	 */
	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
		struct resource *res;
		unsigned long flags;
		int i;

		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		/* Low byte of phys.hi is the config register this
		 * address was assigned to (a BAR or the ROM register).
		 */
		i = addrs[0] & 0xff;
		if (ofpci_verbose)
			printk(" start: %lx, end: %lx, i: %x\n",
			       op_res->start, op_res->end, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			/* BARs are 4 bytes apart, hence the >> 2. */
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = op_res->start;
		res->end = op_res->end;
		res->flags = flags;
		res->name = pci_name(dev);
	}
}
244 | |||
/* Build a struct pci_dev for NODE from OF properties plus a few live
 * config-space reads, instead of generic bus probing.  Both the pci_dev
 * and the matching of_device get their archdata (IOMMU, streaming
 * cache, NUMA node) filled in here.  Returns NULL on allocation
 * failure.
 */
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_bus *bus, int devfn)
{
	struct dev_archdata *sd;
	struct of_device *op;
	struct pci_dev *dev;
	const char *type;
	u32 class;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	sd = &dev->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->host_controller = pbm;
	sd->prom_node = node;
	sd->op = op = of_find_device_by_node(node);
	sd->numa_node = pbm->numa_node;

	/* NOTE: sd is repointed at the of_device's archdata here; the
	 * later sd->op uses below read that structure, whose ->op field
	 * is assumed to have been set by the OF device layer when OP
	 * was created (not visible in this file).
	 */
	sd = &op->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->numa_node = pbm->numa_node;

	/* ebus children inherit this archdata. */
	if (!strcmp(node->name, "ebus"))
		of_propagate_archdata(op);

	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	if (ofpci_verbose)
		printk(" create device, devfn: %x, type: %s\n",
		       devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
	dev->device = of_getintprop_default(node, "device-id", 0xffff);
	dev->subsystem_vendor =
		of_getintprop_default(node, "subsystem-vendor-id", 0);
	dev->subsystem_device =
		of_getintprop_default(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	/* We can't actually use the firmware value, we have
	 * to read what is in the register right now.  One
	 * reason is that in the case of IDE interfaces the
	 * firmware can sample the value before the IDE
	 * interface is programmed into native mode.
	 */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->class = class >> 8;
	dev->revision = class & 0xff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

	if (ofpci_verbose)
		printk(" class: 0x%x device name: %s\n",
		       dev->class, pci_name(dev));

	/* I have seen IDE devices which will not respond to
	 * the bmdma simplex check reads if bus mastering is
	 * disabled.
	 */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
		pci_set_master(dev);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;

	if (!strcmp(node->name, "pci")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;

		/* 0xffffffff appears to be the "no mapping" sentinel
		 * from the OF IRQ translation — mapped to PCI_IRQ_NONE.
		 */
		dev->irq = sd->op->irqs[0];
		if (dev->irq == 0xffffffff)
			dev->irq = PCI_IRQ_NONE;
	}

	pci_parse_of_addrs(sd->op, node, dev);

	if (ofpci_verbose)
		printk(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
350 | |||
351 | static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) | ||
352 | { | ||
353 | u32 idx, first, last; | ||
354 | |||
355 | first = 8; | ||
356 | last = 0; | ||
357 | for (idx = 0; idx < 8; idx++) { | ||
358 | if ((map & (1 << idx)) != 0) { | ||
359 | if (first > idx) | ||
360 | first = idx; | ||
361 | if (last < idx) | ||
362 | last = idx; | ||
363 | } | ||
364 | } | ||
365 | |||
366 | *first_p = first; | ||
367 | *last_p = last; | ||
368 | } | ||
369 | |||
370 | static void pci_resource_adjust(struct resource *res, | ||
371 | struct resource *root) | ||
372 | { | ||
373 | res->start += root->start; | ||
374 | res->end += root->start; | ||
375 | } | ||
376 | |||
/* For PCI bus devices which lack a 'ranges' property we interrogate
 * the config space values to set the resources, just like the generic
 * Linux PCI probing code does.
 */
static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
					  struct pci_bus *bus,
					  struct pci_pbm_info *pbm)
{
	struct resource *res;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;

	/* I/O window: the bridge registers hold address bits 15:12;
	 * a 32-bit capable bridge supplies bits 31:16 separately.
	 */
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	res = bus->resource[0];
	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		/* Only fill in start/end if not already set; I/O windows
		 * have 4KB granularity, hence the + 0xfff.
		 */
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;
		pci_resource_adjust(res, &pbm->io_space);
	}

	/* Non-prefetchable memory window, 1MB granularity. */
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	res = bus->resource[1];
	if (base <= limit) {
		res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
			      IORESOURCE_MEM);
		res->start = base;
		res->end = limit + 0xfffff;
		pci_resource_adjust(res, &pbm->mem_space);
	}

	/* Prefetchable memory window; may carry 64-bit upper halves.
	 * Note mem_base_lo/mem_limit_lo are reused for the pref
	 * registers from here on.
	 */
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
		}
	}

	res = bus->resource[2];
	if (base <= limit) {
		res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
			      IORESOURCE_MEM | IORESOURCE_PREFETCH);
		res->start = base;
		res->end = limit + 0xfffff;
		pci_resource_adjust(res, &pbm->mem_space);
	}
}
459 | |||
460 | /* Cook up fake bus resources for SUNW,simba PCI bridges which lack | ||
461 | * a proper 'ranges' property. | ||
462 | */ | ||
463 | static void __devinit apb_fake_ranges(struct pci_dev *dev, | ||
464 | struct pci_bus *bus, | ||
465 | struct pci_pbm_info *pbm) | ||
466 | { | ||
467 | struct resource *res; | ||
468 | u32 first, last; | ||
469 | u8 map; | ||
470 | |||
471 | pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); | ||
472 | apb_calc_first_last(map, &first, &last); | ||
473 | res = bus->resource[0]; | ||
474 | res->start = (first << 21); | ||
475 | res->end = (last << 21) + ((1 << 21) - 1); | ||
476 | res->flags = IORESOURCE_IO; | ||
477 | pci_resource_adjust(res, &pbm->io_space); | ||
478 | |||
479 | pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); | ||
480 | apb_calc_first_last(map, &first, &last); | ||
481 | res = bus->resource[1]; | ||
482 | res->start = (first << 21); | ||
483 | res->end = (last << 21) + ((1 << 21) - 1); | ||
484 | res->flags = IORESOURCE_MEM; | ||
485 | pci_resource_adjust(res, &pbm->mem_space); | ||
486 | } | ||
487 | |||
/* Forward declaration: pci_of_scan_bus and of_scan_pci_bridge call
 * each other recursively while walking the device tree.
 */
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
				      struct device_node *node,
				      struct pci_bus *bus);

/* Assemble a 64-bit value from two consecutive 32-bit property cells. */
#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
493 | |||
/* Create and populate the pci_bus behind a PCI-PCI bridge described by
 * NODE.  Bus numbers come from "bus-range"; window resources come from
 * "ranges", or are faked up for Simba bridges and bridges without the
 * property.  Finally the new bus is scanned recursively.
 */
static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, simba;
	struct resource *res;
	unsigned int flags;
	u64 size;

	if (ofpci_verbose)
		printk("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	simba = 0;
	if (ranges == NULL) {
		/* Simba bridges have no ranges; identify them by model. */
		const char *model = of_get_property(node, "model", NULL);
		if (model && !strcmp(model, "SUNW,simba"))
			simba = 1;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;

	/* parse ranges property, or cook one up by hand for Simba */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	if (simba) {
		apb_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	} else if (ranges == NULL) {
		pci_cfg_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	}
	/* Each ranges entry is 8 cells: 3-cell child address, 3-cell
	 * parent address, 2-cell size.  Slot 0 is reserved for I/O,
	 * memory windows start at slot 1.
	 */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		struct resource *root;

		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			root = &pbm->io_space;
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
			root = &pbm->mem_space;
		}

		/* Cells 1..2 are the 64-bit part of the child address. */
		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;

		/* Another way to implement this would be to add an of_device
		 * layer routine that can calculate a resource for a given
		 * range property value in a PCI device.
		 */
		pci_resource_adjust(res, root);
	}
after_ranges:
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	if (ofpci_verbose)
		printk(" bus name: %s\n", bus->name);

	pci_of_scan_bus(pbm, node, bus);
}
594 | |||
/* Walk NODE's children, creating a pci_dev for each one that carries a
 * "reg" property, and recurse through any bridges found.  The devfn is
 * taken from bits 15:8 of the first "reg" cell.
 */
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
				      struct device_node *node,
				      struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		printk("PCI: scan_bus[%s] bus no %d\n",
		       node->full_name, bus->number);

	child = NULL;
	prev_devfn = -1;
	while ((child = of_get_next_child(node, child)) != NULL) {
		if (ofpci_verbose)
			printk(" * %s\n", child->full_name);
		/* Need at least one full 5-cell reg entry (20 bytes). */
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;

		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice.  On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			printk("PCI: dev header type: %x\n",
			       dev->hdr_type);

		/* Bridges and cardbus controllers lead to subordinate
		 * buses that must be scanned too.
		 */
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			of_scan_pci_bridge(pbm, child, dev);
	}
}
641 | |||
642 | static ssize_t | ||
643 | show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) | ||
644 | { | ||
645 | struct pci_dev *pdev; | ||
646 | struct device_node *dp; | ||
647 | |||
648 | pdev = to_pci_dev(dev); | ||
649 | dp = pdev->dev.archdata.prom_node; | ||
650 | |||
651 | return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); | ||
652 | } | ||
653 | |||
654 | static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); | ||
655 | |||
656 | static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) | ||
657 | { | ||
658 | struct pci_dev *dev; | ||
659 | struct pci_bus *child_bus; | ||
660 | int err; | ||
661 | |||
662 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
663 | /* we don't really care if we can create this file or | ||
664 | * not, but we need to assign the result of the call | ||
665 | * or the world will fall under alien invasion and | ||
666 | * everybody will be frozen on a spaceship ready to be | ||
667 | * eaten on alpha centauri by some green and jelly | ||
668 | * humanoid. | ||
669 | */ | ||
670 | err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); | ||
671 | } | ||
672 | list_for_each_entry(child_bus, &bus->children, node) | ||
673 | pci_bus_register_of_sysfs(child_bus); | ||
674 | } | ||
675 | |||
/* Probe one PBM (partial bus module / PCI controller segment): create
 * its root pci_bus with the PBM's config ops and bus-number range,
 * attach the PBM's I/O and memory windows as root resources, scan the
 * bus from the OF tree, then register devices and their sysfs files.
 * Returns the new root bus, or NULL on failure.
 */
struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm,
					    struct device *parent)
{
	struct device_node *node = pbm->op->node;
	struct pci_bus *bus;

	printk("PCI: Scanning PBM %s\n", node->full_name);

	bus = pci_create_bus(parent, pbm->pci_first_busno, pbm->pci_ops, pbm);
	if (!bus) {
		printk(KERN_ERR "Failed to create bus for %s\n",
		       node->full_name);
		return NULL;
	}
	bus->secondary = pbm->pci_first_busno;
	bus->subordinate = pbm->pci_last_busno;

	bus->resource[0] = &pbm->io_space;
	bus->resource[1] = &pbm->mem_space;

	pci_of_scan_bus(pbm, node, bus);
	pci_bus_add_devices(bus);
	pci_bus_register_of_sysfs(bus);

	return bus;
}
702 | |||
/* Re-point the bus's root resources at the owning PBM's windows. */
void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	/* Generic PCI bus probing sets these to point at
	 * &io{port,mem}_resource which is wrong for us.
	 */
	pbus->resource[0] = &pbm->io_space;
	pbus->resource[1] = &pbm->mem_space;
}
713 | |||
714 | struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r) | ||
715 | { | ||
716 | struct pci_pbm_info *pbm = pdev->bus->sysdata; | ||
717 | struct resource *root = NULL; | ||
718 | |||
719 | if (r->flags & IORESOURCE_IO) | ||
720 | root = &pbm->io_space; | ||
721 | if (r->flags & IORESOURCE_MEM) | ||
722 | root = &pbm->mem_space; | ||
723 | |||
724 | return root; | ||
725 | } | ||
726 | |||
/* Intentionally empty: dev->irq is assigned from the OF device tree in
 * of_create_pci_dev(), so there is nothing to write back here.
 */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
730 | |||
/* Intentionally empty: no sparc-specific alignment constraints beyond
 * what the generic resource allocator already applies.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
}
735 | |||
736 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
737 | { | ||
738 | u16 cmd, oldcmd; | ||
739 | int i; | ||
740 | |||
741 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
742 | oldcmd = cmd; | ||
743 | |||
744 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
745 | struct resource *res = &dev->resource[i]; | ||
746 | |||
747 | /* Only set up the requested stuff */ | ||
748 | if (!(mask & (1<<i))) | ||
749 | continue; | ||
750 | |||
751 | if (res->flags & IORESOURCE_IO) | ||
752 | cmd |= PCI_COMMAND_IO; | ||
753 | if (res->flags & IORESOURCE_MEM) | ||
754 | cmd |= PCI_COMMAND_MEMORY; | ||
755 | } | ||
756 | |||
757 | if (cmd != oldcmd) { | ||
758 | printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", | ||
759 | pci_name(dev), cmd); | ||
760 | /* Enable the appropriate bits in the PCI command register. */ | ||
761 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
762 | } | ||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region, | ||
767 | struct resource *res) | ||
768 | { | ||
769 | struct pci_pbm_info *pbm = pdev->bus->sysdata; | ||
770 | struct resource zero_res, *root; | ||
771 | |||
772 | zero_res.start = 0; | ||
773 | zero_res.end = 0; | ||
774 | zero_res.flags = res->flags; | ||
775 | |||
776 | if (res->flags & IORESOURCE_IO) | ||
777 | root = &pbm->io_space; | ||
778 | else | ||
779 | root = &pbm->mem_space; | ||
780 | |||
781 | pci_resource_adjust(&zero_res, root); | ||
782 | |||
783 | region->start = res->start - zero_res.start; | ||
784 | region->end = res->end - zero_res.start; | ||
785 | } | ||
786 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
787 | |||
788 | void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res, | ||
789 | struct pci_bus_region *region) | ||
790 | { | ||
791 | struct pci_pbm_info *pbm = pdev->bus->sysdata; | ||
792 | struct resource *root; | ||
793 | |||
794 | res->start = region->start; | ||
795 | res->end = region->end; | ||
796 | |||
797 | if (res->flags & IORESOURCE_IO) | ||
798 | root = &pbm->io_space; | ||
799 | else | ||
800 | root = &pbm->mem_space; | ||
801 | |||
802 | pci_resource_adjust(res, root); | ||
803 | } | ||
804 | EXPORT_SYMBOL(pcibios_bus_to_resource); | ||
805 | |||
/* No sparc-specific "pci=" option strings: hand every option back to
 * the generic PCI code unconsumed.
 */
char * __devinit pcibios_setup(char *str)
{
	return str;
}
810 | |||
811 | /* Platform support for /proc/bus/pci/X/Y mmap()s. */ | ||
812 | |||
/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long space_size, user_offset, user_size;

	/* Size of the whole PBM window the user is mapping into. */
	if (mmap_state == pci_mmap_io) {
		space_size = (pbm->io_space.end -
			      pbm->io_space.start) + 1;
	} else {
		space_size = (pbm->mem_space.end -
			      pbm->mem_space.start) + 1;
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	/* Rebase vm_pgoff to the physical page within the window. */
	if (mmap_state == pci_mmap_io) {
		vma->vm_pgoff = (pbm->io_space.start +
				 user_offset) >> PAGE_SHIFT;
	} else {
		vma->vm_pgoff = (pbm->mem_space.start +
				 user_offset) >> PAGE_SHIFT;
	}

	return 0;
}
852 | |||
/* Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *pdev,
				  struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_paddr, user_size;
	int i, err;

	/* First compute the physical address in vma->vm_pgoff,
	 * making sure the user offset is within range in the
	 * appropriate PCI space.
	 */
	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
	if (err)
		return err;

	/* If this is a mapping on a host bridge, any address
	 * is OK.
	 */
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return err;

	/* Otherwise make sure it's in the range for one of the
	 * device's resources.
	 */
	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &pdev->resource[i];
		resource_size_t aligned_end;

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type?  The ROM resource only counts for memory
		 * mappings; other resources must match the I/O/memory
		 * kind of the requested mapping.
		 */
		if (i == PCI_ROM_RESOURCE) {
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		/* Align the resource end to the next page address.
		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
		 * because actually we need the address of the next byte
		 * after rp->end.
		 */
		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;

		if ((rp->start <= user_paddr) &&
		    (user_paddr + user_size) <= aligned_end)
			break;
	}

	/* No resource covered the requested span. */
	if (i > PCI_ROM_RESOURCE)
		return -EINVAL;

	return 0;
}
927 | |||
928 | /* Set vm_flags of VMA, as appropriate for this architecture, for a pci device | ||
929 | * mapping. | ||
930 | */ | ||
931 | static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, | ||
932 | enum pci_mmap_state mmap_state) | ||
933 | { | ||
934 | vma->vm_flags |= (VM_IO | VM_RESERVED); | ||
935 | } | ||
936 | |||
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
                                  enum pci_mmap_state mmap_state)
{
        /* Our io_remap_pfn_range takes care of this, do nothing.
         * Kept as an explicit hook so the mmap path mirrors other
         * architectures.
         */
}
945 | |||
946 | /* Perform the actual remap of the pages for a PCI device mapping, as appropriate | ||
947 | * for this architecture. The region in the process to map is described by vm_start | ||
948 | * and vm_end members of VMA, the base physical address is found in vm_pgoff. | ||
949 | * The pci device structure is provided so that architectures may make mapping | ||
950 | * decisions on a per-device or per-bus basis. | ||
951 | * | ||
952 | * Returns a negative error code on failure, zero on success. | ||
953 | */ | ||
954 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
955 | enum pci_mmap_state mmap_state, | ||
956 | int write_combine) | ||
957 | { | ||
958 | int ret; | ||
959 | |||
960 | ret = __pci_mmap_make_offset(dev, vma, mmap_state); | ||
961 | if (ret < 0) | ||
962 | return ret; | ||
963 | |||
964 | __pci_mmap_set_flags(dev, vma, mmap_state); | ||
965 | __pci_mmap_set_pgprot(dev, vma, mmap_state); | ||
966 | |||
967 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
968 | ret = io_remap_pfn_range(vma, vma->vm_start, | ||
969 | vma->vm_pgoff, | ||
970 | vma->vm_end - vma->vm_start, | ||
971 | vma->vm_page_prot); | ||
972 | if (ret) | ||
973 | return ret; | ||
974 | |||
975 | return 0; | ||
976 | } | ||
977 | |||
#ifdef CONFIG_NUMA
/* Report the NUMA node the controller behind PBUS belongs to. */
int pcibus_to_node(struct pci_bus *pbus)
{
        struct pci_pbm_info *info = pbus->sysdata;

        return info->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
987 | |||
988 | /* Return the domain number for this pci bus */ | ||
989 | |||
990 | int pci_domain_nr(struct pci_bus *pbus) | ||
991 | { | ||
992 | struct pci_pbm_info *pbm = pbus->sysdata; | ||
993 | int ret; | ||
994 | |||
995 | if (!pbm) { | ||
996 | ret = -ENXIO; | ||
997 | } else { | ||
998 | ret = pbm->index; | ||
999 | } | ||
1000 | |||
1001 | return ret; | ||
1002 | } | ||
1003 | EXPORT_SYMBOL(pci_domain_nr); | ||
1004 | |||
#ifdef CONFIG_PCI_MSI
/* Architecture hook: allocate and program an MSI for PDEV by delegating
 * to the owning controller's setup_msi_irq method.
 * Returns -EINVAL when this PBM has no MSI support.
 */
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned int virt_irq;

        if (!pbm->setup_msi_irq)
                return -EINVAL;

        /* virt_irq is filled in by the controller hook; the value is not
         * consumed here.
         */
        return pbm->setup_msi_irq(&virt_irq, pdev, desc);
}
1016 | |||
/* Architecture hook: release an MSI previously set up for the device that
 * owns VIRT_IRQ, delegating to the controller's teardown_msi_irq method.
 */
void arch_teardown_msi_irq(unsigned int virt_irq)
{
        /* Recover the device from the irq's MSI descriptor, then the
         * controller from the device.
         */
        struct msi_desc *entry = get_irq_msi(virt_irq);
        struct pci_dev *pdev = entry->dev;
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

        if (pbm->teardown_msi_irq)
                pbm->teardown_msi_irq(virt_irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
1027 | |||
/* Return the OpenFirmware device tree node backing this PCI device. */
struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
        return pdev->dev.archdata.prom_node;
}
EXPORT_SYMBOL(pci_device_to_OF_node);
1033 | |||
1034 | static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | ||
1035 | { | ||
1036 | struct pci_dev *ali_isa_bridge; | ||
1037 | u8 val; | ||
1038 | |||
1039 | /* ALI sound chips generate 31-bits of DMA, a special register | ||
1040 | * determines what bit 31 is emitted as. | ||
1041 | */ | ||
1042 | ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, | ||
1043 | PCI_DEVICE_ID_AL_M1533, | ||
1044 | NULL); | ||
1045 | |||
1046 | pci_read_config_byte(ali_isa_bridge, 0x7e, &val); | ||
1047 | if (set_bit) | ||
1048 | val |= 0x01; | ||
1049 | else | ||
1050 | val &= ~0x01; | ||
1051 | pci_write_config_byte(ali_isa_bridge, 0x7e, val); | ||
1052 | pci_dev_put(ali_isa_bridge); | ||
1053 | } | ||
1054 | |||
/* Decide whether DEVICE_MASK is usable for DMA on PDEV.
 * Returns 1 when supported, 0 otherwise.  A NULL PDEV is checked
 * against a plain 32-bit mask.
 */
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
        u64 dma_addr_mask;

        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
                struct iommu *iommu = pdev->dev.archdata.iommu;

                dma_addr_mask = iommu->dma_addr_mask;

                /* ALI M5451 sound chip: 31-bit DMA, handled by flipping
                 * the bridge's bit-31 emulation to match the IOMMU range.
                 */
                if (pdev->vendor == PCI_VENDOR_ID_AL &&
                    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
                    device_mask == 0x7fffffff) {
                        ali_sound_dma_hack(pdev,
                                           (dma_addr_mask & 0x80000000) != 0);
                        return 1;
                }
        }

        /* Masks wider than 32 bits are never supported here. */
        if (device_mask >= (1UL << 32UL))
                return 0;

        /* The device mask must cover the whole IOMMU DMA window. */
        return (device_mask & dma_addr_mask) == dma_addr_mask;
}
1080 | |||
1081 | void pci_resource_to_user(const struct pci_dev *pdev, int bar, | ||
1082 | const struct resource *rp, resource_size_t *start, | ||
1083 | resource_size_t *end) | ||
1084 | { | ||
1085 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | ||
1086 | unsigned long offset; | ||
1087 | |||
1088 | if (rp->flags & IORESOURCE_IO) | ||
1089 | offset = pbm->io_space.start; | ||
1090 | else | ||
1091 | offset = pbm->mem_space.start; | ||
1092 | |||
1093 | *start = rp->start - offset; | ||
1094 | *end = rp->end - offset; | ||
1095 | } | ||
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c new file mode 100644 index 000000000000..23b88082d0b2 --- /dev/null +++ b/arch/sparc/kernel/pci_common.c | |||
@@ -0,0 +1,545 @@ | |||
1 | /* pci_common.c: PCI controller common support. | ||
2 | * | ||
3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/string.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/of_device.h> | ||
12 | |||
13 | #include <asm/prom.h> | ||
14 | #include <asm/oplib.h> | ||
15 | |||
16 | #include "pci_impl.h" | ||
17 | #include "pci_sun4v.h" | ||
18 | |||
19 | static int config_out_of_range(struct pci_pbm_info *pbm, | ||
20 | unsigned long bus, | ||
21 | unsigned long devfn, | ||
22 | unsigned long reg) | ||
23 | { | ||
24 | if (bus < pbm->pci_first_busno || | ||
25 | bus > pbm->pci_last_busno) | ||
26 | return 1; | ||
27 | return 0; | ||
28 | } | ||
29 | |||
/* Compose the virtual address within the controller's config space that
 * corresponds to (BUS, DEVFN, REG).  Returns NULL when BUS is outside
 * this PBM's range.  config_space_reg_bits gives the width of the
 * register field in the encoded address.
 */
static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm,
                                 unsigned long bus,
                                 unsigned long devfn,
                                 unsigned long reg)
{
        unsigned long rbits = pbm->config_space_reg_bits;

        if (config_out_of_range(pbm, bus, devfn, reg))
                return NULL;

        /* Layout: | bus | devfn | reg |, with reg occupying rbits bits
         * and devfn a further 8 bits above it.
         */
        reg = (reg & ((1 << rbits) - 1));
        devfn <<= rbits;
        bus <<= rbits + 8;

        return (void *) (pbm->config_space | bus | devfn | reg);
}
46 | |||
/* At least on Sabre, it is necessary to access all PCI host controller
 * registers at their natural size, otherwise zeros are returned.
 * Strange but true, and I see no language in the UltraSPARC-IIi
 * programmer's manual that mentions this even indirectly.
 */
static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm,
                                   unsigned char bus, unsigned int devfn,
                                   int where, int size, u32 *value)
{
        u32 tmp32, *addr;
        u16 tmp16;
        u8 tmp8;

        addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
        if (!addr)
                return PCIBIOS_SUCCESSFUL;

        switch (size) {
        case 1:
                if (where < 8) {
                        /* First 8 config bytes are 16-bit registers on
                         * the host bridge: do an aligned 16-bit read and
                         * pick out the requested byte.
                         */
                        unsigned long align = (unsigned long) addr;

                        align &= ~1;
                        pci_config_read16((u16 *)align, &tmp16);
                        if (where & 1)
                                *value = tmp16 >> 8;
                        else
                                *value = tmp16 & 0xff;
                } else {
                        pci_config_read8((u8 *)addr, &tmp8);
                        *value = (u32) tmp8;
                }
                break;

        case 2:
                if (where < 8) {
                        pci_config_read16((u16 *)addr, &tmp16);
                        *value = (u32) tmp16;
                } else {
                        /* Registers past offset 8 are byte-wide: build
                         * the 16-bit value from two 8-bit reads.
                         */
                        pci_config_read8((u8 *)addr, &tmp8);
                        *value = (u32) tmp8;
                        pci_config_read8(((u8 *)addr) + 1, &tmp8);
                        *value |= ((u32) tmp8) << 8;
                }
                break;

        case 4:
                /* A dword read is two recursive 16-bit reads so each
                 * half is accessed at its natural size.
                 */
                tmp32 = 0xffffffff;
                sun4u_read_pci_cfg_host(pbm, bus, devfn,
                                        where, 2, &tmp32);
                *value = tmp32;

                tmp32 = 0xffffffff;
                sun4u_read_pci_cfg_host(pbm, bus, devfn,
                                        where + 2, 2, &tmp32);
                *value |= tmp32 << 16;
                break;
        }
        return PCIBIOS_SUCCESSFUL;
}
107 | |||
/* pci_ops.read for sun4u: read SIZE bytes of config space at WHERE for
 * (BUS_DEV, DEVFN) into *VALUE.  Out-of-range and misaligned accesses
 * return all-ones / the preset value with PCIBIOS_SUCCESSFUL, matching
 * PCI master-abort semantics.
 */
static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                              int where, int size, u32 *value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        unsigned char bus = bus_dev->number;
        u32 *addr;
        u16 tmp16;
        u8 tmp8;

        /* Preset all-ones so failed accesses look like master aborts. */
        switch (size) {
        case 1:
                *value = 0xff;
                break;
        case 2:
                *value = 0xffff;
                break;
        case 4:
                *value = 0xffffffff;
                break;
        }

        /* Bus 0, slot 0 is the host controller itself: it needs the
         * natural-size access workaround.
         */
        if (!bus_dev->number && !PCI_SLOT(devfn))
                return sun4u_read_pci_cfg_host(pbm, bus, devfn, where,
                                               size, value);

        addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
        if (!addr)
                return PCIBIOS_SUCCESSFUL;

        switch (size) {
        case 1:
                pci_config_read8((u8 *)addr, &tmp8);
                *value = (u32) tmp8;
                break;

        case 2:
                if (where & 0x01) {
                        printk("pci_read_config_word: misaligned reg [%x]\n",
                               where);
                        return PCIBIOS_SUCCESSFUL;
                }
                pci_config_read16((u16 *)addr, &tmp16);
                *value = (u32) tmp16;
                break;

        case 4:
                if (where & 0x03) {
                        printk("pci_read_config_dword: misaligned reg [%x]\n",
                               where);
                        return PCIBIOS_SUCCESSFUL;
                }
                pci_config_read32(addr, value);
                break;
        }
        return PCIBIOS_SUCCESSFUL;
}
164 | |||
/* Write path counterpart of sun4u_read_pci_cfg_host(): the host bridge
 * must be accessed at each register's natural size (see comment above
 * sun4u_read_pci_cfg_host).
 */
static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm,
                                    unsigned char bus, unsigned int devfn,
                                    int where, int size, u32 value)
{
        u32 *addr;

        addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
        if (!addr)
                return PCIBIOS_SUCCESSFUL;

        switch (size) {
        case 1:
                if (where < 8) {
                        /* 16-bit register: read-modify-write the aligned
                         * halfword around the requested byte.
                         */
                        unsigned long align = (unsigned long) addr;
                        u16 tmp16;

                        align &= ~1;
                        pci_config_read16((u16 *)align, &tmp16);
                        if (where & 1) {
                                tmp16 &= 0x00ff;
                                tmp16 |= value << 8;
                        } else {
                                tmp16 &= 0xff00;
                                tmp16 |= value;
                        }
                        pci_config_write16((u16 *)align, tmp16);
                } else
                        pci_config_write8((u8 *)addr, value);
                break;
        case 2:
                if (where < 8) {
                        pci_config_write16((u16 *)addr, value);
                } else {
                        /* Byte-wide registers past offset 8: two 8-bit
                         * writes.
                         */
                        pci_config_write8((u8 *)addr, value & 0xff);
                        pci_config_write8(((u8 *)addr) + 1, value >> 8);
                }
                break;
        case 4:
                /* Split a dword into two natural-size 16-bit writes. */
                sun4u_write_pci_cfg_host(pbm, bus, devfn,
                                         where, 2, value & 0xffff);
                sun4u_write_pci_cfg_host(pbm, bus, devfn,
                                         where + 2, 2, value >> 16);
                break;
        }
        return PCIBIOS_SUCCESSFUL;
}
211 | |||
212 | static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
213 | int where, int size, u32 value) | ||
214 | { | ||
215 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
216 | unsigned char bus = bus_dev->number; | ||
217 | u32 *addr; | ||
218 | |||
219 | if (!bus_dev->number && !PCI_SLOT(devfn)) | ||
220 | return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, | ||
221 | size, value); | ||
222 | |||
223 | addr = sun4u_config_mkaddr(pbm, bus, devfn, where); | ||
224 | if (!addr) | ||
225 | return PCIBIOS_SUCCESSFUL; | ||
226 | |||
227 | switch (size) { | ||
228 | case 1: | ||
229 | pci_config_write8((u8 *)addr, value); | ||
230 | break; | ||
231 | |||
232 | case 2: | ||
233 | if (where & 0x01) { | ||
234 | printk("pci_write_config_word: misaligned reg [%x]\n", | ||
235 | where); | ||
236 | return PCIBIOS_SUCCESSFUL; | ||
237 | } | ||
238 | pci_config_write16((u16 *)addr, value); | ||
239 | break; | ||
240 | |||
241 | case 4: | ||
242 | if (where & 0x03) { | ||
243 | printk("pci_write_config_dword: misaligned reg [%x]\n", | ||
244 | where); | ||
245 | return PCIBIOS_SUCCESSFUL; | ||
246 | } | ||
247 | pci_config_write32(addr, value); | ||
248 | } | ||
249 | return PCIBIOS_SUCCESSFUL; | ||
250 | } | ||
251 | |||
/* Config space accessors for sun4u (direct MMIO) controllers. */
struct pci_ops sun4u_pci_ops = {
        .read =         sun4u_read_pci_cfg,
        .write =        sun4u_write_pci_cfg,
};
256 | |||
257 | static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
258 | int where, int size, u32 *value) | ||
259 | { | ||
260 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
261 | u32 devhandle = pbm->devhandle; | ||
262 | unsigned int bus = bus_dev->number; | ||
263 | unsigned int device = PCI_SLOT(devfn); | ||
264 | unsigned int func = PCI_FUNC(devfn); | ||
265 | unsigned long ret; | ||
266 | |||
267 | if (config_out_of_range(pbm, bus, devfn, where)) { | ||
268 | ret = ~0UL; | ||
269 | } else { | ||
270 | ret = pci_sun4v_config_get(devhandle, | ||
271 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
272 | where, size); | ||
273 | } | ||
274 | switch (size) { | ||
275 | case 1: | ||
276 | *value = ret & 0xff; | ||
277 | break; | ||
278 | case 2: | ||
279 | *value = ret & 0xffff; | ||
280 | break; | ||
281 | case 4: | ||
282 | *value = ret & 0xffffffff; | ||
283 | break; | ||
284 | }; | ||
285 | |||
286 | |||
287 | return PCIBIOS_SUCCESSFUL; | ||
288 | } | ||
289 | |||
290 | static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | ||
291 | int where, int size, u32 value) | ||
292 | { | ||
293 | struct pci_pbm_info *pbm = bus_dev->sysdata; | ||
294 | u32 devhandle = pbm->devhandle; | ||
295 | unsigned int bus = bus_dev->number; | ||
296 | unsigned int device = PCI_SLOT(devfn); | ||
297 | unsigned int func = PCI_FUNC(devfn); | ||
298 | unsigned long ret; | ||
299 | |||
300 | if (config_out_of_range(pbm, bus, devfn, where)) { | ||
301 | /* Do nothing. */ | ||
302 | } else { | ||
303 | ret = pci_sun4v_config_put(devhandle, | ||
304 | HV_PCI_DEVICE_BUILD(bus, device, func), | ||
305 | where, size, value); | ||
306 | } | ||
307 | return PCIBIOS_SUCCESSFUL; | ||
308 | } | ||
309 | |||
/* Config space accessors for sun4v (hypervisor-mediated) controllers. */
struct pci_ops sun4v_pci_ops = {
        .read =         sun4v_read_pci_cfg,
        .write =        sun4v_write_pci_cfg,
};
314 | |||
315 | void pci_get_pbm_props(struct pci_pbm_info *pbm) | ||
316 | { | ||
317 | const u32 *val = of_get_property(pbm->op->node, "bus-range", NULL); | ||
318 | |||
319 | pbm->pci_first_busno = val[0]; | ||
320 | pbm->pci_last_busno = val[1]; | ||
321 | |||
322 | val = of_get_property(pbm->op->node, "ino-bitmap", NULL); | ||
323 | if (val) { | ||
324 | pbm->ino_bitmap = (((u64)val[1] << 32UL) | | ||
325 | ((u64)val[0] << 0UL)); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | static void pci_register_legacy_regions(struct resource *io_res, | ||
330 | struct resource *mem_res) | ||
331 | { | ||
332 | struct resource *p; | ||
333 | |||
334 | /* VGA Video RAM. */ | ||
335 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
336 | if (!p) | ||
337 | return; | ||
338 | |||
339 | p->name = "Video RAM area"; | ||
340 | p->start = mem_res->start + 0xa0000UL; | ||
341 | p->end = p->start + 0x1ffffUL; | ||
342 | p->flags = IORESOURCE_BUSY; | ||
343 | request_resource(mem_res, p); | ||
344 | |||
345 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
346 | if (!p) | ||
347 | return; | ||
348 | |||
349 | p->name = "System ROM"; | ||
350 | p->start = mem_res->start + 0xf0000UL; | ||
351 | p->end = p->start + 0xffffUL; | ||
352 | p->flags = IORESOURCE_BUSY; | ||
353 | request_resource(mem_res, p); | ||
354 | |||
355 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
356 | if (!p) | ||
357 | return; | ||
358 | |||
359 | p->name = "Video ROM"; | ||
360 | p->start = mem_res->start + 0xc0000UL; | ||
361 | p->end = p->start + 0x7fffUL; | ||
362 | p->flags = IORESOURCE_BUSY; | ||
363 | request_resource(mem_res, p); | ||
364 | } | ||
365 | |||
/* If the controller advertises a "virtual-dma" property (base, size),
 * claim that window inside MEM space so the IOMMU's DMA range is not
 * allocated to devices.  Halts the machine on allocation failure.
 */
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
        const u32 *vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);

        if (vdma) {
                struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);

                if (!rp) {
                        prom_printf("Cannot allocate IOMMU resource.\n");
                        prom_halt();
                }
                /* vdma[0] = base offset, vdma[1] = size. */
                rp->name = "IOMMU";
                rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
                rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
                rp->flags = IORESOURCE_BUSY;
                /* NOTE(review): request_resource() result is ignored
                 * here; a conflict leaves rp unregistered (and leaked).
                 */
                request_resource(&pbm->mem_space, rp);
        }
}
384 | |||
/* Parse the controller's OBP "ranges" property to locate its config,
 * IO and 32-bit MEM windows, register the IO/MEM windows with the
 * generic resource trees, and reserve legacy/IOMMU sub-regions.
 * Halts the machine when "ranges" or a required window is missing.
 */
void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
{
        const struct linux_prom_pci_ranges *pbm_ranges;
        int i, saw_mem, saw_io;
        int num_pbm_ranges;

        saw_mem = saw_io = 0;
        pbm_ranges = of_get_property(pbm->op->node, "ranges", &i);
        if (!pbm_ranges) {
                prom_printf("PCI: Fatal error, missing PBM ranges property "
                            " for %s\n",
                            pbm->name);
                prom_halt();
        }

        /* of_get_property() returned the property length in i. */
        num_pbm_ranges = i / sizeof(*pbm_ranges);

        for (i = 0; i < num_pbm_ranges; i++) {
                const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
                unsigned long a, size;
                u32 parent_phys_hi, parent_phys_lo;
                u32 size_hi, size_lo;
                int type;

                parent_phys_hi = pr->parent_phys_hi;
                parent_phys_lo = pr->parent_phys_lo;
                /* On sun4v the top nibble of the parent address encodes
                 * hypervisor-specific bits; mask it off.
                 */
                if (tlb_type == hypervisor)
                        parent_phys_hi &= 0x0fffffff;

                size_hi = pr->size_hi;
                size_lo = pr->size_lo;

                /* Bits 25:24 of child_phys_hi select the PCI space. */
                type = (pr->child_phys_hi >> 24) & 0x3;
                a = (((unsigned long)parent_phys_hi << 32UL) |
                     ((unsigned long)parent_phys_lo  <<  0UL));
                size = (((unsigned long)size_hi << 32UL) |
                        ((unsigned long)size_lo  <<  0UL));

                switch (type) {
                case 0:
                        /* PCI config space, 16MB */
                        pbm->config_space = a;
                        break;

                case 1:
                        /* 16-bit IO space, 16MB */
                        pbm->io_space.start = a;
                        pbm->io_space.end = a + size - 1UL;
                        pbm->io_space.flags = IORESOURCE_IO;
                        saw_io = 1;
                        break;

                case 2:
                        /* 32-bit MEM space, 2GB */
                        pbm->mem_space.start = a;
                        pbm->mem_space.end = a + size - 1UL;
                        pbm->mem_space.flags = IORESOURCE_MEM;
                        saw_mem = 1;
                        break;

                case 3:
                        /* XXX 64-bit MEM handling XXX */

                default:
                        break;
                };
        }

        if (!saw_io || !saw_mem) {
                prom_printf("%s: Fatal error, missing %s PBM range.\n",
                            pbm->name,
                            (!saw_io ? "IO" : "MEM"));
                prom_halt();
        }

        printk("%s: PCI IO[%lx] MEM[%lx]\n",
               pbm->name,
               pbm->io_space.start,
               pbm->mem_space.start);

        pbm->io_space.name = pbm->mem_space.name = pbm->name;

        request_resource(&ioport_resource, &pbm->io_space);
        request_resource(&iomem_resource, &pbm->mem_space);

        pci_register_legacy_regions(&pbm->io_space,
                                    &pbm->mem_space);
        pci_register_iommu_region(pbm);
}
474 | |||
475 | /* Generic helper routines for PCI error reporting. */ | ||
476 | void pci_scan_for_target_abort(struct pci_pbm_info *pbm, | ||
477 | struct pci_bus *pbus) | ||
478 | { | ||
479 | struct pci_dev *pdev; | ||
480 | struct pci_bus *bus; | ||
481 | |||
482 | list_for_each_entry(pdev, &pbus->devices, bus_list) { | ||
483 | u16 status, error_bits; | ||
484 | |||
485 | pci_read_config_word(pdev, PCI_STATUS, &status); | ||
486 | error_bits = | ||
487 | (status & (PCI_STATUS_SIG_TARGET_ABORT | | ||
488 | PCI_STATUS_REC_TARGET_ABORT)); | ||
489 | if (error_bits) { | ||
490 | pci_write_config_word(pdev, PCI_STATUS, error_bits); | ||
491 | printk("%s: Device %s saw Target Abort [%016x]\n", | ||
492 | pbm->name, pci_name(pdev), status); | ||
493 | } | ||
494 | } | ||
495 | |||
496 | list_for_each_entry(bus, &pbus->children, node) | ||
497 | pci_scan_for_target_abort(pbm, bus); | ||
498 | } | ||
499 | |||
500 | void pci_scan_for_master_abort(struct pci_pbm_info *pbm, | ||
501 | struct pci_bus *pbus) | ||
502 | { | ||
503 | struct pci_dev *pdev; | ||
504 | struct pci_bus *bus; | ||
505 | |||
506 | list_for_each_entry(pdev, &pbus->devices, bus_list) { | ||
507 | u16 status, error_bits; | ||
508 | |||
509 | pci_read_config_word(pdev, PCI_STATUS, &status); | ||
510 | error_bits = | ||
511 | (status & (PCI_STATUS_REC_MASTER_ABORT)); | ||
512 | if (error_bits) { | ||
513 | pci_write_config_word(pdev, PCI_STATUS, error_bits); | ||
514 | printk("%s: Device %s received Master Abort [%016x]\n", | ||
515 | pbm->name, pci_name(pdev), status); | ||
516 | } | ||
517 | } | ||
518 | |||
519 | list_for_each_entry(bus, &pbus->children, node) | ||
520 | pci_scan_for_master_abort(pbm, bus); | ||
521 | } | ||
522 | |||
523 | void pci_scan_for_parity_error(struct pci_pbm_info *pbm, | ||
524 | struct pci_bus *pbus) | ||
525 | { | ||
526 | struct pci_dev *pdev; | ||
527 | struct pci_bus *bus; | ||
528 | |||
529 | list_for_each_entry(pdev, &pbus->devices, bus_list) { | ||
530 | u16 status, error_bits; | ||
531 | |||
532 | pci_read_config_word(pdev, PCI_STATUS, &status); | ||
533 | error_bits = | ||
534 | (status & (PCI_STATUS_PARITY | | ||
535 | PCI_STATUS_DETECTED_PARITY)); | ||
536 | if (error_bits) { | ||
537 | pci_write_config_word(pdev, PCI_STATUS, error_bits); | ||
538 | printk("%s: Device %s saw Parity Error [%016x]\n", | ||
539 | pbm->name, pci_name(pdev), status); | ||
540 | } | ||
541 | } | ||
542 | |||
543 | list_for_each_entry(bus, &pbus->children, node) | ||
544 | pci_scan_for_parity_error(pbm, bus); | ||
545 | } | ||
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c new file mode 100644 index 000000000000..9462b68f4894 --- /dev/null +++ b/arch/sparc/kernel/pci_fire.c | |||
@@ -0,0 +1,521 @@ | |||
1 | /* pci_fire.c: Sun4u platform PCI-E controller support. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/msi.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <linux/of_device.h> | ||
12 | |||
13 | #include <asm/prom.h> | ||
14 | #include <asm/irq.h> | ||
15 | #include <asm/upa.h> | ||
16 | |||
17 | #include "pci_impl.h" | ||
18 | |||
19 | #define DRIVER_NAME "fire" | ||
20 | #define PFX DRIVER_NAME ": " | ||
21 | |||
22 | #define FIRE_IOMMU_CONTROL 0x40000UL | ||
23 | #define FIRE_IOMMU_TSBBASE 0x40008UL | ||
24 | #define FIRE_IOMMU_FLUSH 0x40100UL | ||
25 | #define FIRE_IOMMU_FLUSHINV 0x40108UL | ||
26 | |||
/* Initialize the Fire controller's IOMMU: program register addresses,
 * invalidate the TLB, build the translation table, then enable
 * translation.  Returns 0 on success or a negative error code from
 * iommu_table_init().
 */
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        u32 vdma[2], dma_mask;
        u64 control;
        int tsbsize, err;

        /* No virtual-dma property on these guys, use largest size. */
        vdma[0] = 0xc0000000; /* base */
        vdma[1] = 0x40000000; /* size */
        dma_mask = 0xffffffff;
        tsbsize = 128;

        /* Register addresses. */
        iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
        iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
        iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
        iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

        /* We use the main control/status register of FIRE as the write
         * completion register.
         */
        iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

        /*
         * Invalidate TLB Entries.
         */
        upa_writeq(~(u64)0, iommu->iommu_flushinv);

        /* tsbsize is in units of 8K entries of 8 bytes each. */
        err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
                               pbm->numa_node);
        if (err)
                return err;

        /* Point the hardware at the table; low 3 bits encode table
         * attributes -- TODO confirm against Fire programming manual.
         */
        upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

        control = upa_readq(iommu->iommu_control);
        control |= (0x00000400 /* TSB cache snoop enable */     |
                    0x00000300 /* Cache mode */                 |
                    0x00000002 /* Bypass enable */              |
                    0x00000001 /* Translation enable */);
        upa_writeq(control, iommu->iommu_control);

        return 0;
}
72 | |||
#ifdef CONFIG_PCI_MSI
/* One 64-byte event queue entry as written by the Fire hardware.
 * word0 carries the format/type, length, requester id and low data;
 * word1 carries the upper address/data halves.
 */
struct pci_msiq_entry {
        u64             word0;
#define MSIQ_WORD0_RESV                 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE             0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT       56
#define MSIQ_WORD0_LEN                  0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT            46
#define MSIQ_WORD0_ADDR0                0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT          32
#define MSIQ_WORD0_RID                  0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT            16
#define MSIQ_WORD0_DATA0                0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT          0

/* Upper bits of the FMT_TYPE field: message, 32-bit MSI, 64-bit MSI. */
#define MSIQ_TYPE_MSG                   0x6
#define MSIQ_TYPE_MSI32                 0xb
#define MSIQ_TYPE_MSI64                 0xf

        u64             word1;
#define MSIQ_WORD1_ADDR1                0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT          16
#define MSIQ_WORD1_DATA1                0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT          0

        /* Pad the entry to 64 bytes. */
        u64             resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG       0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES  0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)     (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW   0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN      0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)   (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF    0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I   0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS   0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)           (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK          0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE          0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE        0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR         0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)            (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW          0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL            0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)            (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL            0x000000000000007fUL

#define MSI_MAP(MSI)                    (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID                   0x8000000000000000UL
#define MSI_MAP_EQWR_N                  0x4000000000000000UL
#define MSI_MAP_EQNUM                   0x000000000000003fUL

#define MSI_CLEAR(MSI)                  (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N                0x4000000000000000UL

#define IMONDO_DATA0                    0x02C000UL
#define IMONDO_DATA0_DATA               0xffffffffffffffc0UL

#define IMONDO_DATA1                    0x02C008UL
#define IMONDO_DATA1_DATA               0xffffffffffffffffUL

#define MSI_32BIT_ADDR                  0x034000UL
#define MSI_32BIT_ADDR_VAL              0x00000000ffff0000UL

#define MSI_64BIT_ADDR                  0x034008UL
#define MSI_64BIT_ADDR_VAL              0xffffffffffff0000UL
146 | |||
147 | static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
148 | unsigned long *head) | ||
149 | { | ||
150 | *head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid)); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
/* Pull one MSI from event queue MSIQID at position *HEAD.
 * Returns 1 and stores the MSI number in *MSI (advancing *HEAD) when an
 * entry was consumed, 0 when the queue slot is empty, -EINVAL for an
 * unexpected entry type.
 */
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
                                unsigned long *head, unsigned long *msi)
{
        unsigned long type_fmt, type, msi_num;
        struct pci_msiq_entry *base, *ep;

        /* Queues are laid out contiguously, 8192 bytes apiece. */
        base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
        ep = &base[*head];

        /* A zero FMT_TYPE field marks an empty (already-consumed) slot. */
        if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
                return 0;

        type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
                    MSIQ_WORD0_FMT_TYPE_SHIFT);
        type = (type_fmt >> 3);
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
                          MSIQ_WORD0_DATA0_SHIFT);

        /* Re-arm the MSI in hardware. */
        upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

        /* Clear the entry. */
        ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

        /* Go to next entry in ring. */
        (*head)++;
        if (*head >= pbm->msiq_ent_count)
                *head = 0;

        return 1;
}
188 | |||
189 | static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
190 | unsigned long head) | ||
191 | { | ||
192 | upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid)); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
/* Bind MSI number MSI to event queue MSIQID and mark the mapping valid.
 * NOTE(review): is_msi64 is accepted but unused here -- 32/64-bit
 * addressing is presumably handled elsewhere; confirm against callers.
 */
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long msi, int is_msi64)
{
        u64 val;

        /* Point the MSI at its event queue first... */
        val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
        val &= ~(MSI_MAP_EQNUM);
        val |= msiqid;
        upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

        /* ...arm it... */
        upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

        /* ...then flip the valid bit with a separate read-modify-write. */
        val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
        val |= MSI_MAP_VALID;
        upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

        return 0;
}
214 | |||
215 | static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) | ||
216 | { | ||
217 | unsigned long msiqid; | ||
218 | u64 val; | ||
219 | |||
220 | val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); | ||
221 | msiqid = (val & MSI_MAP_EQNUM); | ||
222 | |||
223 | val &= ~MSI_MAP_VALID; | ||
224 | |||
225 | upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi)); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
/* Allocate and program the MSI event queue area for this PBM.
 *
 * A single 512KB physically-contiguous region holds all event queues
 * (8192 bytes per queue, see pci_fire_dequeue_msi).  Returns 0 on
 * success or -ENOMEM if the page allocation fails.
 */
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	/* Hardware requires the queues to start out zeroed (empty). */
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	/* Tell the controller where the queue area lives. */
	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

	/* Interrupt mondo data: our portid in DATA0, DATA1 unused. */
	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

	/* Program the 32-bit and 64-bit MSI address windows. */
	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	/* Start every queue with head == tail == 0 (empty). */
	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}
261 | |||
262 | static void pci_fire_msiq_free(struct pci_pbm_info *pbm) | ||
263 | { | ||
264 | unsigned long pages, order; | ||
265 | |||
266 | order = get_order(512 * 1024); | ||
267 | pages = (unsigned long) pbm->msi_queues; | ||
268 | |||
269 | free_pages(pages, order); | ||
270 | |||
271 | pbm->msi_queues = NULL; | ||
272 | } | ||
273 | |||
/* Build a virtual IRQ for the device INO backing MSI event queue
 * 'msiqid', and enable the queue.  Returns the virtual IRQ number, or
 * -ENOMEM if build_irq() fails.
 */
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int virt_irq;
	int fixup;
	u64 val;

	/* Per-INO interrupt map and clear registers, 8 bytes apart. */
	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	/* Set the valid bit and route to the chosen controller. */
	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

	/* Fixup value so the generic IRQ layer reconstructs the full
	 * (portid | devino) mondo from the controller-relative value.
	 */
	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!virt_irq)
		return -ENOMEM;

	/* Turn the event queue on now that delivery is wired up. */
	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return virt_irq;
}
305 | |||
/* Fire-specific hooks handed to the generic sparc64 MSI layer
 * (sparc64_pbm_msi_init in pci_msi.c).
 */
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	=	pci_fire_get_head,
	.dequeue_msi	=	pci_fire_dequeue_msi,
	.set_head	=	pci_fire_set_head,
	.msi_setup	=	pci_fire_msi_setup,
	.msi_teardown	=	pci_fire_msi_teardown,
	.msiq_alloc	=	pci_fire_msiq_alloc,
	.msiq_free	=	pci_fire_msiq_free,
	.msiq_build_irq	=	pci_fire_msiq_build_irq,
};
316 | |||
/* Probe OF properties and bring up MSI support for this PBM using the
 * Fire-specific queue ops.  On failure the generic layer simply leaves
 * MSI disabled, so this cannot fail the controller probe.
 */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
/* No-op stub when MSI support is configured out. */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
326 | |||
327 | /* Based at pbm->controller_regs */ | ||
328 | #define FIRE_PARITY_CONTROL 0x470010UL | ||
329 | #define FIRE_PARITY_ENAB 0x8000000000000000UL | ||
330 | #define FIRE_FATAL_RESET_CTL 0x471028UL | ||
331 | #define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL | ||
332 | #define FIRE_FATAL_RESET_MB 0x0000000002000000UL | ||
333 | #define FIRE_FATAL_RESET_CPE 0x0000000000008000UL | ||
334 | #define FIRE_FATAL_RESET_APE 0x0000000000004000UL | ||
335 | #define FIRE_FATAL_RESET_PIO 0x0000000000000040UL | ||
336 | #define FIRE_FATAL_RESET_JW 0x0000000000000004UL | ||
337 | #define FIRE_FATAL_RESET_JI 0x0000000000000002UL | ||
338 | #define FIRE_FATAL_RESET_JR 0x0000000000000001UL | ||
339 | #define FIRE_CORE_INTR_ENABLE 0x471800UL | ||
340 | |||
341 | /* Based at pbm->pbm_regs */ | ||
342 | #define FIRE_TLU_CTRL 0x80000UL | ||
343 | #define FIRE_TLU_CTRL_TIM 0x00000000da000000UL | ||
344 | #define FIRE_TLU_CTRL_QDET 0x0000000000000100UL | ||
345 | #define FIRE_TLU_CTRL_CFG 0x0000000000000001UL | ||
346 | #define FIRE_TLU_DEV_CTRL 0x90008UL | ||
347 | #define FIRE_TLU_LINK_CTRL 0x90020UL | ||
348 | #define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL | ||
349 | #define FIRE_LPU_RESET 0xe2008UL | ||
350 | #define FIRE_LPU_LLCFG 0xe2200UL | ||
351 | #define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL | ||
352 | #define FIRE_LPU_FCTRL_UCTRL 0xe2240UL | ||
353 | #define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL | ||
354 | #define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL | ||
355 | #define FIRE_LPU_TXL_FIFOP 0xe2430UL | ||
356 | #define FIRE_LPU_LTSSM_CFG2 0xe2788UL | ||
357 | #define FIRE_LPU_LTSSM_CFG3 0xe2790UL | ||
358 | #define FIRE_LPU_LTSSM_CFG4 0xe2798UL | ||
359 | #define FIRE_LPU_LTSSM_CFG5 0xe27a0UL | ||
360 | #define FIRE_DMC_IENAB 0x31800UL | ||
361 | #define FIRE_DMC_DBG_SEL_A 0x53000UL | ||
362 | #define FIRE_DMC_DBG_SEL_B 0x53008UL | ||
363 | #define FIRE_PEC_IENAB 0x51800UL | ||
364 | |||
/* One-time hardware initialization of the Fire PCI-E controller:
 * parity checking, fatal-reset causes, interrupt enables, and the
 * TLU (transaction layer) / LPU (link/physical layer) configuration.
 * The magic register values follow the chip documentation; the
 * ordering (TLU before LPU, enables last) is deliberate.
 */
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	/* Select which error conditions trigger a fatal reset. */
	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	/* Transaction layer: timers, queue detection, config enable. */
	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	/* Link/physical layer: take out of reset, enable VC0 and flow
	 * control, then program LTSSM (link training) timing values.
	 */
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	/* Enable DMC and PEC interrupts, debug selects off. */
	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
411 | |||
412 | static int __init pci_fire_pbm_init(struct pci_pbm_info *pbm, | ||
413 | struct of_device *op, u32 portid) | ||
414 | { | ||
415 | const struct linux_prom64_registers *regs; | ||
416 | struct device_node *dp = op->node; | ||
417 | int err; | ||
418 | |||
419 | pbm->numa_node = -1; | ||
420 | |||
421 | pbm->pci_ops = &sun4u_pci_ops; | ||
422 | pbm->config_space_reg_bits = 12; | ||
423 | |||
424 | pbm->index = pci_num_pbms++; | ||
425 | |||
426 | pbm->portid = portid; | ||
427 | pbm->op = op; | ||
428 | pbm->name = dp->full_name; | ||
429 | |||
430 | regs = of_get_property(dp, "reg", NULL); | ||
431 | pbm->pbm_regs = regs[0].phys_addr; | ||
432 | pbm->controller_regs = regs[1].phys_addr - 0x410000UL; | ||
433 | |||
434 | printk("%s: SUN4U PCIE Bus Module\n", pbm->name); | ||
435 | |||
436 | pci_determine_mem_io_space(pbm); | ||
437 | |||
438 | pci_get_pbm_props(pbm); | ||
439 | |||
440 | pci_fire_hw_init(pbm); | ||
441 | |||
442 | err = pci_fire_pbm_iommu_init(pbm); | ||
443 | if (err) | ||
444 | return err; | ||
445 | |||
446 | pci_fire_msi_init(pbm); | ||
447 | |||
448 | pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev); | ||
449 | |||
450 | /* XXX register error interrupt handlers XXX */ | ||
451 | |||
452 | pbm->next = pci_pbm_root; | ||
453 | pci_pbm_root = pbm; | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static int __devinit fire_probe(struct of_device *op, | ||
459 | const struct of_device_id *match) | ||
460 | { | ||
461 | struct device_node *dp = op->node; | ||
462 | struct pci_pbm_info *pbm; | ||
463 | struct iommu *iommu; | ||
464 | u32 portid; | ||
465 | int err; | ||
466 | |||
467 | portid = of_getintprop_default(dp, "portid", 0xff); | ||
468 | |||
469 | err = -ENOMEM; | ||
470 | pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); | ||
471 | if (!pbm) { | ||
472 | printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n"); | ||
473 | goto out_err; | ||
474 | } | ||
475 | |||
476 | iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); | ||
477 | if (!iommu) { | ||
478 | printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n"); | ||
479 | goto out_free_controller; | ||
480 | } | ||
481 | |||
482 | pbm->iommu = iommu; | ||
483 | |||
484 | err = pci_fire_pbm_init(pbm, op, portid); | ||
485 | if (err) | ||
486 | goto out_free_iommu; | ||
487 | |||
488 | dev_set_drvdata(&op->dev, pbm); | ||
489 | |||
490 | return 0; | ||
491 | |||
492 | out_free_iommu: | ||
493 | kfree(pbm->iommu); | ||
494 | |||
495 | out_free_controller: | ||
496 | kfree(pbm); | ||
497 | |||
498 | out_err: | ||
499 | return err; | ||
500 | } | ||
501 | |||
502 | static struct of_device_id __initdata fire_match[] = { | ||
503 | { | ||
504 | .name = "pci", | ||
505 | .compatible = "pciex108e,80f0", | ||
506 | }, | ||
507 | {}, | ||
508 | }; | ||
509 | |||
/* of_platform driver binding probe callback to the match table above.
 * No remove hook: PCI host controllers are never unbound.
 */
static struct of_platform_driver fire_driver = {
	.name		= DRIVER_NAME,
	.match_table	= fire_match,
	.probe		= fire_probe,
};
515 | |||
/* Register the Fire driver on the OF bus.  Run at subsys_initcall time
 * so the PCI host controller exists before device-level initcalls.
 */
static int __init fire_init(void)
{
	return of_register_driver(&fire_driver, &of_bus_type);
}

subsys_initcall(fire_init);
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h new file mode 100644 index 000000000000..03186824327e --- /dev/null +++ b/arch/sparc/kernel/pci_impl.h | |||
@@ -0,0 +1,185 @@ | |||
1 | /* pci_impl.h: Helper definitions for PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef PCI_IMPL_H | ||
7 | #define PCI_IMPL_H | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/msi.h> | ||
13 | #include <linux/of_device.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/prom.h> | ||
16 | #include <asm/iommu.h> | ||
17 | |||
/* The abstraction used here is that there are PCI controllers,
 * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
 * underneath.  Each PCI bus module uses an IOMMU (shared by both
 * PBMs of a controller, or per-PBM), and if a streaming buffer
 * is present, each PCI bus module has its own. (ie. the IOMMU
 * might be shared between PBMs, the STC is never shared)
 * Furthermore, each PCI bus module controls its own autonomous
 * PCI bus.
 */
27 | |||
28 | #define PCI_STC_FLUSHFLAG_INIT(STC) \ | ||
29 | (*((STC)->strbuf_flushflag) = 0UL) | ||
30 | #define PCI_STC_FLUSHFLAG_SET(STC) \ | ||
31 | (*((STC)->strbuf_flushflag) != 0UL) | ||
32 | |||
#ifdef CONFIG_PCI_MSI
struct pci_pbm_info;

/* Per-controller hooks used by the generic sparc64 MSI layer
 * (pci_msi.c).  Implemented by Fire (register-level) and sun4v
 * (hypervisor-call) backends.  All int-returning hooks use 0/negative
 * errno; dequeue_msi additionally returns 1 when an MSI was consumed.
 */
struct sparc64_msiq_ops {
	/* Read the hardware head pointer of an event queue. */
	int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
			unsigned long *head);
	/* Consume one entry at *head; advances *head on success. */
	int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
			   unsigned long *head, unsigned long *msi);
	/* Write the head pointer back, acknowledging consumed entries. */
	int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
			unsigned long head);
	/* Bind an MSI number to an event queue and enable it. */
	int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
			 unsigned long msi, int is_msi64);
	/* Disable an MSI number. */
	int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
	/* Allocate and program the event queue memory for the PBM. */
	int (*msiq_alloc)(struct pci_pbm_info *pbm);
	void (*msiq_free)(struct pci_pbm_info *pbm);
	/* Build a virtual IRQ for the queue's device INO. */
	int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long devino);
};

extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops);

/* Handler cookie: identifies which PBM/queue an MSI queue IRQ is for. */
struct sparc64_msiq_cookie {
	struct pci_pbm_info *pbm;
	unsigned long msiqid;
};
#endif
59 | |||
/* State for one PCI bus module (PBM).  See the abstraction comment at
 * the top of this header.
 */
struct pci_pbm_info {
	/* Global singly-linked list of all PBMs (pci_pbm_root). */
	struct pci_pbm_info		*next;
	/* The other PBM of a two-PBM controller, if any. */
	struct pci_pbm_info		*sibling;
	/* Global PBM index, assigned from pci_num_pbms at probe time. */
	int				index;

	/* Physical address base of controller registers. */
	unsigned long			controller_regs;

	/* Physical address base of PBM registers. */
	unsigned long			pbm_regs;

	/* Physical address of DMA sync register, if any. */
	unsigned long			sync_reg;

	/* Opaque 32-bit system bus Port ID. */
	u32				portid;

	/* Opaque 32-bit handle used for hypervisor calls. */
	u32				devhandle;

	/* Chipset version information. */
	int				chip_type;
#define PBM_CHIP_TYPE_SABRE		1
#define PBM_CHIP_TYPE_PSYCHO		2
#define PBM_CHIP_TYPE_SCHIZO		3
#define PBM_CHIP_TYPE_SCHIZO_PLUS	4
#define PBM_CHIP_TYPE_TOMATILLO		5
	int				chip_version;
	int				chip_revision;

	/* Name used for top-level resources. */
	char				*name;

	/* OBP specific information. */
	struct of_device		*op;
	u64				ino_bitmap;

	/* PBM I/O and Memory space resources. */
	struct resource			io_space;
	struct resource			mem_space;

	/* Base of PCI Config space, can be per-PBM or shared. */
	unsigned long			config_space;

	/* This will be 12 on PCI-E controllers, 8 elsewhere. */
	unsigned long			config_space_reg_bits;

	/* Physical addresses of PCI error reporting registers. */
	unsigned long			pci_afsr;
	unsigned long			pci_afar;
	unsigned long			pci_csr;

	/* State of 66MHz capabilities on this PBM. */
	int				is_66mhz_capable;
	int				all_devs_66mhz;

#ifdef CONFIG_PCI_MSI
	/* MSI info.  Event queue and MSI number ranges come from OF
	 * properties, parsed in sparc64_pbm_msi_init().
	 */
	u32				msiq_num;
	u32				msiq_ent_count;
	u32				msiq_first;
	u32				msiq_first_devino;
	/* Round-robin cursor for distributing MSIs over queues. */
	u32				msiq_rotor;
	struct sparc64_msiq_cookie	*msiq_irq_cookies;
	u32				msi_num;
	u32				msi_first;
	u32				msi_data_mask;
	u32				msix_data_width;
	/* MSI address windows (32-bit and 64-bit capable devices). */
	u64				msi32_start;
	u64				msi64_start;
	u32				msi32_len;
	u32				msi64_len;
	/* Event queue memory; layout is backend-specific. */
	void				*msi_queues;
	/* Allocation bitmap over [msi_first, msi_first + msi_num). */
	unsigned long			*msi_bitmap;
	/* Maps (msi - msi_first) to its virtual IRQ. */
	unsigned int			*msi_irq_table;
	int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
			     struct msi_desc *entry);
	void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
	const struct sparc64_msiq_ops	*msi_ops;
#endif /* !(CONFIG_PCI_MSI) */

	/* This PBM's streaming buffer. */
	struct strbuf			stc;

	/* IOMMU state, potentially shared by both PBM segments. */
	struct iommu			*iommu;

	/* Now things for the actual PCI bus probes. */
	unsigned int			pci_first_busno;
	unsigned int			pci_last_busno;
	struct pci_bus			*pci_bus;
	struct pci_ops			*pci_ops;

	int				numa_node;
};
154 | |||
155 | extern struct pci_pbm_info *pci_pbm_root; | ||
156 | |||
157 | extern int pci_num_pbms; | ||
158 | |||
159 | /* PCI bus scanning and fixup support. */ | ||
160 | extern void pci_get_pbm_props(struct pci_pbm_info *pbm); | ||
161 | extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, | ||
162 | struct device *parent); | ||
163 | extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm); | ||
164 | |||
165 | /* Error reporting support. */ | ||
166 | extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *); | ||
167 | extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *); | ||
168 | extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *); | ||
169 | |||
170 | /* Configuration space access. */ | ||
171 | extern void pci_config_read8(u8 *addr, u8 *ret); | ||
172 | extern void pci_config_read16(u16 *addr, u16 *ret); | ||
173 | extern void pci_config_read32(u32 *addr, u32 *ret); | ||
174 | extern void pci_config_write8(u8 *addr, u8 val); | ||
175 | extern void pci_config_write16(u16 *addr, u16 val); | ||
176 | extern void pci_config_write32(u32 *addr, u32 val); | ||
177 | |||
178 | extern struct pci_ops sun4u_pci_ops; | ||
179 | extern struct pci_ops sun4v_pci_ops; | ||
180 | |||
181 | extern volatile int pci_poke_in_progress; | ||
182 | extern volatile int pci_poke_cpu; | ||
183 | extern volatile int pci_poke_faulted; | ||
184 | |||
185 | #endif /* !(PCI_IMPL_H) */ | ||
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c new file mode 100644 index 000000000000..2e680f34f727 --- /dev/null +++ b/arch/sparc/kernel/pci_msi.c | |||
@@ -0,0 +1,447 @@ | |||
1 | /* pci_msi.c: Sparc64 MSI support common layer. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/irq.h> | ||
8 | |||
9 | #include "pci_impl.h" | ||
10 | |||
/* Interrupt handler for one MSI event queue.
 *
 * Drains all pending entries between the hardware head and tail,
 * dispatching each MSI to its virtual IRQ handler, then writes the
 * head pointer back once at the end.  'cookie' identifies the
 * PBM/queue pair (see struct sparc64_msiq_cookie).
 */
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		/* dequeue_msi: 1 = got an entry, 0 = queue drained,
		 * negative = hard error.
		 */
		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			struct irq_desc *desc;
			unsigned int virt_irq;

			/* Dispatch to the virtual IRQ bound to this MSI. */
			virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
			desc = irq_desc + virt_irq;

			desc->handle_irq(virt_irq, desc);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	/* Acknowledge all consumed entries in one head update. */
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}
74 | |||
75 | static u32 pick_msiq(struct pci_pbm_info *pbm) | ||
76 | { | ||
77 | static DEFINE_SPINLOCK(rotor_lock); | ||
78 | unsigned long flags; | ||
79 | u32 ret, rotor; | ||
80 | |||
81 | spin_lock_irqsave(&rotor_lock, flags); | ||
82 | |||
83 | rotor = pbm->msiq_rotor; | ||
84 | ret = pbm->msiq_first + rotor; | ||
85 | |||
86 | if (++rotor >= pbm->msiq_num) | ||
87 | rotor = 0; | ||
88 | pbm->msiq_rotor = rotor; | ||
89 | |||
90 | spin_unlock_irqrestore(&rotor_lock, flags); | ||
91 | |||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | |||
96 | static int alloc_msi(struct pci_pbm_info *pbm) | ||
97 | { | ||
98 | int i; | ||
99 | |||
100 | for (i = 0; i < pbm->msi_num; i++) { | ||
101 | if (!test_and_set_bit(i, pbm->msi_bitmap)) | ||
102 | return i + pbm->msi_first; | ||
103 | } | ||
104 | |||
105 | return -ENOENT; | ||
106 | } | ||
107 | |||
108 | static void free_msi(struct pci_pbm_info *pbm, int msi_num) | ||
109 | { | ||
110 | msi_num -= pbm->msi_first; | ||
111 | clear_bit(msi_num, pbm->msi_bitmap); | ||
112 | } | ||
113 | |||
/* irq_chip for MSI virtual interrupts.  Mask/unmask go through the
 * generic PCI MSI helpers, which toggle the enable bit in the device's
 * MSI capability.
 */
static struct irq_chip msi_irq = {
	.typename	= "PCI-MSI",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.enable		= unmask_msi_irq,
	.disable	= mask_msi_irq,
	/* XXX affinity XXX */
};
122 | |||
/* Set up one MSI for 'pdev': allocate a virtual IRQ, claim an MSI
 * number, bind it to an event queue via the backend ops, and program
 * the device's MSI message.  On success the virtual IRQ is stored in
 * *virt_irq_p; on failure everything is unwound and a negative errno
 * is returned.
 */
static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*virt_irq_p = virt_irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*virt_irq_p)
		goto out_err;

	set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
				      handle_simple_irq, "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_virt_irq_free;

	msi = err;

	/* Spread MSIs over the event queues round-robin. */
	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;

	/* Build the MSI message: the address selects the 32-bit or
	 * 64-bit window, the data is the MSI number itself.
	 */
	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	set_irq_msi(*virt_irq_p, entry);
	write_msi_msg(*virt_irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_virt_irq_free:
	set_irq_chip(*virt_irq_p, NULL);
	virt_irq_free(*virt_irq_p);
	*virt_irq_p = 0;

out_err:
	return err;
}
181 | |||
/* Tear down the MSI backing 'virt_irq' on 'pdev': find the MSI by a
 * reverse lookup in msi_irq_table, disable it via the backend ops,
 * return the MSI number to the bitmap, and free the virtual IRQ.
 * Errors are logged but not propagated (void return).
 */
static void sparc64_teardown_msi_irq(unsigned int virt_irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	/* Reverse map: find which MSI slot points at this virt_irq. */
	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == virt_irq)
			break;
	}
	if (i >= pbm->msi_num) {
		printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
		       pbm->name, virt_irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	/* Invalidate the slot before touching hardware. */
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
		       "irq %u, gives error %d\n",
		       pbm->name, msi_num, virt_irq, err);
		return;
	}

	free_msi(pbm, msi_num);

	set_irq_chip(virt_irq, NULL);
	virt_irq_free(virt_irq);
}
216 | |||
217 | static int msi_bitmap_alloc(struct pci_pbm_info *pbm) | ||
218 | { | ||
219 | unsigned long size, bits_per_ulong; | ||
220 | |||
221 | bits_per_ulong = sizeof(unsigned long) * 8; | ||
222 | size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); | ||
223 | size /= 8; | ||
224 | BUG_ON(size % sizeof(unsigned long)); | ||
225 | |||
226 | pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); | ||
227 | if (!pbm->msi_bitmap) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static void msi_bitmap_free(struct pci_pbm_info *pbm) | ||
234 | { | ||
235 | kfree(pbm->msi_bitmap); | ||
236 | pbm->msi_bitmap = NULL; | ||
237 | } | ||
238 | |||
239 | static int msi_table_alloc(struct pci_pbm_info *pbm) | ||
240 | { | ||
241 | int size, i; | ||
242 | |||
243 | size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie); | ||
244 | pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL); | ||
245 | if (!pbm->msiq_irq_cookies) | ||
246 | return -ENOMEM; | ||
247 | |||
248 | for (i = 0; i < pbm->msiq_num; i++) { | ||
249 | struct sparc64_msiq_cookie *p; | ||
250 | |||
251 | p = &pbm->msiq_irq_cookies[i]; | ||
252 | p->pbm = pbm; | ||
253 | p->msiqid = pbm->msiq_first + i; | ||
254 | } | ||
255 | |||
256 | size = pbm->msi_num * sizeof(unsigned int); | ||
257 | pbm->msi_irq_table = kzalloc(size, GFP_KERNEL); | ||
258 | if (!pbm->msi_irq_table) { | ||
259 | kfree(pbm->msiq_irq_cookies); | ||
260 | pbm->msiq_irq_cookies = NULL; | ||
261 | return -ENOMEM; | ||
262 | } | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static void msi_table_free(struct pci_pbm_info *pbm) | ||
268 | { | ||
269 | kfree(pbm->msiq_irq_cookies); | ||
270 | pbm->msiq_irq_cookies = NULL; | ||
271 | |||
272 | kfree(pbm->msi_irq_table); | ||
273 | pbm->msi_irq_table = NULL; | ||
274 | } | ||
275 | |||
/* Bring up one MSI event queue: build its virtual IRQ via the backend,
 * steer it toward the PBM's NUMA node when known, and install
 * sparc64_msiq_interrupt as its handler.  Returns 0 or a negative
 * errno.
 */
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	/* Prefer CPUs on the controller's NUMA node, if it has one. */
	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask = node_to_cpumask(nid);

		irq_set_affinity(irq, numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}
301 | |||
302 | static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm, | ||
303 | const struct sparc64_msiq_ops *ops) | ||
304 | { | ||
305 | int i; | ||
306 | |||
307 | for (i = 0; i < pbm->msiq_num; i++) { | ||
308 | unsigned long msiqid = i + pbm->msiq_first; | ||
309 | unsigned long devino = i + pbm->msiq_first_devino; | ||
310 | int err; | ||
311 | |||
312 | err = bringup_one_msi_queue(pbm, ops, msiqid, devino); | ||
313 | if (err) | ||
314 | return err; | ||
315 | } | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
/* Common sparc64 MSI bring-up: parse the OF MSI properties of the
 * PBM's node, allocate the bitmap/tables/queues, and bring up the
 * event queue interrupts.  On any failure (or missing properties) the
 * PBM is left with MSI disabled (msiq_num = 0) — callers never see an
 * error.  On success pbm->setup_msi_irq / teardown_msi_irq are
 * installed for the PCI MSI core to use.
 */
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		/* Shapes of the firmware-provided MSI properties. */
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		/* Property name varies between firmware revisions. */
		mqp = of_get_property(pbm->op->node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		/* Allocate in dependency order, unwinding on failure. */
		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
		       "addr64[0x%lx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c new file mode 100644 index 000000000000..dfb3ec892987 --- /dev/null +++ b/arch/sparc/kernel/pci_psycho.c | |||
@@ -0,0 +1,618 @@ | |||
1 | /* pci_psycho.c: PSYCHO/U2P specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/of_device.h> | ||
15 | |||
16 | #include <asm/iommu.h> | ||
17 | #include <asm/irq.h> | ||
18 | #include <asm/starfire.h> | ||
19 | #include <asm/prom.h> | ||
20 | #include <asm/upa.h> | ||
21 | |||
22 | #include "pci_impl.h" | ||
23 | #include "iommu_common.h" | ||
24 | #include "psycho_common.h" | ||
25 | |||
26 | #define DRIVER_NAME "psycho" | ||
27 | #define PFX DRIVER_NAME ": " | ||
28 | |||
29 | /* Misc. PSYCHO PCI controller register offsets and definitions. */ | ||
30 | #define PSYCHO_CONTROL 0x0010UL | ||
31 | #define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/ | ||
32 | #define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */ | ||
33 | #define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */ | ||
34 | #define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */ | ||
35 | #define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */ | ||
36 | #define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */ | ||
37 | #define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */ | ||
38 | #define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */ | ||
39 | #define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */ | ||
40 | #define PSYCHO_PCIA_CTRL 0x2000UL | ||
41 | #define PSYCHO_PCIB_CTRL 0x4000UL | ||
42 | #define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */ | ||
43 | #define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */ | ||
44 | #define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */ | ||
45 | #define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */ | ||
46 | #define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */ | ||
47 | #define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */ | ||
48 | #define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */ | ||
49 | #define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */ | ||
50 | #define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */ | ||
51 | #define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */ | ||
52 | #define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */ | ||
53 | #define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */ | ||
54 | |||
55 | /* PSYCHO error handling support. */ | ||
56 | |||
57 | /* Helper function of IOMMU error checking, which checks out | ||
58 | * the state of the streaming buffers. The IOMMU lock is | ||
59 | * held when this is called. | ||
60 | * | ||
61 | * For the PCI error case we know which PBM (and thus which | ||
62 | * streaming buffer) caused the error, but for the uncorrectable | ||
63 | * error case we do not. So we always check both streaming caches. | ||
64 | */ | ||
65 | #define PSYCHO_STRBUF_CONTROL_A 0x2800UL | ||
66 | #define PSYCHO_STRBUF_CONTROL_B 0x4800UL | ||
67 | #define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ | ||
68 | #define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ | ||
69 | #define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ | ||
70 | #define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ | ||
71 | #define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ | ||
72 | #define PSYCHO_STRBUF_FLUSH_A 0x2808UL | ||
73 | #define PSYCHO_STRBUF_FLUSH_B 0x4808UL | ||
74 | #define PSYCHO_STRBUF_FSYNC_A 0x2810UL | ||
75 | #define PSYCHO_STRBUF_FSYNC_B 0x4810UL | ||
76 | #define PSYCHO_STC_DATA_A 0xb000UL | ||
77 | #define PSYCHO_STC_DATA_B 0xc000UL | ||
78 | #define PSYCHO_STC_ERR_A 0xb400UL | ||
79 | #define PSYCHO_STC_ERR_B 0xc400UL | ||
80 | #define PSYCHO_STC_TAG_A 0xb800UL | ||
81 | #define PSYCHO_STC_TAG_B 0xc800UL | ||
82 | #define PSYCHO_STC_LINE_A 0xb900UL | ||
83 | #define PSYCHO_STC_LINE_B 0xc900UL | ||
84 | |||
85 | /* When an Uncorrectable Error or a PCI Error happens, we | ||
86 | * interrogate the IOMMU state to see if it is the cause. | ||
87 | */ | ||
88 | #define PSYCHO_IOMMU_CONTROL 0x0200UL | ||
89 | #define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ | ||
90 | #define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ | ||
91 | #define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ | ||
92 | #define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ | ||
93 | #define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ | ||
94 | #define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ | ||
95 | #define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ | ||
96 | #define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ | ||
97 | #define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ | ||
98 | #define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ | ||
99 | #define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ | ||
100 | #define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ | ||
101 | #define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ | ||
102 | #define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ | ||
103 | #define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ | ||
104 | #define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ | ||
105 | #define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ | ||
106 | #define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ | ||
107 | #define PSYCHO_IOMMU_TSBBASE 0x0208UL | ||
108 | #define PSYCHO_IOMMU_FLUSH 0x0210UL | ||
109 | #define PSYCHO_IOMMU_TAG 0xa580UL | ||
110 | #define PSYCHO_IOMMU_DATA 0xa600UL | ||
111 | |||
112 | /* Uncorrectable Errors. Cause of the error and the address are | ||
113 | * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors | ||
114 | * relating to UPA interface transactions. | ||
115 | */ | ||
116 | #define PSYCHO_UE_AFSR 0x0030UL | ||
117 | #define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */ | ||
118 | #define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */ | ||
119 | #define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */ | ||
120 | #define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ | ||
121 | #define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */ | ||
122 | #define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/ | ||
123 | #define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ | ||
124 | #define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ | ||
125 | #define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */ | ||
126 | #define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */ | ||
127 | #define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */ | ||
128 | #define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */ | ||
129 | #define PSYCHO_UE_AFAR 0x0038UL | ||
130 | |||
131 | static irqreturn_t psycho_ue_intr(int irq, void *dev_id) | ||
132 | { | ||
133 | struct pci_pbm_info *pbm = dev_id; | ||
134 | unsigned long afsr_reg = pbm->controller_regs + PSYCHO_UE_AFSR; | ||
135 | unsigned long afar_reg = pbm->controller_regs + PSYCHO_UE_AFAR; | ||
136 | unsigned long afsr, afar, error_bits; | ||
137 | int reported; | ||
138 | |||
139 | /* Latch uncorrectable error status. */ | ||
140 | afar = upa_readq(afar_reg); | ||
141 | afsr = upa_readq(afsr_reg); | ||
142 | |||
143 | /* Clear the primary/secondary error status bits. */ | ||
144 | error_bits = afsr & | ||
145 | (PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR | | ||
146 | PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR); | ||
147 | if (!error_bits) | ||
148 | return IRQ_NONE; | ||
149 | upa_writeq(error_bits, afsr_reg); | ||
150 | |||
151 | /* Log the error. */ | ||
152 | printk("%s: Uncorrectable Error, primary error type[%s]\n", | ||
153 | pbm->name, | ||
154 | (((error_bits & PSYCHO_UEAFSR_PPIO) ? | ||
155 | "PIO" : | ||
156 | ((error_bits & PSYCHO_UEAFSR_PDRD) ? | ||
157 | "DMA Read" : | ||
158 | ((error_bits & PSYCHO_UEAFSR_PDWR) ? | ||
159 | "DMA Write" : "???"))))); | ||
160 | printk("%s: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n", | ||
161 | pbm->name, | ||
162 | (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL, | ||
163 | (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL, | ||
164 | (afsr & PSYCHO_UEAFSR_MID) >> 24UL, | ||
165 | ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0)); | ||
166 | printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); | ||
167 | printk("%s: UE Secondary errors [", pbm->name); | ||
168 | reported = 0; | ||
169 | if (afsr & PSYCHO_UEAFSR_SPIO) { | ||
170 | reported++; | ||
171 | printk("(PIO)"); | ||
172 | } | ||
173 | if (afsr & PSYCHO_UEAFSR_SDRD) { | ||
174 | reported++; | ||
175 | printk("(DMA Read)"); | ||
176 | } | ||
177 | if (afsr & PSYCHO_UEAFSR_SDWR) { | ||
178 | reported++; | ||
179 | printk("(DMA Write)"); | ||
180 | } | ||
181 | if (!reported) | ||
182 | printk("(none)"); | ||
183 | printk("]\n"); | ||
184 | |||
185 | /* Interrogate both IOMMUs for error status. */ | ||
186 | psycho_check_iommu_error(pbm, afsr, afar, UE_ERR); | ||
187 | if (pbm->sibling) | ||
188 | psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR); | ||
189 | |||
190 | return IRQ_HANDLED; | ||
191 | } | ||
192 | |||
/* Correctable Errors. */
#define PSYCHO_CE_AFSR	0x0040UL
#define  PSYCHO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
#define  PSYCHO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
#define  PSYCHO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
#define  PSYCHO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
#define  PSYCHO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
#define  PSYCHO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
#define  PSYCHO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved                     */
#define  PSYCHO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits                */
#define  PSYCHO_CEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
#define  PSYCHO_CEAFSR_DOFF	0x00000000e0000000UL /* Double Offset                */
#define  PSYCHO_CEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
#define  PSYCHO_CEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
#define  PSYCHO_CEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
/* NOTE(review): PSYCHO_CE_AFAR shares offset 0x0040 with PSYCHO_CE_AFSR
 * above.  The UE pair uses 0x0030 (AFSR) / 0x0038 (AFAR), so one would
 * expect the CE AFAR at 0x0048; as written, psycho_ce_intr() reads the
 * AFSR when it thinks it is reading the AFAR (log output only).
 * Confirm against the U2P/STP2223BGA data sheet before changing.
 */
#define PSYCHO_CE_AFAR	0x0040UL
209 | |||
210 | static irqreturn_t psycho_ce_intr(int irq, void *dev_id) | ||
211 | { | ||
212 | struct pci_pbm_info *pbm = dev_id; | ||
213 | unsigned long afsr_reg = pbm->controller_regs + PSYCHO_CE_AFSR; | ||
214 | unsigned long afar_reg = pbm->controller_regs + PSYCHO_CE_AFAR; | ||
215 | unsigned long afsr, afar, error_bits; | ||
216 | int reported; | ||
217 | |||
218 | /* Latch error status. */ | ||
219 | afar = upa_readq(afar_reg); | ||
220 | afsr = upa_readq(afsr_reg); | ||
221 | |||
222 | /* Clear primary/secondary error status bits. */ | ||
223 | error_bits = afsr & | ||
224 | (PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR | | ||
225 | PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR); | ||
226 | if (!error_bits) | ||
227 | return IRQ_NONE; | ||
228 | upa_writeq(error_bits, afsr_reg); | ||
229 | |||
230 | /* Log the error. */ | ||
231 | printk("%s: Correctable Error, primary error type[%s]\n", | ||
232 | pbm->name, | ||
233 | (((error_bits & PSYCHO_CEAFSR_PPIO) ? | ||
234 | "PIO" : | ||
235 | ((error_bits & PSYCHO_CEAFSR_PDRD) ? | ||
236 | "DMA Read" : | ||
237 | ((error_bits & PSYCHO_CEAFSR_PDWR) ? | ||
238 | "DMA Write" : "???"))))); | ||
239 | |||
240 | /* XXX Use syndrome and afar to print out module string just like | ||
241 | * XXX UDB CE trap handler does... -DaveM | ||
242 | */ | ||
243 | printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] " | ||
244 | "UPA_MID[%02lx] was_block(%d)\n", | ||
245 | pbm->name, | ||
246 | (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL, | ||
247 | (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL, | ||
248 | (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL, | ||
249 | (afsr & PSYCHO_CEAFSR_MID) >> 24UL, | ||
250 | ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0)); | ||
251 | printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); | ||
252 | printk("%s: CE Secondary errors [", pbm->name); | ||
253 | reported = 0; | ||
254 | if (afsr & PSYCHO_CEAFSR_SPIO) { | ||
255 | reported++; | ||
256 | printk("(PIO)"); | ||
257 | } | ||
258 | if (afsr & PSYCHO_CEAFSR_SDRD) { | ||
259 | reported++; | ||
260 | printk("(DMA Read)"); | ||
261 | } | ||
262 | if (afsr & PSYCHO_CEAFSR_SDWR) { | ||
263 | reported++; | ||
264 | printk("(DMA Write)"); | ||
265 | } | ||
266 | if (!reported) | ||
267 | printk("(none)"); | ||
268 | printk("]\n"); | ||
269 | |||
270 | return IRQ_HANDLED; | ||
271 | } | ||
272 | |||
273 | /* PCI Errors. They are signalled by the PCI bus module since they | ||
274 | * are associated with a specific bus segment. | ||
275 | */ | ||
276 | #define PSYCHO_PCI_AFSR_A 0x2010UL | ||
277 | #define PSYCHO_PCI_AFSR_B 0x4010UL | ||
278 | #define PSYCHO_PCI_AFAR_A 0x2018UL | ||
279 | #define PSYCHO_PCI_AFAR_B 0x4018UL | ||
280 | |||
281 | /* XXX What about PowerFail/PowerManagement??? -DaveM */ | ||
282 | #define PSYCHO_ECC_CTRL 0x0020 | ||
283 | #define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ | ||
284 | #define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ | ||
285 | #define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */ | ||
/* Hook up the UE/CE/PCIERR interrupt handlers for this PBM and enable
 * error reporting in the controller registers.  Called after the PCI
 * bus scan completes (see psycho_scan_bus).
 */
static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
{
	struct of_device *op = of_find_device_by_node(pbm->op->node);
	unsigned long base = pbm->controller_regs;
	u64 tmp;
	int err;

	if (!op)
		return;

	/* Psycho interrupt property order is:
	 * 0: PCIERR INO for this PBM
	 * 1: UE ERR
	 * 2: CE ERR
	 * 3: POWER FAIL
	 * 4: SPARE HARDWARE
	 * 5: POWER MANAGEMENT
	 */

	if (op->num_irqs < 6)
		return;

	/* The UE and CE lines are shared by the two PBMs of one PSYCHO
	 * chip, which drive the same front-end hardware; IRQF_SHARED is
	 * passed so that whichever PBM reaches this point second can
	 * register its handler on the same line.  The return values are
	 * deliberately ignored.
	 * (NOTE(review): an earlier comment here claimed IRQF_SHARED was
	 * NOT passed and the second registration would fail; the code
	 * clearly does pass IRQF_SHARED.)
	 */
	err = request_irq(op->irqs[1], psycho_ue_intr, IRQF_SHARED,
			  "PSYCHO_UE", pbm);
	err = request_irq(op->irqs[2], psycho_ce_intr, IRQF_SHARED,
			  "PSYCHO_CE", pbm);

	/* This one, however, ought not to fail.  We can just warn
	 * about it since the system can still operate properly even
	 * if this fails.
	 */
	err = request_irq(op->irqs[0], psycho_pcierr_intr, IRQF_SHARED,
			  "PSYCHO_PCIERR", pbm);
	if (err)
		printk(KERN_WARNING "%s: Could not register PCIERR, "
		       "err=%d\n", pbm->name, err);

	/* Enable UE and CE interrupts for controller. */
	upa_writeq((PSYCHO_ECCCTRL_EE |
		    PSYCHO_ECCCTRL_UE |
		    PSYCHO_ECCCTRL_CE), base + PSYCHO_ECC_CTRL);

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.
	 */
	tmp = upa_readq(base + PSYCHO_PCIA_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	upa_writeq(tmp, base + PSYCHO_PCIA_CTRL);

	tmp = upa_readq(base + PSYCHO_PCIB_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	upa_writeq(tmp, base + PSYCHO_PCIB_CTRL);
}
352 | |||
353 | /* PSYCHO boot time probing and initialization. */ | ||
354 | static void pbm_config_busmastering(struct pci_pbm_info *pbm) | ||
355 | { | ||
356 | u8 *addr; | ||
357 | |||
358 | /* Set cache-line size to 64 bytes, this is actually | ||
359 | * a nop but I do it for completeness. | ||
360 | */ | ||
361 | addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, | ||
362 | 0, PCI_CACHE_LINE_SIZE); | ||
363 | pci_config_write8(addr, 64 / sizeof(u32)); | ||
364 | |||
365 | /* Set PBM latency timer to 64 PCI clocks. */ | ||
366 | addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, | ||
367 | 0, PCI_LATENCY_TIMER); | ||
368 | pci_config_write8(addr, 64); | ||
369 | } | ||
370 | |||
/* Configure bus-mastering for this PBM, scan its PCI bus, and then
 * register the controller error interrupt handlers.  Ordering matters:
 * the handlers are installed only after the scan completes.
 */
static void __init psycho_scan_bus(struct pci_pbm_info *pbm,
				   struct device *parent)
{
	pbm_config_busmastering(pbm);
	/* NOTE(review): 66MHz capability is unconditionally disabled
	 * here — presumably PSYCHO segments are 33MHz only; confirm.
	 */
	pbm->is_66mhz_capable = 0;
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* After the PCI bus scan is complete, we can register
	 * the error interrupt handlers.
	 */
	psycho_register_error_handlers(pbm);
}
383 | |||
384 | #define PSYCHO_IRQ_RETRY 0x1a00UL | ||
385 | #define PSYCHO_PCIA_DIAG 0x2020UL | ||
386 | #define PSYCHO_PCIB_DIAG 0x4020UL | ||
387 | #define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */ | ||
388 | #define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */ | ||
389 | #define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */ | ||
390 | #define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */ | ||
391 | #define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */ | ||
392 | #define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */ | ||
393 | #define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */ | ||
394 | #define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */ | ||
395 | |||
396 | static void psycho_controller_hwinit(struct pci_pbm_info *pbm) | ||
397 | { | ||
398 | u64 tmp; | ||
399 | |||
400 | upa_writeq(5, pbm->controller_regs + PSYCHO_IRQ_RETRY); | ||
401 | |||
402 | /* Enable arbiter for all PCI slots. */ | ||
403 | tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_CTRL); | ||
404 | tmp |= PSYCHO_PCICTRL_AEN; | ||
405 | upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_CTRL); | ||
406 | |||
407 | tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_CTRL); | ||
408 | tmp |= PSYCHO_PCICTRL_AEN; | ||
409 | upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_CTRL); | ||
410 | |||
411 | /* Disable DMA write / PIO read synchronization on | ||
412 | * both PCI bus segments. | ||
413 | * [ U2P Erratum 1243770, STP2223BGA data sheet ] | ||
414 | */ | ||
415 | tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_DIAG); | ||
416 | tmp |= PSYCHO_PCIDIAG_DDWSYNC; | ||
417 | upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_DIAG); | ||
418 | |||
419 | tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_DIAG); | ||
420 | tmp |= PSYCHO_PCIDIAG_DDWSYNC; | ||
421 | upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_DIAG); | ||
422 | } | ||
423 | |||
/* Set up the streaming buffer (STC) register addresses and the
 * 64-byte-aligned flush-flag word for one PBM, then enable the
 * streaming buffer, clearing any LRU locking OBP may have left on.
 */
static void psycho_pbm_strbuf_init(struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	unsigned long base = pbm->controller_regs;
	u64 control;

	/* Select the A- or B-side STC register block for this PBM. */
	if (is_pbm_a) {
		pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
		pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
		pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A;
		pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_A;
		pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_A;
		pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_A;
	} else {
		pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B;
		pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
		pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
		pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_B;
		pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_B;
		pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_B;
	}
	/* PSYCHO's streaming buffer lacks ctx flushing. */
	pbm->stc.strbuf_ctxflush = 0;
	pbm->stc.strbuf_ctxmatch_base = 0;

	/* Round the flush-flag buffer up to the next 64-byte boundary. */
	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&pbm->stc.__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	pbm->stc.strbuf_flushflag_pa = (unsigned long)
		__pa(pbm->stc.strbuf_flushflag);

	/* Enable the streaming buffer.  We have to be careful
	 * just in case OBP left it with LRU locking enabled.
	 *
	 * It is possible to control if PBM will be rerun on
	 * line misses.  Currently I just retain whatever setting
	 * OBP left us with.  All checks so far show it having
	 * a value of zero.  (Neither RERUN macro below is defined,
	 * so the rerun-disable bit is left untouched.)
	 */
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
	control = upa_readq(pbm->stc.strbuf_control);
	control |= PSYCHO_STRBUF_CTRL_ENAB;
	control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
	control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
	control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
	upa_writeq(control, pbm->stc.strbuf_control);

	pbm->stc.strbuf_enabled = 1;
}
480 | |||
481 | #define PSYCHO_IOSPACE_A 0x002000000UL | ||
482 | #define PSYCHO_IOSPACE_B 0x002010000UL | ||
483 | #define PSYCHO_IOSPACE_SIZE 0x00000ffffUL | ||
484 | #define PSYCHO_MEMSPACE_A 0x100000000UL | ||
485 | #define PSYCHO_MEMSPACE_B 0x180000000UL | ||
486 | #define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL | ||
487 | |||
/* Per-PBM initialization: common PSYCHO bookkeeping, streaming buffer
 * setup, then the PCI bus scan (which also hooks error interrupts).
 */
static void __init psycho_pbm_init(struct pci_pbm_info *pbm,
				   struct of_device *op, int is_pbm_a)
{
	psycho_pbm_init_common(pbm, op, "PSYCHO", PBM_CHIP_TYPE_PSYCHO);
	psycho_pbm_strbuf_init(pbm, is_pbm_a);
	psycho_scan_bus(pbm, &op->dev);
}
495 | |||
496 | static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid) | ||
497 | { | ||
498 | struct pci_pbm_info *pbm; | ||
499 | |||
500 | for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { | ||
501 | if (pbm->portid == upa_portid) | ||
502 | return pbm; | ||
503 | } | ||
504 | return NULL; | ||
505 | } | ||
506 | |||
507 | #define PSYCHO_CONFIGSPACE 0x001000000UL | ||
508 | |||
/* Probe one PSYCHO "pci" OF node: allocate a pci_pbm_info, share or
 * create the per-chip IOMMU, map the controller registers, initialize
 * hardware, and scan the PBM's PCI bus.
 *
 * Both PBMs of one chip carry the same "upa-portid"; the second probe
 * finds the first via psycho_find_sibling() and reuses its IOMMU.
 *
 * Returns 0 on success, -ENOMEM or -ENODEV on failure.
 */
static int __devinit psycho_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	const struct linux_prom64_registers *pr_regs;
	struct device_node *dp = op->node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	int is_pbm_a, err;
	u32 upa_portid;

	upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
		goto out_err;
	}

	/* The IOMMU is shared chip-wide; only the first PBM of a pair
	 * allocates (and, on error, frees) it.
	 */
	pbm->sibling = psycho_find_sibling(upa_portid);
	if (pbm->sibling) {
		iommu = pbm->sibling->iommu;
	} else {
		iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
		if (!iommu) {
			printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
			goto out_free_controller;
		}
	}

	pbm->iommu = iommu;
	pbm->portid = upa_portid;

	/* NOTE(review): the length of "reg" is not checked; three
	 * entries are assumed (pr_regs[2] is dereferenced below).
	 */
	pr_regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!pr_regs) {
		printk(KERN_ERR PFX "No reg property.\n");
		goto out_free_iommu;
	}

	/* PBM A vs. B is encoded in bits [14:13] of the first reg
	 * entry's physical address.
	 */
	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);

	pbm->controller_regs = pr_regs[2].phys_addr;
	pbm->config_space = (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);

	/* Select the per-PBM error/control register addresses. */
	if (is_pbm_a) {
		pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_A;
		pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_A;
		pbm->pci_csr  = pbm->controller_regs + PSYCHO_PCIA_CTRL;
	} else {
		pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_B;
		pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_B;
		pbm->pci_csr  = pbm->controller_regs + PSYCHO_PCIB_CTRL;
	}

	psycho_controller_hwinit(pbm);
	if (!pbm->sibling) {
		err = psycho_iommu_init(pbm, 128, 0xc0000000,
					0xffffffff, PSYCHO_CONTROL);
		if (err)
			goto out_free_iommu;

		/* If necessary, hook us up for starfire IRQ translations. */
		if (this_is_starfire)
			starfire_hookup(pbm->portid);
	}

	psycho_pbm_init(pbm, op, is_pbm_a);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	/* Link the pair together once both halves exist. */
	if (pbm->sibling)
		pbm->sibling->sibling = pbm;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	/* A shared IOMMU belongs to the sibling; free only if we own it. */
	if (!pbm->sibling)
		kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}
598 | |||
599 | static struct of_device_id __initdata psycho_match[] = { | ||
600 | { | ||
601 | .name = "pci", | ||
602 | .compatible = "pci108e,8000", | ||
603 | }, | ||
604 | {}, | ||
605 | }; | ||
606 | |||
607 | static struct of_platform_driver psycho_driver = { | ||
608 | .name = DRIVER_NAME, | ||
609 | .match_table = psycho_match, | ||
610 | .probe = psycho_probe, | ||
611 | }; | ||
612 | |||
613 | static int __init psycho_init(void) | ||
614 | { | ||
615 | return of_register_driver(&psycho_driver, &of_bus_type); | ||
616 | } | ||
617 | |||
618 | subsys_initcall(psycho_init); | ||
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c new file mode 100644 index 000000000000..713257b6963c --- /dev/null +++ b/arch/sparc/kernel/pci_sabre.c | |||
@@ -0,0 +1,609 @@ | |||
1 | /* pci_sabre.c: Sabre specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/of_device.h> | ||
15 | |||
16 | #include <asm/apb.h> | ||
17 | #include <asm/iommu.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include <asm/prom.h> | ||
20 | #include <asm/upa.h> | ||
21 | |||
22 | #include "pci_impl.h" | ||
23 | #include "iommu_common.h" | ||
24 | #include "psycho_common.h" | ||
25 | |||
26 | #define DRIVER_NAME "sabre" | ||
27 | #define PFX DRIVER_NAME ": " | ||
28 | |||
29 | /* SABRE PCI controller register offsets and definitions. */ | ||
30 | #define SABRE_UE_AFSR 0x0030UL | ||
31 | #define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ | ||
32 | #define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ | ||
33 | #define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ | ||
34 | #define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ | ||
35 | #define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */ | ||
36 | #define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */ | ||
37 | #define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ | ||
38 | #define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */ | ||
39 | #define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */ | ||
40 | #define SABRE_UECE_AFAR 0x0038UL | ||
41 | #define SABRE_CE_AFSR 0x0040UL | ||
42 | #define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ | ||
43 | #define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ | ||
44 | #define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ | ||
45 | #define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ | ||
46 | #define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */ | ||
47 | #define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ | ||
48 | #define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */ | ||
49 | #define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */ | ||
50 | #define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */ | ||
51 | #define SABRE_IOMMU_CONTROL 0x0200UL | ||
52 | #define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */ | ||
53 | #define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */ | ||
54 | #define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */ | ||
55 | #define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */ | ||
56 | #define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ | ||
57 | #define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000 | ||
58 | #define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000 | ||
59 | #define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000 | ||
60 | #define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000 | ||
61 | #define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000 | ||
62 | #define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000 | ||
63 | #define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000 | ||
64 | #define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000 | ||
65 | #define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */ | ||
66 | #define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ | ||
67 | #define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ | ||
68 | #define SABRE_IOMMU_TSBBASE 0x0208UL | ||
69 | #define SABRE_IOMMU_FLUSH 0x0210UL | ||
70 | #define SABRE_IMAP_A_SLOT0 0x0c00UL | ||
71 | #define SABRE_IMAP_B_SLOT0 0x0c20UL | ||
72 | #define SABRE_IMAP_SCSI 0x1000UL | ||
73 | #define SABRE_IMAP_ETH 0x1008UL | ||
74 | #define SABRE_IMAP_BPP 0x1010UL | ||
75 | #define SABRE_IMAP_AU_REC 0x1018UL | ||
76 | #define SABRE_IMAP_AU_PLAY 0x1020UL | ||
77 | #define SABRE_IMAP_PFAIL 0x1028UL | ||
78 | #define SABRE_IMAP_KMS 0x1030UL | ||
79 | #define SABRE_IMAP_FLPY 0x1038UL | ||
80 | #define SABRE_IMAP_SHW 0x1040UL | ||
81 | #define SABRE_IMAP_KBD 0x1048UL | ||
82 | #define SABRE_IMAP_MS 0x1050UL | ||
83 | #define SABRE_IMAP_SER 0x1058UL | ||
84 | #define SABRE_IMAP_UE 0x1070UL | ||
85 | #define SABRE_IMAP_CE 0x1078UL | ||
86 | #define SABRE_IMAP_PCIERR 0x1080UL | ||
87 | #define SABRE_IMAP_GFX 0x1098UL | ||
88 | #define SABRE_IMAP_EUPA 0x10a0UL | ||
89 | #define SABRE_ICLR_A_SLOT0 0x1400UL | ||
90 | #define SABRE_ICLR_B_SLOT0 0x1480UL | ||
91 | #define SABRE_ICLR_SCSI 0x1800UL | ||
92 | #define SABRE_ICLR_ETH 0x1808UL | ||
93 | #define SABRE_ICLR_BPP 0x1810UL | ||
94 | #define SABRE_ICLR_AU_REC 0x1818UL | ||
95 | #define SABRE_ICLR_AU_PLAY 0x1820UL | ||
96 | #define SABRE_ICLR_PFAIL 0x1828UL | ||
97 | #define SABRE_ICLR_KMS 0x1830UL | ||
98 | #define SABRE_ICLR_FLPY 0x1838UL | ||
99 | #define SABRE_ICLR_SHW 0x1840UL | ||
100 | #define SABRE_ICLR_KBD 0x1848UL | ||
101 | #define SABRE_ICLR_MS 0x1850UL | ||
102 | #define SABRE_ICLR_SER 0x1858UL | ||
103 | #define SABRE_ICLR_UE 0x1870UL | ||
104 | #define SABRE_ICLR_CE 0x1878UL | ||
105 | #define SABRE_ICLR_PCIERR 0x1880UL | ||
106 | #define SABRE_WRSYNC 0x1c20UL | ||
107 | #define SABRE_PCICTRL 0x2000UL | ||
108 | #define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */ | ||
109 | #define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */ | ||
110 | #define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */ | ||
111 | #define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */ | ||
112 | #define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */ | ||
113 | #define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */ | ||
114 | #define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */ | ||
115 | #define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */ | ||
116 | #define SABRE_PIOAFSR 0x2010UL | ||
117 | #define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */ | ||
118 | #define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */ | ||
119 | #define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */ | ||
120 | #define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */ | ||
121 | #define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */ | ||
122 | #define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */ | ||
123 | #define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */ | ||
124 | #define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */ | ||
125 | #define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */ | ||
126 | #define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */ | ||
127 | #define SABRE_PIOAFAR 0x2018UL | ||
128 | #define SABRE_PCIDIAG 0x2020UL | ||
129 | #define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */ | ||
130 | #define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */ | ||
131 | #define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */ | ||
132 | #define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */ | ||
133 | #define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */ | ||
134 | #define SABRE_PCITASR 0x2028UL | ||
135 | #define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */ | ||
136 | #define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */ | ||
137 | #define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */ | ||
138 | #define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */ | ||
139 | #define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */ | ||
140 | #define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */ | ||
141 | #define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */ | ||
142 | #define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */ | ||
143 | #define SABRE_PIOBUF_DIAG 0x5000UL | ||
144 | #define SABRE_DMABUF_DIAGLO 0x5100UL | ||
145 | #define SABRE_DMABUF_DIAGHI 0x51c0UL | ||
146 | #define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */ | ||
147 | #define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */ | ||
148 | #define SABRE_IOMMU_VADIAG 0xa400UL | ||
149 | #define SABRE_IOMMU_TCDIAG 0xa408UL | ||
150 | #define SABRE_IOMMU_TAG 0xa580UL | ||
151 | #define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */ | ||
152 | #define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */ | ||
153 | #define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */ | ||
154 | #define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */ | ||
155 | #define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */ | ||
156 | #define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */ | ||
157 | #define SABRE_IOMMU_DATA 0xa600UL | ||
158 | #define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */ | ||
159 | #define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */ | ||
160 | #define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */ | ||
161 | #define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */ | ||
162 | #define SABRE_PCI_IRQSTATE 0xa800UL | ||
163 | #define SABRE_OBIO_IRQSTATE 0xa808UL | ||
164 | #define SABRE_FFBCFG 0xf000UL | ||
165 | #define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */ | ||
166 | #define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */ | ||
167 | #define SABRE_MCCTRL0 0xf010UL | ||
168 | #define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */ | ||
169 | #define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */ | ||
170 | #define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */ | ||
171 | #define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */ | ||
172 | #define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */ | ||
173 | #define SABRE_MCCTRL1 0xf018UL | ||
174 | #define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */ | ||
175 | #define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */ | ||
176 | #define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */ | ||
177 | #define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */ | ||
178 | #define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */ | ||
179 | #define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */ | ||
180 | #define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */ | ||
181 | #define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */ | ||
182 | #define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */ | ||
183 | #define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */ | ||
184 | #define SABRE_RESETCTRL 0xf020UL | ||
185 | |||
186 | #define SABRE_CONFIGSPACE 0x001000000UL | ||
187 | #define SABRE_IOSPACE 0x002000000UL | ||
188 | #define SABRE_IOSPACE_SIZE 0x000ffffffUL | ||
189 | #define SABRE_MEMSPACE 0x100000000UL | ||
190 | #define SABRE_MEMSPACE_SIZE 0x07fffffffUL | ||
191 | |||
/* Non-zero when the controller is a Hummingbird variant (set in
 * sabre_probe(), either from the OF match data or by spotting an
 * UltraSPARC-IIe cpu node).  Hummingbird systems have no APB bridge
 * and are treated as 66MHz capable in sabre_scan_bus().
 */
static int hummingbird_p;
/* Root PCI bus of the single supported SABRE; cached by
 * sabre_scan_bus() after the bus scan succeeds.
 */
static struct pci_bus *sabre_root_bus;
194 | |||
/* Interrupt handler for SABRE uncorrectable (UE) ECC errors.
 *
 * Latches the UE AFAR/AFSR pair, acknowledges the latched primary and
 * secondary error bits by writing them back to the AFSR, logs what is
 * known about the fault, then has the common PSYCHO code interrogate
 * the IOMMU for a related translation error.
 *
 * Returns IRQ_NONE when no UE error bits were set (shared-IRQ false
 * alarm), IRQ_HANDLED otherwise.
 */
static irqreturn_t sabre_ue_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	unsigned long afsr_reg = pbm->controller_regs + SABRE_UE_AFSR;
	unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status.  AFAR is read first so the
	 * address corresponds to the status we then read.
	 */
	afar = upa_readq(afar_reg);
	afsr = upa_readq(afsr_reg);

	/* Clear the primary/secondary error status bits (writing the
	 * set bits back acknowledges them).
	 */
	error_bits = afsr &
		(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
	if (!error_bits)
		return IRQ_NONE;
	upa_writeq(error_bits, afsr_reg);

	/* Log the error: primary cause, byte mask/offset, address,
	 * then any secondary causes.
	 */
	printk("%s: Uncorrectable Error, primary error type[%s%s]\n",
	       pbm->name,
	       ((error_bits & SABRE_UEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_UEAFSR_PDWR) ?
		 "DMA Write" : "???")),
	       ((error_bits & SABRE_UEAFSR_PDTE) ?
		":Translation Error" : ""));
	printk("%s: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
	       pbm->name,
	       (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_UEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
	printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
	printk("%s: UE Secondary errors [", pbm->name);
	reported = 0;
	if (afsr & SABRE_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (afsr & SABRE_UEAFSR_SDTE) {
		reported++;
		printk("(Translation Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);

	return IRQ_HANDLED;
}
254 | |||
/* Interrupt handler for SABRE correctable (CE) ECC errors.
 *
 * Mirrors sabre_ue_intr(): latch AFAR/AFSR, acknowledge the latched
 * primary/secondary error bits by writing them back, and log the
 * syndrome, byte mask and fault address.  Correctable errors do not
 * require IOMMU interrogation.
 *
 * Returns IRQ_NONE when no CE error bits were set, IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t sabre_ce_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	unsigned long afsr_reg = pbm->controller_regs + SABRE_CE_AFSR;
	unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status (AFAR before AFSR, as in the UE path). */
	afar = upa_readq(afar_reg);
	afsr = upa_readq(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("%s: Correctable Error, primary error type[%s]\n",
	       pbm->name,
	       ((error_bits & SABRE_CEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_CEAFSR_PDWR) ?
		 "DMA Write" : "???")));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "was_block(%d)\n",
	       pbm->name,
	       (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
	       (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_CEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
	printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
	printk("%s: CE Secondary errors [", pbm->name);
	reported = 0;
	if (afsr & SABRE_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
310 | |||
/* Wire up the UE, CE and PCI-error interrupts for this PBM, then
 * enable error interrupt generation in the PCI control register.
 *
 * The IRQ resources live on the controller's OF node: for a true
 * SABRE that is the parent of the PBM node; Hummingbird nodes carry
 * them directly.  Registration is best-effort -- a failed request_irq
 * is only warned about.
 *
 * NOTE(review): the of_device reference from of_find_device_by_node()
 * is not released here, and an op with fewer than 4 irqs returns
 * without cleanup -- presumably acceptable for this one-shot boot
 * path; confirm against the OF device API in this tree.
 */
static void sabre_register_error_handlers(struct pci_pbm_info *pbm)
{
	struct device_node *dp = pbm->op->node;
	struct of_device *op;
	unsigned long base = pbm->controller_regs;
	u64 tmp;
	int err;

	if (pbm->chip_type == PBM_CHIP_TYPE_SABRE)
		dp = dp->parent;

	op = of_find_device_by_node(dp);
	if (!op)
		return;

	/* Sabre/Hummingbird IRQ property layout is:
	 * 0: PCI ERR
	 * 1: UE ERR
	 * 2: CE ERR
	 * 3: POWER FAIL
	 */
	if (op->num_irqs < 4)
		return;

	/* We clear the error bits in the appropriate AFSR before
	 * registering the handler so that we don't get spurious
	 * interrupts.
	 */
	upa_writeq((SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		    SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		    SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE),
		   base + SABRE_UE_AFSR);

	err = request_irq(op->irqs[1], sabre_ue_intr, 0, "SABRE_UE", pbm);
	if (err)
		printk(KERN_WARNING "%s: Couldn't register UE, err=%d.\n",
		       pbm->name, err);

	/* Same dance for CE: clear stale AFSR state first. */
	upa_writeq((SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		    SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR),
		   base + SABRE_CE_AFSR);


	err = request_irq(op->irqs[2], sabre_ce_intr, 0, "SABRE_CE", pbm);
	if (err)
		printk(KERN_WARNING "%s: Couldn't register CE, err=%d.\n",
		       pbm->name, err);
	err = request_irq(op->irqs[0], psycho_pcierr_intr, 0,
			  "SABRE_PCIERR", pbm);
	if (err)
		printk(KERN_WARNING "%s: Couldn't register PCIERR, err=%d.\n",
		       pbm->name, err);

	/* Handlers are in place; now let the controller raise error
	 * interrupts.
	 */
	tmp = upa_readq(base + SABRE_PCICTRL);
	tmp |= SABRE_PCICTRL_ERREN;
	upa_writeq(tmp, base + SABRE_PCICTRL);
}
368 | |||
369 | static void apb_init(struct pci_bus *sabre_bus) | ||
370 | { | ||
371 | struct pci_dev *pdev; | ||
372 | |||
373 | list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { | ||
374 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | ||
375 | pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { | ||
376 | u16 word16; | ||
377 | |||
378 | pci_read_config_word(pdev, PCI_COMMAND, &word16); | ||
379 | word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | | ||
380 | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | | ||
381 | PCI_COMMAND_IO; | ||
382 | pci_write_config_word(pdev, PCI_COMMAND, word16); | ||
383 | |||
384 | /* Status register bits are "write 1 to clear". */ | ||
385 | pci_write_config_word(pdev, PCI_STATUS, 0xffff); | ||
386 | pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff); | ||
387 | |||
388 | /* Use a primary/seconday latency timer value | ||
389 | * of 64. | ||
390 | */ | ||
391 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); | ||
392 | pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64); | ||
393 | |||
394 | /* Enable reporting/forwarding of master aborts, | ||
395 | * parity, and SERR. | ||
396 | */ | ||
397 | pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL, | ||
398 | (PCI_BRIDGE_CTL_PARITY | | ||
399 | PCI_BRIDGE_CTL_SERR | | ||
400 | PCI_BRIDGE_CTL_MASTER_ABORT)); | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static void __init sabre_scan_bus(struct pci_pbm_info *pbm, | ||
406 | struct device *parent) | ||
407 | { | ||
408 | static int once; | ||
409 | |||
410 | /* The APB bridge speaks to the Sabre host PCI bridge | ||
411 | * at 66Mhz, but the front side of APB runs at 33Mhz | ||
412 | * for both segments. | ||
413 | * | ||
414 | * Hummingbird systems do not use APB, so they run | ||
415 | * at 66MHZ. | ||
416 | */ | ||
417 | if (hummingbird_p) | ||
418 | pbm->is_66mhz_capable = 1; | ||
419 | else | ||
420 | pbm->is_66mhz_capable = 0; | ||
421 | |||
422 | /* This driver has not been verified to handle | ||
423 | * multiple SABREs yet, so trap this. | ||
424 | * | ||
425 | * Also note that the SABRE host bridge is hardwired | ||
426 | * to live at bus 0. | ||
427 | */ | ||
428 | if (once != 0) { | ||
429 | printk(KERN_ERR PFX "Multiple controllers unsupported.\n"); | ||
430 | return; | ||
431 | } | ||
432 | once++; | ||
433 | |||
434 | pbm->pci_bus = pci_scan_one_pbm(pbm, parent); | ||
435 | if (!pbm->pci_bus) | ||
436 | return; | ||
437 | |||
438 | sabre_root_bus = pbm->pci_bus; | ||
439 | |||
440 | apb_init(pbm->pci_bus); | ||
441 | |||
442 | sabre_register_error_handlers(pbm); | ||
443 | } | ||
444 | |||
/* PBM setup for SABRE: run the common PSYCHO initialization, point
 * the PIO error registers (AFSR/AFAR) and the PCI control register at
 * their SABRE offsets, then scan the bus underneath.
 */
static void __init sabre_pbm_init(struct pci_pbm_info *pbm,
				  struct of_device *op)
{
	psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
	pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
	pbm->pci_afar = pbm->controller_regs + SABRE_PIOAFAR;
	pbm->pci_csr = pbm->controller_regs + SABRE_PCICTRL;
	sabre_scan_bus(pbm, &op->dev);
}
454 | |||
/* Probe one SABRE/Hummingbird PCI controller.
 *
 * Allocates the PBM and IOMMU state, takes the register base from the
 * first "reg" entry, quiesces all PCI and OBIO interrupt-clear
 * registers, programs the basic PCI control bits (error interrupts
 * are enabled later, after the bus scan), sizes the IOMMU from the
 * "virtual-dma" property, and finally builds the PBM and looks for
 * the APB bridge underneath.
 *
 * Returns 0 on success or a negative errno on failure, with all
 * allocations released via the goto-cleanup chain at the bottom.
 */
static int __devinit sabre_probe(struct of_device *op,
				 const struct of_device_id *match)
{
	const struct linux_prom64_registers *pr_regs;
	struct device_node *dp = op->node;
	struct pci_pbm_info *pbm;
	u32 upa_portid, dma_mask;
	struct iommu *iommu;
	int tsbsize, err;
	const u32 *vdma;
	u64 clear_irq;

	/* Hummingbird is flagged via match->data; failing that, detect
	 * it by looking for an UltraSPARC-IIe cpu node.
	 */
	hummingbird_p = (match->data != NULL);
	if (!hummingbird_p) {
		struct device_node *cpu_dp;

		/* Of course, Sun has to encode things a thousand
		 * different ways, inconsistently.
		 */
		for_each_node_by_type(cpu_dp, "cpu") {
			if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe"))
				hummingbird_p = 1;
		}
	}

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);

	pbm->portid = upa_portid;

	/*
	 * Map in SABRE register set and report the presence of this SABRE.
	 */

	pr_regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!pr_regs) {
		printk(KERN_ERR PFX "No reg property\n");
		goto out_free_iommu;
	}

	/*
	 * First REG in property is base of entire SABRE register space.
	 */
	pbm->controller_regs = pr_regs[0].phys_addr;

	/* Clear interrupts */

	/* PCI first */
	for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
		upa_writeq(0x0UL, pbm->controller_regs + clear_irq);

	/* Then OBIO */
	for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
		upa_writeq(0x0UL, pbm->controller_regs + clear_irq);

	/* Error interrupts are enabled later after the bus scan. */
	upa_writeq((SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
		    SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN),
		   pbm->controller_regs + SABRE_PCICTRL);

	/* Now map in PCI config space for entire SABRE. */
	pbm->config_space = pbm->controller_regs + SABRE_CONFIGSPACE;

	/* "virtual-dma" gives the DVMA window base (vdma[0]) and size
	 * (vdma[1]); derive the DMA address mask and IOMMU TSB size
	 * from the window size.
	 */
	vdma = of_get_property(dp, "virtual-dma", NULL);
	if (!vdma) {
		printk(KERN_ERR PFX "No virtual-dma property\n");
		goto out_free_iommu;
	}

	dma_mask = vdma[0];
	switch(vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;
	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 128;
		break;
	default:
		printk(KERN_ERR PFX "Strange virtual-dma size.\n");
		goto out_free_iommu;
	}

	err = psycho_iommu_init(pbm, tsbsize, vdma[0], dma_mask, SABRE_WRSYNC);
	if (err)
		goto out_free_iommu;

	/*
	 * Look for APB underneath.
	 */
	sabre_pbm_init(pbm, op);

	/* Success: link the PBM into the global list and stash it in
	 * the device's driver data.
	 */
	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}
584 | |||
585 | static struct of_device_id __initdata sabre_match[] = { | ||
586 | { | ||
587 | .name = "pci", | ||
588 | .compatible = "pci108e,a001", | ||
589 | .data = (void *) 1, | ||
590 | }, | ||
591 | { | ||
592 | .name = "pci", | ||
593 | .compatible = "pci108e,a000", | ||
594 | }, | ||
595 | {}, | ||
596 | }; | ||
597 | |||
/* of_platform driver glue: binds sabre_probe() to device nodes
 * matched by sabre_match above.
 */
static struct of_platform_driver sabre_driver = {
	.name		= DRIVER_NAME,
	.match_table	= sabre_match,
	.probe		= sabre_probe,
};
603 | |||
/* Register the SABRE driver on the OF bus.  Run at subsys_initcall
 * time so the PCI controller exists before ordinary device initcalls
 * probe devices behind it.
 */
static int __init sabre_init(void)
{
	return of_register_driver(&sabre_driver, &of_bus_type);
}

subsys_initcall(sabre_init);
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c new file mode 100644 index 000000000000..45d9dba1ba11 --- /dev/null +++ b/arch/sparc/kernel/pci_schizo.c | |||
@@ -0,0 +1,1504 @@ | |||
1 | /* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/of_device.h> | ||
13 | |||
14 | #include <asm/iommu.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/pstate.h> | ||
17 | #include <asm/prom.h> | ||
18 | #include <asm/upa.h> | ||
19 | |||
20 | #include "pci_impl.h" | ||
21 | #include "iommu_common.h" | ||
22 | |||
23 | #define DRIVER_NAME "schizo" | ||
24 | #define PFX DRIVER_NAME ": " | ||
25 | |||
26 | /* This is a convention that at least Excalibur and Merlin | ||
27 | * follow. I suppose the SCHIZO used in Starcat and friends | ||
28 | * will do similar. | ||
29 | * | ||
30 | * The only way I could see this changing is if the newlink | ||
31 | * block requires more space in Schizo's address space than | ||
32 | * they predicted, thus requiring an address space reorg when | ||
33 | * the newer Schizo is taped out. | ||
34 | */ | ||
35 | |||
36 | /* Streaming buffer control register. */ | ||
37 | #define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ | ||
38 | #define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ | ||
39 | #define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ | ||
40 | #define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ | ||
41 | #define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ | ||
42 | |||
43 | /* IOMMU control register. */ | ||
44 | #define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ | ||
45 | #define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ | ||
46 | #define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ | ||
47 | #define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ | ||
48 | #define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ | ||
49 | #define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ | ||
50 | #define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ | ||
51 | #define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ | ||
52 | #define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ | ||
53 | #define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ | ||
54 | #define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ | ||
55 | #define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ | ||
56 | #define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ | ||
57 | #define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ | ||
58 | #define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ | ||
59 | #define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ | ||
60 | #define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ | ||
61 | #define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ | ||
62 | |||
63 | /* Schizo config space address format is nearly identical to | ||
64 | * that of PSYCHO: | ||
65 | * | ||
66 | * 32 24 23 16 15 11 10 8 7 2 1 0 | ||
67 | * --------------------------------------------------------- | ||
68 | * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 | | ||
69 | * --------------------------------------------------------- | ||
70 | */ | ||
71 | #define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space) | ||
72 | #define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \ | ||
73 | (((unsigned long)(BUS) << 16) | \ | ||
74 | ((unsigned long)(DEVFN) << 8) | \ | ||
75 | ((unsigned long)(REG))) | ||
76 | |||
77 | static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm, | ||
78 | unsigned char bus, | ||
79 | unsigned int devfn, | ||
80 | int where) | ||
81 | { | ||
82 | if (!pbm) | ||
83 | return NULL; | ||
84 | bus -= pbm->pci_first_busno; | ||
85 | return (void *) | ||
86 | (SCHIZO_CONFIG_BASE(pbm) | | ||
87 | SCHIZO_CONFIG_ENCODE(bus, devfn, where)); | ||
88 | } | ||
89 | |||
90 | /* SCHIZO error handling support. */ | ||
/* Broad classification of the error interrupt sources handled below. */
enum schizo_error_type {
	UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
};

/* Scratch buffers for capturing streaming-cache (STC) diagnostic state.
 * Shared by all PBMs, so stc_buf_lock serializes every dump.
 */
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
99 | |||
100 | #define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */ | ||
101 | #define SCHIZO_CE_INO 0x31 /* Correctable ECC error */ | ||
102 | #define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */ | ||
103 | #define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */ | ||
104 | #define SCHIZO_SERR_INO 0x34 /* Safari interface error */ | ||
105 | |||
106 | #define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ | ||
107 | #define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ | ||
108 | #define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ | ||
109 | |||
110 | #define SCHIZO_STCERR_WRITE 0x2UL | ||
111 | #define SCHIZO_STCERR_READ 0x1UL | ||
112 | |||
113 | #define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL | ||
114 | #define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL | ||
115 | #define SCHIZO_STCTAG_VALID 0x8000000000000000UL | ||
116 | #define SCHIZO_STCTAG_READ 0x4000000000000000UL | ||
117 | |||
118 | #define SCHIZO_STCLINE_LINDX 0x0000000007800000UL | ||
119 | #define SCHIZO_STCLINE_SPTR 0x000000000007e000UL | ||
120 | #define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL | ||
121 | #define SCHIZO_STCLINE_EPTR 0x000000000000003fUL | ||
122 | #define SCHIZO_STCLINE_VALID 0x0000000000600000UL | ||
123 | #define SCHIZO_STCLINE_FOFN 0x0000000000180000UL | ||
124 | |||
/* Dump and clear this PBM's streaming cache (STC) error, tag, and line
 * diagnostic state after an error has been observed.  Called from error
 * interrupt handlers (so interrupts are already off) and serialized by
 * stc_buf_lock since the capture buffers above are shared globally.
 * The @type argument is currently unused in this routine.
 */
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct strbuf *strbuf = &pbm->stc;
	unsigned long regbase = pbm->pbm_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	err_base = regbase + SCHIZO_STC_ERR;
	tag_base = regbase + SCHIZO_STC_TAG;
	line_base = regbase + SCHIZO_STC_LINE;

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous.  When we put the
	 * streaming buffer into diagnostic mode to probe
	 * it's tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer.  If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = upa_readq(strbuf->strbuf_control);
	upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB),
		   strbuf->strbuf_control);
	/* Snapshot then zero the 128 per-entry error words. */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = upa_readq(err_base + (i * 8UL));
		upa_writeq(0UL, err_base + (i * 8UL));
		stc_error_buf[i] = val;
	}
	/* Snapshot then zero the 16 tag/line register pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
		stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
		upa_writeq(0UL, tag_base + (i * 8UL));
		upa_writeq(0UL, line_base + (i * 8UL));
	}

	/* OK, state is logged, exit diagnostic mode. */
	upa_writeq(control, strbuf->strbuf_control);

	/* Each of the 16 tag/line pairs covers eight consecutive error
	 * words; print a pair only when at least one of its error words
	 * was non-zero.
	 */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       pbm->name,
				       j,
				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
			       pbm->name,
			       i,
			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
			       (tagval & SCHIZO_STCTAG_VPN),
			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));

			/* XXX Should spit out per-bank error information... -DaveM */
			printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       pbm->name,
			       i,
			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
213 | |||
214 | /* IOMMU is per-PBM in Schizo, so interrogate both for anonymous | ||
215 | * controller level errors. | ||
216 | */ | ||
217 | |||
218 | #define SCHIZO_IOMMU_TAG 0xa580UL | ||
219 | #define SCHIZO_IOMMU_DATA 0xa600UL | ||
220 | |||
221 | #define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL | ||
222 | #define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL | ||
223 | #define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL | ||
224 | #define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL | ||
225 | #define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL | ||
226 | #define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL | ||
227 | #define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL | ||
228 | |||
229 | #define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL | ||
230 | #define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL | ||
231 | #define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL | ||
232 | |||
/* Interrogate one PBM's IOMMU after an error of @type: if the control
 * register shows a translation error, decode it, dump any TLB entries
 * carrying error status (clearing them in the process), and finally
 * dump the streaming cache state when the STC is enabled.  Runs under
 * iommu->lock with interrupts disabled.
 */
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct iommu *iommu = pbm->iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = upa_readq(iommu->iommu_control);
	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
		unsigned long base;
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
		upa_writeq(control, iommu->iommu_control);

		/* Decode the two-bit translation error status field. */
		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("%s: IOMMU Error, type[%s]\n",
		       pbm->name, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * it's TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further.  But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB,
			   iommu->iommu_control);

		base = pbm->pbm_regs;

		/* Snapshot all 16 TLB tag/data pairs, zeroing each
		 * entry right after it is read.
		 */
		for (i = 0; i < 16; i++) {
			iommu_tag[i] =
				upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL));
			upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL));
		}

		/* Leave diagnostic mode. */
		upa_writeq(control, iommu->iommu_control);

		/* Report only the entries whose tag has the error bit set. */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SCHIZO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
			       "sz(%dK) vpg(%08lx)]\n",
			       pbm->name, i, type_string,
			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       pbm->name, i,
			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	if (pbm->stc.strbuf_enabled)
		__schizo_check_stc_error_pbm(pbm, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
342 | |||
343 | static void schizo_check_iommu_error(struct pci_pbm_info *pbm, | ||
344 | enum schizo_error_type type) | ||
345 | { | ||
346 | schizo_check_iommu_error_pbm(pbm, type); | ||
347 | if (pbm->sibling) | ||
348 | schizo_check_iommu_error_pbm(pbm->sibling, type); | ||
349 | } | ||
350 | |||
351 | /* Uncorrectable ECC error status gathering. */ | ||
352 | #define SCHIZO_UE_AFSR 0x10030UL | ||
353 | #define SCHIZO_UE_AFAR 0x10038UL | ||
354 | |||
355 | #define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */ | ||
356 | #define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */ | ||
357 | #define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */ | ||
358 | #define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */ | ||
359 | #define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */ | ||
360 | #define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */ | ||
361 | #define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */ | ||
362 | #define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */ | ||
363 | #define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */ | ||
364 | #define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */ | ||
365 | #define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */ | ||
366 | #define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */ | ||
367 | #define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */ | ||
368 | #define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */ | ||
369 | |||
/* Interrupt handler for uncorrectable ECC errors.  Latches the UE
 * AFAR/AFSR pair, clears the latched primary/secondary status bits by
 * writing them back, logs the decoded cause, then interrogates the
 * IOMMU for related translation errors.  Returns IRQ_NONE when no UE
 * error bits were actually set.
 */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR;
	unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported, limit;

	/* Latch uncorrectable error status. */
	afar = upa_readq(afar_reg);

	/* If either of the error pending bits are set in the
	 * AFSR, the error status is being actively updated by
	 * the hardware and we must re-read to get a clean value.
	 */
	limit = 1000;
	do {
		afsr = upa_readq(afsr_reg);
	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
	if (!error_bits)
		return IRQ_NONE;
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("%s: Uncorrectable Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
	       pbm->name,
	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
	printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
	       pbm->name,
	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
	printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
	printk("%s: UE Secondary errors [", pbm->name);
	reported = 0;
	if (afsr & SCHIZO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SCHIZO_UEAFSR_SDMA) {
		reported++;
		printk("(DMA)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	schizo_check_iommu_error(pbm, UE_ERR);

	return IRQ_HANDLED;
}
439 | |||
440 | #define SCHIZO_CE_AFSR 0x10040UL | ||
441 | #define SCHIZO_CE_AFAR 0x10048UL | ||
442 | |||
443 | #define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL | ||
444 | #define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL | ||
445 | #define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL | ||
446 | #define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL | ||
447 | #define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL | ||
448 | #define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL | ||
449 | #define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL | ||
450 | #define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL | ||
451 | #define SCHIZO_CEAFSR_AID 0x000000001f000000UL | ||
452 | #define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL | ||
453 | #define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL | ||
454 | #define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL | ||
455 | #define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL | ||
456 | #define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL | ||
457 | |||
458 | static irqreturn_t schizo_ce_intr(int irq, void *dev_id) | ||
459 | { | ||
460 | struct pci_pbm_info *pbm = dev_id; | ||
461 | unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR; | ||
462 | unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR; | ||
463 | unsigned long afsr, afar, error_bits; | ||
464 | int reported, limit; | ||
465 | |||
466 | /* Latch error status. */ | ||
467 | afar = upa_readq(afar_reg); | ||
468 | |||
469 | /* If either of the error pending bits are set in the | ||
470 | * AFSR, the error status is being actively updated by | ||
471 | * the hardware and we must re-read to get a clean value. | ||
472 | */ | ||
473 | limit = 1000; | ||
474 | do { | ||
475 | afsr = upa_readq(afsr_reg); | ||
476 | } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); | ||
477 | |||
478 | /* Clear primary/secondary error status bits. */ | ||
479 | error_bits = afsr & | ||
480 | (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR | | ||
481 | SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA); | ||
482 | if (!error_bits) | ||
483 | return IRQ_NONE; | ||
484 | upa_writeq(error_bits, afsr_reg); | ||
485 | |||
486 | /* Log the error. */ | ||
487 | printk("%s: Correctable Error, primary error type[%s]\n", | ||
488 | pbm->name, | ||
489 | (((error_bits & SCHIZO_CEAFSR_PPIO) ? | ||
490 | "PIO" : | ||
491 | ((error_bits & SCHIZO_CEAFSR_PDRD) ? | ||
492 | "DMA Read" : | ||
493 | ((error_bits & SCHIZO_CEAFSR_PDWR) ? | ||
494 | "DMA Write" : "???"))))); | ||
495 | |||
496 | /* XXX Use syndrome and afar to print out module string just like | ||
497 | * XXX UDB CE trap handler does... -DaveM | ||
498 | */ | ||
499 | printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", | ||
500 | pbm->name, | ||
501 | (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, | ||
502 | (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, | ||
503 | (afsr & SCHIZO_UEAFSR_AID) >> 24UL); | ||
504 | printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", | ||
505 | pbm->name, | ||
506 | (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, | ||
507 | (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0, | ||
508 | (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, | ||
509 | (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, | ||
510 | (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); | ||
511 | printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); | ||
512 | printk("%s: CE Secondary errors [", pbm->name); | ||
513 | reported = 0; | ||
514 | if (afsr & SCHIZO_CEAFSR_SPIO) { | ||
515 | reported++; | ||
516 | printk("(PIO)"); | ||
517 | } | ||
518 | if (afsr & SCHIZO_CEAFSR_SDMA) { | ||
519 | reported++; | ||
520 | printk("(DMA)"); | ||
521 | } | ||
522 | if (!reported) | ||
523 | printk("(none)"); | ||
524 | printk("]\n"); | ||
525 | |||
526 | return IRQ_HANDLED; | ||
527 | } | ||
528 | |||
529 | #define SCHIZO_PCI_AFSR 0x2010UL | ||
530 | #define SCHIZO_PCI_AFAR 0x2018UL | ||
531 | |||
532 | #define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */ | ||
533 | #define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */ | ||
534 | #define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */ | ||
535 | #define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */ | ||
536 | #define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */ | ||
537 | #define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */ | ||
538 | #define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */ | ||
539 | #define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */ | ||
540 | #define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */ | ||
541 | #define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */ | ||
542 | #define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */ | ||
543 | #define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */ | ||
544 | #define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */ | ||
545 | #define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */ | ||
546 | #define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */ | ||
547 | #define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */ | ||
548 | #define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */ | ||
549 | |||
550 | #define SCHIZO_PCI_CTRL (0x2000UL) | ||
551 | #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ | ||
552 | #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ | ||
553 | #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ | ||
554 | #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ | ||
555 | #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ | ||
556 | #define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */ | ||
557 | #define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */ | ||
558 | #define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */ | ||
559 | #define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */ | ||
560 | #define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */ | ||
561 | #define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */ | ||
562 | #define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */ | ||
563 | #define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */ | ||
564 | #define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */ | ||
565 | #define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */ | ||
566 | #define SCHIZO_PCICTRL_PTO_SHIFT 24UL | ||
567 | #define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */ | ||
568 | #define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */ | ||
569 | #define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */ | ||
570 | #define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */ | ||
571 | #define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */ | ||
572 | #define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */ | ||
573 | #define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */ | ||
574 | #define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */ | ||
575 | #define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */ | ||
576 | #define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */ | ||
577 | #define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */ | ||
578 | |||
/* Secondary PCI error sweep, called when the PCI AFSR carried no error
 * bits.  Checks the PBM's PCI control/status register for bus-level
 * errors (clearing them by writing the read value back, since the
 * error bits are write-to-clear per the "Clear the errors" handling
 * below), then checks the bridge's PCI_STATUS config register.
 * Returns IRQ_HANDLED if anything was found, IRQ_NONE otherwise.
 */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
	csr = upa_readq(csr_reg);
	csr_error_bits =
		csr & (SCHIZO_PCICTRL_BUS_UNUS |
		       SCHIZO_PCICTRL_TTO_ERR |
		       SCHIZO_PCICTRL_RTRY_ERR |
		       SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_ERR |
		       SCHIZO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors. */
		upa_writeq(csr, csr_reg);

		/* Log 'em. */
		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
			printk("%s: Bus unusable error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
			printk("%s: PCI TRDY# timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
			printk("%s: PCI excessive retry error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
			printk("%s: PCI discard timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Also report and clear any error bits latched in the bridge
	 * device's own PCI_STATUS register.
	 */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
632 | |||
/* Interrupt handler for PBM PCI bus errors.  Latches the PCI AFAR/AFSR
 * pair, clears the latched primary/secondary status bits by writing
 * them back, logs the decoded cause, and then scans the PBM's PCI bus
 * for devices that logged the corresponding error type.  When the AFSR
 * shows no error bits at all, falls through to the control/status
 * register sweep in schizo_pcierr_intr_other().
 */
static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	unsigned long afsr_reg, afar_reg, base;
	unsigned long afsr, afar, error_bits;
	int reported;

	base = pbm->pbm_regs;

	afsr_reg = base + SCHIZO_PCI_AFSR;
	afar_reg = base + SCHIZO_PCI_AFAR;

	/* Latch error status. */
	afar = upa_readq(afar_reg);
	afsr = upa_readq(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
	if (!error_bits)
		return schizo_pcierr_intr_other(pbm);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("%s: PCI Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
		    "Parity Error" :
		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
		     "Timeout" :
		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
		      "Bus Unusable" : "???"))))))));
	printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
	       pbm->name,
	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
		"Config" :
		((afsr & SCHIZO_PCIAFSR_MEM) ?
		 "Memory" :
		 ((afsr & SCHIZO_PCIAFSR_IO) ?
		  "I/O" : "???"))));
	printk("%s: PCI AFAR [%016lx]\n",
	       pbm->name, afar);
	printk("%s: PCI Secondary errors [",
	       pbm->name);
	reported = 0;
	if (afsr & SCHIZO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SCHIZO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (afsr & SCHIZO_PCIAFSR_STTO) {
		reported++;
		printk("(Timeout)");
	}
	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
		reported++;
		printk("(Bus Unusable)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
		schizo_check_iommu_error(pbm, PCI_ERR);
		pci_scan_for_target_abort(pbm, pbm->pci_bus);
	}
	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
		pci_scan_for_master_abort(pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
747 | |||
748 | #define SCHIZO_SAFARI_ERRLOG 0x10018UL | ||
749 | |||
750 | #define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL | ||
751 | |||
752 | #define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */ | ||
753 | #define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */ | ||
754 | #define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */ | ||
755 | #define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */ | ||
756 | #define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */ | ||
757 | #define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */ | ||
758 | #define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */ | ||
759 | #define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */ | ||
760 | #define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */ | ||
761 | #define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */ | ||
762 | #define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */ | ||
763 | #define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */ | ||
764 | #define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */ | ||
765 | #define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */ | ||
766 | #define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */ | ||
767 | #define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */ | ||
768 | #define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */ | ||
769 | #define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */ | ||
770 | #define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */ | ||
771 | #define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */ | ||
772 | #define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */ | ||
773 | #define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */ | ||
774 | #define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */ | ||
775 | #define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */ | ||
776 | #define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */ | ||
777 | #define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */ | ||
778 | #define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */ | ||
779 | #define BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */ | ||
780 | #define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */ | ||
781 | #define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */ | ||
782 | #define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */ | ||
783 | |||
/* Interrupt handler for Safari (Schizo) / JBUS (Tomatillo) bus errors.
 *
 * We only expect UNMAP errors here.  The rest of the Safari errors
 * are marked fatal and thus cause a system reset.
 */
static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	u64 errlog;

	/* Snapshot the error log, then write it back with ERROUT
	 * masked off (presumably this re-arms error reporting --
	 * TODO confirm against the Schizo documentation).
	 */
	errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
	upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT),
		   pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);

	if (!(errlog & BUS_ERROR_UNMAP)) {
		printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
		       pbm->name, errlog);

		return IRQ_HANDLED;
	}

	/* UNMAP means a DVMA translation problem, so go interrogate
	 * the IOMMU error state.
	 */
	printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
	       pbm->name);
	schizo_check_iommu_error(pbm, SAFARI_ERR);

	return IRQ_HANDLED;
}
809 | |||
810 | /* Nearly identical to PSYCHO equivalents... */ | ||
811 | #define SCHIZO_ECC_CTRL 0x10020UL | ||
812 | #define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ | ||
813 | #define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ | ||
814 | #define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */ | ||
815 | |||
816 | #define SCHIZO_SAFARI_ERRCTRL 0x10008UL | ||
817 | #define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL | ||
818 | #define SCHIZO_SAFARI_IRQCTRL 0x10010UL | ||
819 | #define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL | ||
820 | |||
821 | static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino) | ||
822 | { | ||
823 | ino &= IMAP_INO; | ||
824 | |||
825 | if (pbm->ino_bitmap & (1UL << ino)) | ||
826 | return 1; | ||
827 | |||
828 | return 0; | ||
829 | } | ||
830 | |||
831 | /* How the Tomatillo IRQs are routed around is pure guesswork here. | ||
832 | * | ||
833 | * All the Tomatillo devices I see in prtconf dumps seem to have only | ||
834 | * a single PCI bus unit attached to it. It would seem they are separate | ||
835 | * devices because their PortID (ie. JBUS ID) values are all different | ||
836 | * and thus the registers are mapped to totally different locations. | ||
837 | * | ||
838 | * However, two Tomatillo's look "similar" in that the only difference | ||
839 | * in their PortID is the lowest bit. | ||
840 | * | ||
841 | * So if we were to ignore this lower bit, it certainly looks like two | ||
842 | * PCI bus units of the same Tomatillo. I still have not really | ||
843 | * figured this out... | ||
844 | */ | ||
/* Install the error interrupt handlers for a Tomatillo PBM and
 * enable error detection/reporting in the controller.
 */
static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
{
	struct of_device *op = of_find_device_by_node(pbm->op->node);
	u64 tmp, err_mask, err_no_mask;
	int err;

	/* Tomatillo IRQ property layout is:
	 * 0: PCIERR
	 * 1: UE ERR
	 * 2: CE ERR
	 * 3: SERR
	 * 4: POWER FAIL?
	 */

	/* Only request an IRQ when this PBM actually routes the
	 * corresponding INO (see pbm_routes_this_ino()).
	 */
	if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
		err = request_irq(op->irqs[1], schizo_ue_intr, 0,
				  "TOMATILLO_UE", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register UE, "
			       "err=%d\n", pbm->name, err);
	}
	if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
		err = request_irq(op->irqs[2], schizo_ce_intr, 0,
				  "TOMATILLO_CE", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register CE, "
			       "err=%d\n", pbm->name, err);
	}
	err = 0;
	/* Whichever side (A or B) this PBM routes, the PCI error
	 * interrupt is delivered through irqs[0].
	 */
	if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
		err = request_irq(op->irqs[0], schizo_pcierr_intr, 0,
				  "TOMATILLO_PCIERR", pbm);
	} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
		err = request_irq(op->irqs[0], schizo_pcierr_intr, 0,
				  "TOMATILLO_PCIERR", pbm);
	}
	if (err)
		printk(KERN_WARNING "%s: Could not register PCIERR, "
		       "err=%d\n", pbm->name, err);

	if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
		err = request_irq(op->irqs[3], schizo_safarierr_intr, 0,
				  "TOMATILLO_SERR", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register SERR, "
			       "err=%d\n", pbm->name, err);
	}

	/* Enable UE and CE interrupts for controller. */
	upa_writeq((SCHIZO_ECCCTRL_EE |
		    SCHIZO_ECCCTRL_UE |
		    SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);

	/* Enable PCI Error interrupts and clear error
	 * bits.
	 */
	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
		    SCHIZO_PCICTRL_TTO_ERR |
		    SCHIZO_PCICTRL_RTRY_ERR |
		    SCHIZO_PCICTRL_SERR |
		    SCHIZO_PCICTRL_EEN);

	/* DTO errors are deliberately left disabled here. */
	err_no_mask = SCHIZO_PCICTRL_DTO_ERR;

	tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);

	/* Write the primary and secondary AFSR error bits back to
	 * clear any latched status -- presumably write-one-to-clear,
	 * TODO confirm against the chip documentation.
	 */
	err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		    SCHIZO_PCIAFSR_PTTO |
		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		    SCHIZO_PCIAFSR_STTO);

	upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR);

	/* Enable detection of the listed JBUS error conditions... */
	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
		    BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
		    BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
		    BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
		    BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
		    BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
		    BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
		    BUS_ERROR_APERR | BUS_ERROR_UNMAP |
		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);

	upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
		   pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);

	/* ...but only let UNMAP errors raise an interrupt. */
	upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)),
		   pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL);
}
939 | |||
/* Install the error interrupt handlers for a SCHIZO PBM and enable
 * error detection/reporting in the controller.
 */
static void schizo_register_error_handlers(struct pci_pbm_info *pbm)
{
	struct of_device *op = of_find_device_by_node(pbm->op->node);
	u64 tmp, err_mask, err_no_mask;
	int err;

	/* Schizo IRQ property layout is:
	 * 0: PCIERR
	 * 1: UE ERR
	 * 2: CE ERR
	 * 3: SERR
	 * 4: POWER FAIL?
	 */

	/* Only request an IRQ when this PBM actually routes the
	 * corresponding INO (see pbm_routes_this_ino()).
	 */
	if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
		err = request_irq(op->irqs[1], schizo_ue_intr, 0,
				  "SCHIZO_UE", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register UE, "
			       "err=%d\n", pbm->name, err);
	}
	if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
		err = request_irq(op->irqs[2], schizo_ce_intr, 0,
				  "SCHIZO_CE", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register CE, "
			       "err=%d\n", pbm->name, err);
	}
	err = 0;
	/* Whichever side (A or B) this PBM routes, the PCI error
	 * interrupt is delivered through irqs[0].
	 */
	if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
		err = request_irq(op->irqs[0], schizo_pcierr_intr, 0,
				  "SCHIZO_PCIERR", pbm);
	} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
		err = request_irq(op->irqs[0], schizo_pcierr_intr, 0,
				  "SCHIZO_PCIERR", pbm);
	}
	if (err)
		printk(KERN_WARNING "%s: Could not register PCIERR, "
		       "err=%d\n", pbm->name, err);

	if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
		err = request_irq(op->irqs[3], schizo_safarierr_intr, 0,
				  "SCHIZO_SERR", pbm);
		if (err)
			printk(KERN_WARNING "%s: Could not register SERR, "
			       "err=%d\n", pbm->name, err);
	}

	/* Enable UE and CE interrupts for controller. */
	upa_writeq((SCHIZO_ECCCTRL_EE |
		    SCHIZO_ECCCTRL_UE |
		    SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);

	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
		    SCHIZO_PCICTRL_ESLCK |
		    SCHIZO_PCICTRL_TTO_ERR |
		    SCHIZO_PCICTRL_RTRY_ERR |
		    SCHIZO_PCICTRL_SBH_ERR |
		    SCHIZO_PCICTRL_SERR |
		    SCHIZO_PCICTRL_EEN);

	/* DTO and SBH interrupt bits are deliberately cleared. */
	err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_INT);

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.
	 */
	tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);

	/* Write the primary and secondary AFSR error bits back to
	 * clear any latched status -- presumably write-one-to-clear,
	 * TODO confirm against the chip documentation.
	 */
	upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		    SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		    SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS),
		   pbm->pbm_regs + SCHIZO_PCI_AFSR);

	/* Make all Safari error conditions fatal except unmapped
	 * errors which we make generate interrupts.
	 */
	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
		    BUS_ERROR_BADMA | BUS_ERROR_BADMB |
		    BUS_ERROR_BADMC |
		    BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
		    BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
		    BUS_ERROR_CIQTO |
		    BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
		    BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
		    BUS_ERROR_ILL);
#if 1
	/* XXX Something wrong with some Excalibur systems
	 * XXX Sun is shipping.  The behavior on a 2-cpu
	 * XXX machine is that both CPU1 parity error bits
	 * XXX are set and are immediately set again when
	 * XXX their error status bits are cleared.  Just
	 * XXX ignore them for now.  -DaveM
	 */
	err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
		      BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
#endif

	upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
		   pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);
}
1048 | |||
1049 | static void pbm_config_busmastering(struct pci_pbm_info *pbm) | ||
1050 | { | ||
1051 | u8 *addr; | ||
1052 | |||
1053 | /* Set cache-line size to 64 bytes, this is actually | ||
1054 | * a nop but I do it for completeness. | ||
1055 | */ | ||
1056 | addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, | ||
1057 | 0, PCI_CACHE_LINE_SIZE); | ||
1058 | pci_config_write8(addr, 64 / sizeof(u32)); | ||
1059 | |||
1060 | /* Set PBM latency timer to 64 PCI clocks. */ | ||
1061 | addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, | ||
1062 | 0, PCI_LATENCY_TIMER); | ||
1063 | pci_config_write8(addr, 64); | ||
1064 | } | ||
1065 | |||
1066 | static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm, | ||
1067 | struct device *parent) | ||
1068 | { | ||
1069 | pbm_config_busmastering(pbm); | ||
1070 | pbm->is_66mhz_capable = | ||
1071 | (of_find_property(pbm->op->node, "66mhz-capable", NULL) | ||
1072 | != NULL); | ||
1073 | |||
1074 | pbm->pci_bus = pci_scan_one_pbm(pbm, parent); | ||
1075 | |||
1076 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) | ||
1077 | tomatillo_register_error_handlers(pbm); | ||
1078 | else | ||
1079 | schizo_register_error_handlers(pbm); | ||
1080 | } | ||
1081 | |||
1082 | #define SCHIZO_STRBUF_CONTROL (0x02800UL) | ||
1083 | #define SCHIZO_STRBUF_FLUSH (0x02808UL) | ||
1084 | #define SCHIZO_STRBUF_FSYNC (0x02810UL) | ||
1085 | #define SCHIZO_STRBUF_CTXFLUSH (0x02818UL) | ||
1086 | #define SCHIZO_STRBUF_CTXMATCH (0x10000UL) | ||
1087 | |||
/* Initialize the streaming buffer (STC) for one PBM.  Tomatillo has
 * no streaming cache, so this is a nop there.
 */
static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
{
	unsigned long base = pbm->pbm_regs;
	u64 control;

	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		/* TOMATILLO lacks streaming cache. */
		return;
	}

	/* SCHIZO has context flushing. */
	pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
	pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
	pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
	pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
	pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;

	/* Round the flush-flag pointer up to the next 64-byte
	 * boundary inside __flushflag_buf, and record its physical
	 * address for the hardware.
	 */
	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&pbm->stc.__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	pbm->stc.strbuf_flushflag_pa = (unsigned long)
		__pa(pbm->stc.strbuf_flushflag);

	/* Turn off LRU locking and diag mode, enable the
	 * streaming buffer and leave the rerun-disable
	 * setting however OBP set it.
	 */
	control = upa_readq(pbm->stc.strbuf_control);
	control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
		     SCHIZO_STRBUF_CTRL_LENAB |
		     SCHIZO_STRBUF_CTRL_DENAB);
	control |= SCHIZO_STRBUF_CTRL_ENAB;
	upa_writeq(control, pbm->stc.strbuf_control);

	pbm->stc.strbuf_enabled = 1;
}
1125 | |||
1126 | #define SCHIZO_IOMMU_CONTROL (0x00200UL) | ||
1127 | #define SCHIZO_IOMMU_TSBBASE (0x00208UL) | ||
1128 | #define SCHIZO_IOMMU_FLUSH (0x00210UL) | ||
1129 | #define SCHIZO_IOMMU_CTXFLUSH (0x00218UL) | ||
1130 | |||
/* Set up the IOMMU for one PBM.
 *
 * The DVMA window comes from the "virtual-dma" OBP property
 * (base, size); the window size selects both the DMA address mask
 * and the TSB size.  Returns 0 on success, negative errno on error.
 */
static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0xc0000000, 0x40000000 };
	unsigned long i, tagbase, database;
	struct iommu *iommu = pbm->iommu;
	int tsbsize, err;
	const u32 *vdma;
	u32 dma_mask;
	u64 control;

	vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	/* dma_mask becomes window-base | (window-size - 1). */
	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 128;
		break;

	default:
		printk(KERN_ERR PFX "Strange virtual-dma size.\n");
		return -EINVAL;
	}

	/* Register addresses, SCHIZO has iommu ctx flushing. */
	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
	iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
	iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
	iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;

	/* We use the main control/status register of SCHIZO as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	control = upa_readq(iommu->iommu_control);
	/* Diag mode gives access to the TLB tag/data registers. */
	control |= SCHIZO_IOMMU_CTRL_DENAB;
	upa_writeq(control, iommu->iommu_control);

	tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;

	/* Zero all 16 TLB tag/data pairs through the diagnostic
	 * register space.
	 */
	for (i = 0; i < 16; i++) {
		upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL));
		upa_writeq(0, pbm->pbm_regs + database + (i * 8UL));
	}

	/* Leave diag mode enabled for full-flushing done
	 * in pci_iommu.c
	 */
	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err) {
		printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err);
		return err;
	}

	upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);

	/* Translate the TSB size into its control-register encoding
	 * and finally enable the IOMMU.
	 */
	control = upa_readq(iommu->iommu_control);
	control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
	switch (tsbsize) {
	case 64:
		control |= SCHIZO_IOMMU_TSBSZ_64K;
		break;
	case 128:
		control |= SCHIZO_IOMMU_TSBSZ_128K;
		break;
	}

	control |= SCHIZO_IOMMU_CTRL_ENAB;
	upa_writeq(control, iommu->iommu_control);

	return 0;
}
1221 | |||
1222 | #define SCHIZO_PCI_IRQ_RETRY (0x1a00UL) | ||
1223 | #define SCHIZO_IRQ_RETRY_INF 0xffUL | ||
1224 | |||
1225 | #define SCHIZO_PCI_DIAG (0x2020UL) | ||
1226 | #define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */ | ||
1227 | #define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */ | ||
1228 | #define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */ | ||
1229 | #define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */ | ||
1230 | #define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */ | ||
1231 | #define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */ | ||
1232 | #define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */ | ||
1233 | #define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */ | ||
1234 | #define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */ | ||
1235 | |||
1236 | #define TOMATILLO_PCI_IOC_CSR (0x2248UL) | ||
1237 | #define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL | ||
1238 | #define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL | ||
1239 | #define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL | ||
1240 | #define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL | ||
1241 | #define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL | ||
1242 | #define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL | ||
1243 | #define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL | ||
1244 | #define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL | ||
1245 | #define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL | ||
1246 | #define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL | ||
1247 | #define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL | ||
1248 | #define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL | ||
1249 | #define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL | ||
1250 | #define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL | ||
1251 | #define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL | ||
1252 | |||
1253 | #define TOMATILLO_PCI_IOC_TDIAG (0x2250UL) | ||
1254 | #define TOMATILLO_PCI_IOC_DDIAG (0x2290UL) | ||
1255 | |||
/* One-time hardware setup of a PBM's PCI control, diagnostic and
 * (on Tomatillo) I/O cache registers, including chip-revision
 * specific workarounds.
 */
static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
{
	u64 tmp;

	/* Set the retry limit register to 5 (SCHIZO_IRQ_RETRY_INF
	 * presumably means "unlimited" -- TODO confirm against the
	 * chip documentation).
	 */
	upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY);

	tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);

	/* Enable arbiter for all PCI slots. */
	tmp |= 0xff;

	/* Tomatillo rev >= 2: program the PTO field. */
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version >= 0x2)
		tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;

	/* Bus parking is on unless firmware explicitly disables it. */
	if (!of_find_property(pbm->op->node, "no-bus-parking", NULL))
		tmp |= SCHIZO_PCICTRL_PARK;
	else
		tmp &= ~SCHIZO_PCICTRL_PARK;

	/* Only old (rev <= 1) Tomatillo gets DTO interrupts. */
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
	    pbm->chip_version <= 0x1)
		tmp |= SCHIZO_PCICTRL_DTO_INT;
	else
		tmp &= ~SCHIZO_PCICTRL_DTO_INT;

	/* Tomatillo: turn on the read prefetch enables. */
	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
		tmp |= (SCHIZO_PCICTRL_MRM_PREF |
			SCHIZO_PCICTRL_RDO_PREF |
			SCHIZO_PCICTRL_RDL_PREF);

	upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);

	/* Clear the "disable" diagnostic bits for retry arbitration,
	 * the retry limit, and interrupt/DMA synchronization.
	 */
	tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG);
	tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
		 SCHIZO_PCIDIAG_D_RETRY |
		 SCHIZO_PCIDIAG_D_INTSYNC);
	upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG);

	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
		/* Clear prefetch lengths to workaround a bug in
		 * Jalapeno...
		 */
		tmp = (TOMATILLO_IOC_PART_WPENAB |
		       (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
		       TOMATILLO_IOC_RDMULT_CPENAB |
		       TOMATILLO_IOC_RDONE_CPENAB |
		       TOMATILLO_IOC_RDLINE_CPENAB);

		upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR);
	}
}
1308 | |||
1309 | static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, | ||
1310 | struct of_device *op, u32 portid, | ||
1311 | int chip_type) | ||
1312 | { | ||
1313 | const struct linux_prom64_registers *regs; | ||
1314 | struct device_node *dp = op->node; | ||
1315 | const char *chipset_name; | ||
1316 | int is_pbm_a, err; | ||
1317 | |||
1318 | switch (chip_type) { | ||
1319 | case PBM_CHIP_TYPE_TOMATILLO: | ||
1320 | chipset_name = "TOMATILLO"; | ||
1321 | break; | ||
1322 | |||
1323 | case PBM_CHIP_TYPE_SCHIZO_PLUS: | ||
1324 | chipset_name = "SCHIZO+"; | ||
1325 | break; | ||
1326 | |||
1327 | case PBM_CHIP_TYPE_SCHIZO: | ||
1328 | default: | ||
1329 | chipset_name = "SCHIZO"; | ||
1330 | break; | ||
1331 | }; | ||
1332 | |||
1333 | /* For SCHIZO, three OBP regs: | ||
1334 | * 1) PBM controller regs | ||
1335 | * 2) Schizo front-end controller regs (same for both PBMs) | ||
1336 | * 3) PBM PCI config space | ||
1337 | * | ||
1338 | * For TOMATILLO, four OBP regs: | ||
1339 | * 1) PBM controller regs | ||
1340 | * 2) Tomatillo front-end controller regs | ||
1341 | * 3) PBM PCI config space | ||
1342 | * 4) Ichip regs | ||
1343 | */ | ||
1344 | regs = of_get_property(dp, "reg", NULL); | ||
1345 | |||
1346 | is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); | ||
1347 | |||
1348 | pbm->next = pci_pbm_root; | ||
1349 | pci_pbm_root = pbm; | ||
1350 | |||
1351 | pbm->numa_node = -1; | ||
1352 | |||
1353 | pbm->pci_ops = &sun4u_pci_ops; | ||
1354 | pbm->config_space_reg_bits = 8; | ||
1355 | |||
1356 | pbm->index = pci_num_pbms++; | ||
1357 | |||
1358 | pbm->portid = portid; | ||
1359 | pbm->op = op; | ||
1360 | |||
1361 | pbm->chip_type = chip_type; | ||
1362 | pbm->chip_version = of_getintprop_default(dp, "version#", 0); | ||
1363 | pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0); | ||
1364 | |||
1365 | pbm->pbm_regs = regs[0].phys_addr; | ||
1366 | pbm->controller_regs = regs[1].phys_addr - 0x10000UL; | ||
1367 | |||
1368 | if (chip_type == PBM_CHIP_TYPE_TOMATILLO) | ||
1369 | pbm->sync_reg = regs[3].phys_addr + 0x1a18UL; | ||
1370 | |||
1371 | pbm->name = dp->full_name; | ||
1372 | |||
1373 | printk("%s: %s PCI Bus Module ver[%x:%x]\n", | ||
1374 | pbm->name, chipset_name, | ||
1375 | pbm->chip_version, pbm->chip_revision); | ||
1376 | |||
1377 | schizo_pbm_hw_init(pbm); | ||
1378 | |||
1379 | pci_determine_mem_io_space(pbm); | ||
1380 | |||
1381 | pci_get_pbm_props(pbm); | ||
1382 | |||
1383 | err = schizo_pbm_iommu_init(pbm); | ||
1384 | if (err) | ||
1385 | return err; | ||
1386 | |||
1387 | schizo_pbm_strbuf_init(pbm); | ||
1388 | |||
1389 | schizo_scan_bus(pbm, &op->dev); | ||
1390 | |||
1391 | return 0; | ||
1392 | } | ||
1393 | |||
1394 | static inline int portid_compare(u32 x, u32 y, int chip_type) | ||
1395 | { | ||
1396 | if (chip_type == PBM_CHIP_TYPE_TOMATILLO) { | ||
1397 | if (x == (y ^ 1)) | ||
1398 | return 1; | ||
1399 | return 0; | ||
1400 | } | ||
1401 | return (x == y); | ||
1402 | } | ||
1403 | |||
1404 | static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid, | ||
1405 | int chip_type) | ||
1406 | { | ||
1407 | struct pci_pbm_info *pbm; | ||
1408 | |||
1409 | for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { | ||
1410 | if (portid_compare(pbm->portid, portid, chip_type)) | ||
1411 | return pbm; | ||
1412 | } | ||
1413 | return NULL; | ||
1414 | } | ||
1415 | |||
1416 | static int __devinit __schizo_init(struct of_device *op, unsigned long chip_type) | ||
1417 | { | ||
1418 | struct device_node *dp = op->node; | ||
1419 | struct pci_pbm_info *pbm; | ||
1420 | struct iommu *iommu; | ||
1421 | u32 portid; | ||
1422 | int err; | ||
1423 | |||
1424 | portid = of_getintprop_default(dp, "portid", 0xff); | ||
1425 | |||
1426 | err = -ENOMEM; | ||
1427 | pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); | ||
1428 | if (!pbm) { | ||
1429 | printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); | ||
1430 | goto out_err; | ||
1431 | } | ||
1432 | |||
1433 | pbm->sibling = schizo_find_sibling(portid, chip_type); | ||
1434 | |||
1435 | iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); | ||
1436 | if (!iommu) { | ||
1437 | printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n"); | ||
1438 | goto out_free_pbm; | ||
1439 | } | ||
1440 | |||
1441 | pbm->iommu = iommu; | ||
1442 | |||
1443 | if (schizo_pbm_init(pbm, op, portid, chip_type)) | ||
1444 | goto out_free_iommu; | ||
1445 | |||
1446 | if (pbm->sibling) | ||
1447 | pbm->sibling->sibling = pbm; | ||
1448 | |||
1449 | dev_set_drvdata(&op->dev, pbm); | ||
1450 | |||
1451 | return 0; | ||
1452 | |||
1453 | out_free_iommu: | ||
1454 | kfree(pbm->iommu); | ||
1455 | |||
1456 | out_free_pbm: | ||
1457 | kfree(pbm); | ||
1458 | |||
1459 | out_err: | ||
1460 | return err; | ||
1461 | } | ||
1462 | |||
/* OF platform bus probe entry point; the chip type for this node is
 * stashed in the match table entry's ->data.
 */
static int __devinit schizo_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	return __schizo_init(op, (unsigned long) match->data);
}
1468 | |||
1469 | /* The ordering of this table is very important. Some Tomatillo | ||
1470 | * nodes announce that they are compatible with both pci108e,a801 | ||
1471 | * and pci108e,8001. So list the chips in reverse chronological | ||
1472 | * order. | ||
1473 | */ | ||
1474 | static struct of_device_id __initdata schizo_match[] = { | ||
1475 | { | ||
1476 | .name = "pci", | ||
1477 | .compatible = "pci108e,a801", | ||
1478 | .data = (void *) PBM_CHIP_TYPE_TOMATILLO, | ||
1479 | }, | ||
1480 | { | ||
1481 | .name = "pci", | ||
1482 | .compatible = "pci108e,8002", | ||
1483 | .data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS, | ||
1484 | }, | ||
1485 | { | ||
1486 | .name = "pci", | ||
1487 | .compatible = "pci108e,8001", | ||
1488 | .data = (void *) PBM_CHIP_TYPE_SCHIZO, | ||
1489 | }, | ||
1490 | {}, | ||
1491 | }; | ||
1492 | |||
/* OF platform driver glue for the Schizo/Tomatillo host bridges. */
static struct of_platform_driver schizo_driver = {
	.name = DRIVER_NAME,
	.match_table = schizo_match,
	.probe = schizo_probe,
};
1498 | |||
/* Register the driver.  Done at subsys_initcall time so host
 * controllers are probed before ordinary device drivers initialize.
 */
static int __init schizo_init(void)
{
	return of_register_driver(&schizo_driver, &of_bus_type);
}

subsys_initcall(schizo_init);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c new file mode 100644 index 000000000000..34a1fded3941 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -0,0 +1,1033 @@ | |||
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/msi.h> | ||
15 | #include <linux/log2.h> | ||
16 | #include <linux/of_device.h> | ||
17 | |||
18 | #include <asm/iommu.h> | ||
19 | #include <asm/irq.h> | ||
20 | #include <asm/hypervisor.h> | ||
21 | #include <asm/prom.h> | ||
22 | |||
23 | #include "pci_impl.h" | ||
24 | #include "iommu_common.h" | ||
25 | |||
26 | #include "pci_sun4v.h" | ||
27 | |||
#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

/* PCI hypervisor API group version we request; sun4v_hvapi_register()
 * writes back the minor version actually granted.
 */
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

/* Number of physical page addresses that fit in one page list page. */
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

/* Per-cpu accumulator of pending IOMMU map requests, flushed to the
 * hypervisor in bulk by iommu_batch_flush().
 */
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
/* Set once the per-cpu pglist pages have been allocated (see probe). */
static int iommu_batch_initialized;
46 | |||
47 | /* Interrupts must be disabled. */ | ||
48 | static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) | ||
49 | { | ||
50 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | ||
51 | |||
52 | p->dev = dev; | ||
53 | p->prot = prot; | ||
54 | p->entry = entry; | ||
55 | p->npages = 0; | ||
56 | } | ||
57 | |||
58 | /* Interrupts must be disabled. */ | ||
/* Push the accumulated page list to the hypervisor.  The map call may
 * accept fewer TTEs than requested, so loop until everything has been
 * mapped.  Returns 0 on success, -1 on hypervisor failure.
 * Interrupts must be disabled.
 */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		/* A non-negative return is the count of TTEs mapped. */
		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	/* Leave the batch empty, positioned just past what was mapped. */
	p->entry = entry;
	p->npages = 0;

	return 0;
}
93 | |||
94 | static inline void iommu_batch_new_entry(unsigned long entry) | ||
95 | { | ||
96 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | ||
97 | |||
98 | if (p->entry + p->npages == entry) | ||
99 | return; | ||
100 | if (p->entry != ~0UL) | ||
101 | iommu_batch_flush(p); | ||
102 | p->entry = entry; | ||
103 | } | ||
104 | |||
105 | /* Interrupts must be disabled. */ | ||
106 | static inline long iommu_batch_add(u64 phys_page) | ||
107 | { | ||
108 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | ||
109 | |||
110 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
111 | |||
112 | p->pglist[p->npages++] = phys_page; | ||
113 | if (p->npages == PGLIST_NENTS) | ||
114 | return iommu_batch_flush(p); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | /* Interrupts must be disabled. */ | ||
120 | static inline long iommu_batch_end(void) | ||
121 | { | ||
122 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | ||
123 | |||
124 | BUG_ON(p->npages >= PGLIST_NENTS); | ||
125 | |||
126 | return iommu_batch_flush(p); | ||
127 | } | ||
128 | |||
129 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | ||
130 | dma_addr_t *dma_addrp, gfp_t gfp) | ||
131 | { | ||
132 | unsigned long flags, order, first_page, npages, n; | ||
133 | struct iommu *iommu; | ||
134 | struct page *page; | ||
135 | void *ret; | ||
136 | long entry; | ||
137 | int nid; | ||
138 | |||
139 | size = IO_PAGE_ALIGN(size); | ||
140 | order = get_order(size); | ||
141 | if (unlikely(order >= MAX_ORDER)) | ||
142 | return NULL; | ||
143 | |||
144 | npages = size >> IO_PAGE_SHIFT; | ||
145 | |||
146 | nid = dev->archdata.numa_node; | ||
147 | page = alloc_pages_node(nid, gfp, order); | ||
148 | if (unlikely(!page)) | ||
149 | return NULL; | ||
150 | |||
151 | first_page = (unsigned long) page_address(page); | ||
152 | memset((char *)first_page, 0, PAGE_SIZE << order); | ||
153 | |||
154 | iommu = dev->archdata.iommu; | ||
155 | |||
156 | spin_lock_irqsave(&iommu->lock, flags); | ||
157 | entry = iommu_range_alloc(dev, iommu, npages, NULL); | ||
158 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
159 | |||
160 | if (unlikely(entry == DMA_ERROR_CODE)) | ||
161 | goto range_alloc_fail; | ||
162 | |||
163 | *dma_addrp = (iommu->page_table_map_base + | ||
164 | (entry << IO_PAGE_SHIFT)); | ||
165 | ret = (void *) first_page; | ||
166 | first_page = __pa(first_page); | ||
167 | |||
168 | local_irq_save(flags); | ||
169 | |||
170 | iommu_batch_start(dev, | ||
171 | (HV_PCI_MAP_ATTR_READ | | ||
172 | HV_PCI_MAP_ATTR_WRITE), | ||
173 | entry); | ||
174 | |||
175 | for (n = 0; n < npages; n++) { | ||
176 | long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); | ||
177 | if (unlikely(err < 0L)) | ||
178 | goto iommu_map_fail; | ||
179 | } | ||
180 | |||
181 | if (unlikely(iommu_batch_end() < 0L)) | ||
182 | goto iommu_map_fail; | ||
183 | |||
184 | local_irq_restore(flags); | ||
185 | |||
186 | return ret; | ||
187 | |||
188 | iommu_map_fail: | ||
189 | /* Interrupts are disabled. */ | ||
190 | spin_lock(&iommu->lock); | ||
191 | iommu_range_free(iommu, *dma_addrp, npages); | ||
192 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
193 | |||
194 | range_alloc_fail: | ||
195 | free_pages(first_page, order); | ||
196 | return NULL; | ||
197 | } | ||
198 | |||
/* Tear down a buffer from dma_4v_alloc_coherent(): return the IOMMU
 * range to the arena, demap the translations via the hypervisor, and
 * free the backing pages.
 */
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	/* The hypervisor may demap fewer entries than asked for, so
	 * iterate until the whole range is gone.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	/* NOTE(review): pages are freed only for order < 10; presumably
	 * a guard against a bogus size -- confirm intent, larger orders
	 * would leak here.
	 */
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
232 | |||
/* Map a single physically contiguous CPU buffer for streaming DMA.
 * Returns a bus address covering [ptr, ptr + sz), or DMA_ERROR_CODE
 * on failure.
 */
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	/* Round the span out to whole IO pages. */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	/* Preserve the sub-page offset in the returned handle. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	/* Device only needs write access for non-outbound transfers. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	/* Batched hypervisor map calls require interrupts off. */
	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled; 'flags' still holds the state saved
	 * by local_irq_save() above, so unlock-irqrestore is correct.
	 */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}
296 | |||
/* Undo dma_4v_map_single(): return the IOMMU range to the arena and
 * demap the translations through the hypervisor.
 */
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	/* Recover the page-aligned span that was originally mapped. */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	/* The hypervisor may demap fewer entries than requested. */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
336 | |||
/* Map a scatterlist for streaming DMA, merging entries whose bus
 * addresses come out contiguous (bounded by the device's max segment
 * size and segment boundary).  Returns the number of mapped segments,
 * or 0 on failure.
 */
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	/* ~0UL marks the batch as not yet positioned at any entry. */
	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Terminate the list of consumed output segments. */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	/* Back out every segment mapped so far. */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
478 | |||
/* Undo dma_4v_map_sg(): release and demap each mapped segment.  Stops
 * at the first zero-length entry, which terminates the mapped list.
 */
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		/* The hypervisor may demap fewer entries than requested. */
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}
522 | |||
/* No per-transfer CPU-side sync work is required on this platform. */
static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}
529 | |||
/* Scatterlist counterpart of dma_4v_sync_single_for_cpu(): a no-op. */
static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}
536 | |||
/* DMA operation table installed as the global dma_ops during probe. */
static const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};
547 | |||
548 | static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm, | ||
549 | struct device *parent) | ||
550 | { | ||
551 | struct property *prop; | ||
552 | struct device_node *dp; | ||
553 | |||
554 | dp = pbm->op->node; | ||
555 | prop = of_find_property(dp, "66mhz-capable", NULL); | ||
556 | pbm->is_66mhz_capable = (prop != NULL); | ||
557 | pbm->pci_bus = pci_scan_one_pbm(pbm, parent); | ||
558 | |||
559 | /* XXX register error interrupt handlers XXX */ | ||
560 | } | ||
561 | |||
562 | static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm, | ||
563 | struct iommu *iommu) | ||
564 | { | ||
565 | struct iommu_arena *arena = &iommu->arena; | ||
566 | unsigned long i, cnt = 0; | ||
567 | u32 devhandle; | ||
568 | |||
569 | devhandle = pbm->devhandle; | ||
570 | for (i = 0; i < arena->limit; i++) { | ||
571 | unsigned long ret, io_attrs, ra; | ||
572 | |||
573 | ret = pci_sun4v_iommu_getmap(devhandle, | ||
574 | HV_PCI_TSBID(0, i), | ||
575 | &io_attrs, &ra); | ||
576 | if (ret == HV_EOK) { | ||
577 | if (page_in_phys_avail(ra)) { | ||
578 | pci_sun4v_iommu_demap(devhandle, | ||
579 | HV_PCI_TSBID(0, i), 1); | ||
580 | } else { | ||
581 | cnt++; | ||
582 | __set_bit(i, arena->map); | ||
583 | } | ||
584 | } | ||
585 | } | ||
586 | |||
587 | return cnt; | ||
588 | } | ||
589 | |||
590 | static int __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm) | ||
591 | { | ||
592 | static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; | ||
593 | struct iommu *iommu = pbm->iommu; | ||
594 | unsigned long num_tsb_entries, sz, tsbsize; | ||
595 | u32 dma_mask, dma_offset; | ||
596 | const u32 *vdma; | ||
597 | |||
598 | vdma = of_get_property(pbm->op->node, "virtual-dma", NULL); | ||
599 | if (!vdma) | ||
600 | vdma = vdma_default; | ||
601 | |||
602 | if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) { | ||
603 | printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n", | ||
604 | vdma[0], vdma[1]); | ||
605 | return -EINVAL; | ||
606 | }; | ||
607 | |||
608 | dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); | ||
609 | num_tsb_entries = vdma[1] / IO_PAGE_SIZE; | ||
610 | tsbsize = num_tsb_entries * sizeof(iopte_t); | ||
611 | |||
612 | dma_offset = vdma[0]; | ||
613 | |||
614 | /* Setup initial software IOMMU state. */ | ||
615 | spin_lock_init(&iommu->lock); | ||
616 | iommu->ctx_lowest_free = 1; | ||
617 | iommu->page_table_map_base = dma_offset; | ||
618 | iommu->dma_addr_mask = dma_mask; | ||
619 | |||
620 | /* Allocate and initialize the free area map. */ | ||
621 | sz = (num_tsb_entries + 7) / 8; | ||
622 | sz = (sz + 7UL) & ~7UL; | ||
623 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); | ||
624 | if (!iommu->arena.map) { | ||
625 | printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n"); | ||
626 | return -ENOMEM; | ||
627 | } | ||
628 | iommu->arena.limit = num_tsb_entries; | ||
629 | |||
630 | sz = probe_existing_entries(pbm, iommu); | ||
631 | if (sz) | ||
632 | printk("%s: Imported %lu TSB entries from OBP\n", | ||
633 | pbm->name, sz); | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | #ifdef CONFIG_PCI_MSI | ||
/* In-memory layout of one MSI event queue entry as filled in by the
 * hypervisor.  Mask/shift pairs below decode the packed fields.
 */
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
682 | |||
683 | static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
684 | unsigned long *head) | ||
685 | { | ||
686 | unsigned long err, limit; | ||
687 | |||
688 | err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head); | ||
689 | if (unlikely(err)) | ||
690 | return -ENXIO; | ||
691 | |||
692 | limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); | ||
693 | if (unlikely(*head >= limit)) | ||
694 | return -EFBIG; | ||
695 | |||
696 | return 0; | ||
697 | } | ||
698 | |||
/* Pull one MSI off queue 'msiqid'.  '*head' is a byte offset into the
 * queue and is advanced (with wrap) past the consumed entry.  Returns
 * 1 when an MSI was dequeued into '*msi', 0 when the slot is empty,
 * or a negative errno.
 */
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	/* Type zero means the entry has not been filled in. */
	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	/* Only 32-bit and 64-bit MSIs are handled here. */
	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	/* Advance the head, wrapping at the end of the queue. */
	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}
738 | |||
739 | static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
740 | unsigned long head) | ||
741 | { | ||
742 | unsigned long err; | ||
743 | |||
744 | err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head); | ||
745 | if (unlikely(err)) | ||
746 | return -EINVAL; | ||
747 | |||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
752 | unsigned long msi, int is_msi64) | ||
753 | { | ||
754 | if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid, | ||
755 | (is_msi64 ? | ||
756 | HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) | ||
757 | return -ENXIO; | ||
758 | if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE)) | ||
759 | return -ENXIO; | ||
760 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID)) | ||
761 | return -ENXIO; | ||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) | ||
766 | { | ||
767 | unsigned long err, msiqid; | ||
768 | |||
769 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid); | ||
770 | if (err) | ||
771 | return -ENXIO; | ||
772 | |||
773 | pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID); | ||
774 | |||
775 | return 0; | ||
776 | } | ||
777 | |||
/* Allocate one contiguous block backing all of this PBM's MSI event
 * queues and register each queue with the hypervisor, verifying every
 * configuration by reading it back.  Returns 0 or a negative errno.
 */
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		/* Cross-check what the hypervisor actually recorded. */
		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}
831 | |||
832 | static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm) | ||
833 | { | ||
834 | unsigned long q_size, alloc_size, pages, order; | ||
835 | int i; | ||
836 | |||
837 | for (i = 0; i < pbm->msiq_num; i++) { | ||
838 | unsigned long msiqid = pbm->msiq_first + i; | ||
839 | |||
840 | (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0); | ||
841 | } | ||
842 | |||
843 | q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); | ||
844 | alloc_size = (pbm->msiq_num * q_size); | ||
845 | order = get_order(alloc_size); | ||
846 | |||
847 | pages = (unsigned long) pbm->msi_queues; | ||
848 | |||
849 | free_pages(pages, order); | ||
850 | |||
851 | pbm->msi_queues = NULL; | ||
852 | } | ||
853 | |||
/* Build a Linux virtual IRQ for MSI queue 'msiqid' (device interrupt
 * 'devino'), mark the queue idle and enable it.  Returns the virtual
 * IRQ number or a negative errno.
 */
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	/* NOTE(review): virt_irq built above is not torn down on the
	 * error paths below -- confirm whether that leak is acceptable.
	 */
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}
870 | |||
/* Callbacks handed to the generic sparc64 MSI layer. */
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};
881 | |||
/* Hook this PBM into the generic sparc64 MSI infrastructure. */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
886 | #else /* CONFIG_PCI_MSI */ | ||
/* MSI support compiled out: nothing to initialize. */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
890 | #endif /* !(CONFIG_PCI_MSI) */ | ||
891 | |||
/* Common setup for one SUN4V PCI bus module: record identity and PCI
 * ops, determine resources, initialize the IOMMU and MSI support,
 * scan the bus, then link the PBM into the global list.  Returns 0 or
 * a negative errno.
 */
static int __init pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
				     struct of_device *op, u32 devhandle)
{
	struct device_node *dp = op->node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	/* sun4v config space offsets are 12 bits wide. */
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}
931 | |||
/* Top-level probe for a "SUNW,sun4v-pci" node.  Negotiates the PCI
 * hypervisor API group once, installs the sun4v DMA ops, allocates
 * the per-cpu IOMMU batch page lists on first use, then builds and
 * initializes the PBM for this controller.
 */
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->node;

	/* One-time hypervisor API negotiation, shared by all PBMs. */
	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	/* Device handle is bits 59:32 of the first reg cell. */
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		/* One page-list page per possible cpu, allocated once. */
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}
1013 | |||
/* Single binding: virtual PCI nodes exported by the sun4v hypervisor. */
static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};
1021 | |||
/* OF platform driver binding for sun4v virtual PCI controllers. */
static struct of_platform_driver pci_sun4v_driver = {
	.name		= DRIVER_NAME,
	.match_table	= pci_sun4v_match,
	.probe		= pci_sun4v_probe,
};
1027 | |||
/* Register the driver with the OF bus; runs at subsys initcall time. */
static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h new file mode 100644 index 000000000000..8e9fc3a5b4f5 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* pci_sun4v.h: SUN4V specific PCI controller support. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _PCI_SUN4V_H | ||
7 | #define _PCI_SUN4V_H | ||
8 | |||
9 | extern long pci_sun4v_iommu_map(unsigned long devhandle, | ||
10 | unsigned long tsbid, | ||
11 | unsigned long num_ttes, | ||
12 | unsigned long io_attributes, | ||
13 | unsigned long io_page_list_pa); | ||
14 | extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, | ||
15 | unsigned long tsbid, | ||
16 | unsigned long num_ttes); | ||
17 | extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, | ||
18 | unsigned long tsbid, | ||
19 | unsigned long *io_attributes, | ||
20 | unsigned long *real_address); | ||
21 | extern unsigned long pci_sun4v_config_get(unsigned long devhandle, | ||
22 | unsigned long pci_device, | ||
23 | unsigned long config_offset, | ||
24 | unsigned long size); | ||
25 | extern int pci_sun4v_config_put(unsigned long devhandle, | ||
26 | unsigned long pci_device, | ||
27 | unsigned long config_offset, | ||
28 | unsigned long size, | ||
29 | unsigned long data); | ||
30 | |||
31 | extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle, | ||
32 | unsigned long msiqid, | ||
33 | unsigned long msiq_paddr, | ||
34 | unsigned long num_entries); | ||
35 | extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle, | ||
36 | unsigned long msiqid, | ||
37 | unsigned long *msiq_paddr, | ||
38 | unsigned long *num_entries); | ||
39 | extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle, | ||
40 | unsigned long msiqid, | ||
41 | unsigned long *valid); | ||
42 | extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle, | ||
43 | unsigned long msiqid, | ||
44 | unsigned long valid); | ||
45 | extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle, | ||
46 | unsigned long msiqid, | ||
47 | unsigned long *state); | ||
48 | extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle, | ||
49 | unsigned long msiqid, | ||
50 | unsigned long state); | ||
51 | extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle, | ||
52 | unsigned long msiqid, | ||
53 | unsigned long *head); | ||
54 | extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle, | ||
55 | unsigned long msiqid, | ||
56 | unsigned long head); | ||
57 | extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle, | ||
58 | unsigned long msiqid, | ||
59 | unsigned long *head); | ||
60 | extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle, | ||
61 | unsigned long msinum, | ||
62 | unsigned long *valid); | ||
63 | extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle, | ||
64 | unsigned long msinum, | ||
65 | unsigned long valid); | ||
66 | extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle, | ||
67 | unsigned long msinum, | ||
68 | unsigned long *msiq); | ||
69 | extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle, | ||
70 | unsigned long msinum, | ||
71 | unsigned long msiq, | ||
72 | unsigned long msitype); | ||
73 | extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle, | ||
74 | unsigned long msinum, | ||
75 | unsigned long *state); | ||
76 | extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle, | ||
77 | unsigned long msinum, | ||
78 | unsigned long state); | ||
79 | extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle, | ||
80 | unsigned long msinum, | ||
81 | unsigned long *msiq); | ||
82 | extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle, | ||
83 | unsigned long msinum, | ||
84 | unsigned long msiq); | ||
85 | extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle, | ||
86 | unsigned long msinum, | ||
87 | unsigned long *valid); | ||
88 | extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, | ||
89 | unsigned long msinum, | ||
90 | unsigned long valid); | ||
91 | |||
92 | #endif /* !(_PCI_SUN4V_H) */ | ||
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S new file mode 100644 index 000000000000..e606d46c6815 --- /dev/null +++ b/arch/sparc/kernel/pci_sun4v_asm.S | |||
@@ -0,0 +1,362 @@ | |||
1 | /* pci_sun4v_asm: Hypervisor calls for PCI support. | ||
2 | * | ||
3 | * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <asm/hypervisor.h> | ||
8 | |||
9 | /* %o0: devhandle | ||
10 | * %o1: tsbid | ||
11 | * %o2: num ttes | ||
12 | * %o3: io_attributes | ||
13 | * %o4: io_page_list phys address | ||
14 | * | ||
15 | * returns %o0: -status if status was non-zero, else | ||
16 | * %o0: num pages mapped | ||
17 | */ | ||
18 | ENTRY(pci_sun4v_iommu_map) | ||
19 | mov %o5, %g1 | ||
20 | mov HV_FAST_PCI_IOMMU_MAP, %o5 | ||
21 | ta HV_FAST_TRAP | ||
22 | brnz,pn %o0, 1f | ||
23 | sub %g0, %o0, %o0 | ||
24 | mov %o1, %o0 | ||
25 | 1: retl | ||
26 | nop | ||
27 | ENDPROC(pci_sun4v_iommu_map) | ||
28 | |||
29 | /* %o0: devhandle | ||
30 | * %o1: tsbid | ||
31 | * %o2: num ttes | ||
32 | * | ||
33 | * returns %o0: num ttes demapped | ||
34 | */ | ||
35 | ENTRY(pci_sun4v_iommu_demap) | ||
36 | mov HV_FAST_PCI_IOMMU_DEMAP, %o5 | ||
37 | ta HV_FAST_TRAP | ||
38 | retl | ||
39 | mov %o1, %o0 | ||
40 | ENDPROC(pci_sun4v_iommu_demap) | ||
41 | |||
42 | /* %o0: devhandle | ||
43 | * %o1: tsbid | ||
44 | * %o2: &io_attributes | ||
45 | * %o3: &real_address | ||
46 | * | ||
47 | * returns %o0: status | ||
48 | */ | ||
49 | ENTRY(pci_sun4v_iommu_getmap) | ||
50 | mov %o2, %o4 | ||
51 | mov HV_FAST_PCI_IOMMU_GETMAP, %o5 | ||
52 | ta HV_FAST_TRAP | ||
53 | stx %o1, [%o4] | ||
54 | stx %o2, [%o3] | ||
55 | retl | ||
56 | mov %o0, %o0 | ||
57 | ENDPROC(pci_sun4v_iommu_getmap) | ||
58 | |||
59 | /* %o0: devhandle | ||
60 | * %o1: pci_device | ||
61 | * %o2: pci_config_offset | ||
62 | * %o3: size | ||
63 | * | ||
64 | * returns %o0: data | ||
65 | * | ||
66 | * If there is an error, the data will be returned | ||
67 | * as all 1's. | ||
68 | */ | ||
69 | ENTRY(pci_sun4v_config_get) | ||
70 | mov HV_FAST_PCI_CONFIG_GET, %o5 | ||
71 | ta HV_FAST_TRAP | ||
72 | brnz,a,pn %o1, 1f | ||
73 | mov -1, %o2 | ||
74 | 1: retl | ||
75 | mov %o2, %o0 | ||
76 | ENDPROC(pci_sun4v_config_get) | ||
77 | |||
78 | /* %o0: devhandle | ||
79 | * %o1: pci_device | ||
80 | * %o2: pci_config_offset | ||
81 | * %o3: size | ||
82 | * %o4: data | ||
83 | * | ||
84 | * returns %o0: status | ||
85 | * | ||
86 | * status will be zero if the operation completed | ||
87 | * successfully, else -1 if not | ||
88 | */ | ||
89 | ENTRY(pci_sun4v_config_put) | ||
90 | mov HV_FAST_PCI_CONFIG_PUT, %o5 | ||
91 | ta HV_FAST_TRAP | ||
92 | brnz,a,pn %o1, 1f | ||
93 | mov -1, %o1 | ||
94 | 1: retl | ||
95 | mov %o1, %o0 | ||
96 | ENDPROC(pci_sun4v_config_put) | ||
97 | |||
98 | /* %o0: devhandle | ||
99 | * %o1: msiqid | ||
100 | * %o2: msiq phys address | ||
101 | * %o3: num entries | ||
102 | * | ||
103 | * returns %o0: status | ||
104 | * | ||
105 | * status will be zero if the operation completed | ||
106 | * successfully, else -1 if not | ||
107 | */ | ||
108 | ENTRY(pci_sun4v_msiq_conf) | ||
109 | mov HV_FAST_PCI_MSIQ_CONF, %o5 | ||
110 | ta HV_FAST_TRAP | ||
111 | retl | ||
112 | mov %o0, %o0 | ||
113 | ENDPROC(pci_sun4v_msiq_conf) | ||
114 | |||
115 | /* %o0: devhandle | ||
116 | * %o1: msiqid | ||
117 | * %o2: &msiq_phys_addr | ||
118 | * %o3: &msiq_num_entries | ||
119 | * | ||
120 | * returns %o0: status | ||
121 | */ | ||
122 | ENTRY(pci_sun4v_msiq_info) | ||
123 | mov %o2, %o4 | ||
124 | mov HV_FAST_PCI_MSIQ_INFO, %o5 | ||
125 | ta HV_FAST_TRAP | ||
126 | stx %o1, [%o4] | ||
127 | stx %o2, [%o3] | ||
128 | retl | ||
129 | mov %o0, %o0 | ||
130 | ENDPROC(pci_sun4v_msiq_info) | ||
131 | |||
132 | /* %o0: devhandle | ||
133 | * %o1: msiqid | ||
134 | * %o2: &valid | ||
135 | * | ||
136 | * returns %o0: status | ||
137 | */ | ||
138 | ENTRY(pci_sun4v_msiq_getvalid) | ||
139 | mov HV_FAST_PCI_MSIQ_GETVALID, %o5 | ||
140 | ta HV_FAST_TRAP | ||
141 | stx %o1, [%o2] | ||
142 | retl | ||
143 | mov %o0, %o0 | ||
144 | ENDPROC(pci_sun4v_msiq_getvalid) | ||
145 | |||
146 | /* %o0: devhandle | ||
147 | * %o1: msiqid | ||
148 | * %o2: valid | ||
149 | * | ||
150 | * returns %o0: status | ||
151 | */ | ||
152 | ENTRY(pci_sun4v_msiq_setvalid) | ||
153 | mov HV_FAST_PCI_MSIQ_SETVALID, %o5 | ||
154 | ta HV_FAST_TRAP | ||
155 | retl | ||
156 | mov %o0, %o0 | ||
157 | ENDPROC(pci_sun4v_msiq_setvalid) | ||
158 | |||
159 | /* %o0: devhandle | ||
160 | * %o1: msiqid | ||
161 | * %o2: &state | ||
162 | * | ||
163 | * returns %o0: status | ||
164 | */ | ||
165 | ENTRY(pci_sun4v_msiq_getstate) | ||
166 | mov HV_FAST_PCI_MSIQ_GETSTATE, %o5 | ||
167 | ta HV_FAST_TRAP | ||
168 | stx %o1, [%o2] | ||
169 | retl | ||
170 | mov %o0, %o0 | ||
171 | ENDPROC(pci_sun4v_msiq_getstate) | ||
172 | |||
173 | /* %o0: devhandle | ||
174 | * %o1: msiqid | ||
175 | * %o2: state | ||
176 | * | ||
177 | * returns %o0: status | ||
178 | */ | ||
179 | ENTRY(pci_sun4v_msiq_setstate) | ||
180 | mov HV_FAST_PCI_MSIQ_SETSTATE, %o5 | ||
181 | ta HV_FAST_TRAP | ||
182 | retl | ||
183 | mov %o0, %o0 | ||
184 | ENDPROC(pci_sun4v_msiq_setstate) | ||
185 | |||
186 | /* %o0: devhandle | ||
187 | * %o1: msiqid | ||
188 | * %o2: &head | ||
189 | * | ||
190 | * returns %o0: status | ||
191 | */ | ||
192 | ENTRY(pci_sun4v_msiq_gethead) | ||
193 | mov HV_FAST_PCI_MSIQ_GETHEAD, %o5 | ||
194 | ta HV_FAST_TRAP | ||
195 | stx %o1, [%o2] | ||
196 | retl | ||
197 | mov %o0, %o0 | ||
198 | ENDPROC(pci_sun4v_msiq_gethead) | ||
199 | |||
200 | /* %o0: devhandle | ||
201 | * %o1: msiqid | ||
202 | * %o2: head | ||
203 | * | ||
204 | * returns %o0: status | ||
205 | */ | ||
206 | ENTRY(pci_sun4v_msiq_sethead) | ||
207 | mov HV_FAST_PCI_MSIQ_SETHEAD, %o5 | ||
208 | ta HV_FAST_TRAP | ||
209 | retl | ||
210 | mov %o0, %o0 | ||
211 | ENDPROC(pci_sun4v_msiq_sethead) | ||
212 | |||
213 | /* %o0: devhandle | ||
214 | * %o1: msiqid | ||
215 | * %o2: &tail | ||
216 | * | ||
217 | * returns %o0: status | ||
218 | */ | ||
219 | ENTRY(pci_sun4v_msiq_gettail) | ||
220 | mov HV_FAST_PCI_MSIQ_GETTAIL, %o5 | ||
221 | ta HV_FAST_TRAP | ||
222 | stx %o1, [%o2] | ||
223 | retl | ||
224 | mov %o0, %o0 | ||
225 | ENDPROC(pci_sun4v_msiq_gettail) | ||
226 | |||
227 | /* %o0: devhandle | ||
228 | * %o1: msinum | ||
229 | * %o2: &valid | ||
230 | * | ||
231 | * returns %o0: status | ||
232 | */ | ||
233 | ENTRY(pci_sun4v_msi_getvalid) | ||
234 | mov HV_FAST_PCI_MSI_GETVALID, %o5 | ||
235 | ta HV_FAST_TRAP | ||
236 | stx %o1, [%o2] | ||
237 | retl | ||
238 | mov %o0, %o0 | ||
239 | ENDPROC(pci_sun4v_msi_getvalid) | ||
240 | |||
241 | /* %o0: devhandle | ||
242 | * %o1: msinum | ||
243 | * %o2: valid | ||
244 | * | ||
245 | * returns %o0: status | ||
246 | */ | ||
247 | ENTRY(pci_sun4v_msi_setvalid) | ||
248 | mov HV_FAST_PCI_MSI_SETVALID, %o5 | ||
249 | ta HV_FAST_TRAP | ||
250 | retl | ||
251 | mov %o0, %o0 | ||
252 | ENDPROC(pci_sun4v_msi_setvalid) | ||
253 | |||
254 | /* %o0: devhandle | ||
255 | * %o1: msinum | ||
256 | * %o2: &msiq | ||
257 | * | ||
258 | * returns %o0: status | ||
259 | */ | ||
260 | ENTRY(pci_sun4v_msi_getmsiq) | ||
261 | mov HV_FAST_PCI_MSI_GETMSIQ, %o5 | ||
262 | ta HV_FAST_TRAP | ||
263 | stx %o1, [%o2] | ||
264 | retl | ||
265 | mov %o0, %o0 | ||
266 | ENDPROC(pci_sun4v_msi_getmsiq) | ||
267 | |||
268 | /* %o0: devhandle | ||
269 | * %o1: msinum | ||
270 | * %o2: msitype | ||
271 | * %o3: msiq | ||
272 | * | ||
273 | * returns %o0: status | ||
274 | */ | ||
275 | ENTRY(pci_sun4v_msi_setmsiq) | ||
276 | mov HV_FAST_PCI_MSI_SETMSIQ, %o5 | ||
277 | ta HV_FAST_TRAP | ||
278 | retl | ||
279 | mov %o0, %o0 | ||
280 | ENDPROC(pci_sun4v_msi_setmsiq) | ||
281 | |||
282 | /* %o0: devhandle | ||
283 | * %o1: msinum | ||
284 | * %o2: &state | ||
285 | * | ||
286 | * returns %o0: status | ||
287 | */ | ||
288 | ENTRY(pci_sun4v_msi_getstate) | ||
289 | mov HV_FAST_PCI_MSI_GETSTATE, %o5 | ||
290 | ta HV_FAST_TRAP | ||
291 | stx %o1, [%o2] | ||
292 | retl | ||
293 | mov %o0, %o0 | ||
294 | ENDPROC(pci_sun4v_msi_getstate) | ||
295 | |||
296 | /* %o0: devhandle | ||
297 | * %o1: msinum | ||
298 | * %o2: state | ||
299 | * | ||
300 | * returns %o0: status | ||
301 | */ | ||
302 | ENTRY(pci_sun4v_msi_setstate) | ||
303 | mov HV_FAST_PCI_MSI_SETSTATE, %o5 | ||
304 | ta HV_FAST_TRAP | ||
305 | retl | ||
306 | mov %o0, %o0 | ||
307 | ENDPROC(pci_sun4v_msi_setstate) | ||
308 | |||
309 | /* %o0: devhandle | ||
310 | * %o1: msinum | ||
311 | * %o2: &msiq | ||
312 | * | ||
313 | * returns %o0: status | ||
314 | */ | ||
315 | ENTRY(pci_sun4v_msg_getmsiq) | ||
316 | mov HV_FAST_PCI_MSG_GETMSIQ, %o5 | ||
317 | ta HV_FAST_TRAP | ||
318 | stx %o1, [%o2] | ||
319 | retl | ||
320 | mov %o0, %o0 | ||
321 | ENDPROC(pci_sun4v_msg_getmsiq) | ||
322 | |||
323 | /* %o0: devhandle | ||
324 | * %o1: msinum | ||
325 | * %o2: msiq | ||
326 | * | ||
327 | * returns %o0: status | ||
328 | */ | ||
329 | ENTRY(pci_sun4v_msg_setmsiq) | ||
330 | mov HV_FAST_PCI_MSG_SETMSIQ, %o5 | ||
331 | ta HV_FAST_TRAP | ||
332 | retl | ||
333 | mov %o0, %o0 | ||
334 | ENDPROC(pci_sun4v_msg_setmsiq) | ||
335 | |||
336 | /* %o0: devhandle | ||
337 | * %o1: msinum | ||
338 | * %o2: &valid | ||
339 | * | ||
340 | * returns %o0: status | ||
341 | */ | ||
342 | ENTRY(pci_sun4v_msg_getvalid) | ||
343 | mov HV_FAST_PCI_MSG_GETVALID, %o5 | ||
344 | ta HV_FAST_TRAP | ||
345 | stx %o1, [%o2] | ||
346 | retl | ||
347 | mov %o0, %o0 | ||
348 | ENDPROC(pci_sun4v_msg_getvalid) | ||
349 | |||
350 | /* %o0: devhandle | ||
351 | * %o1: msinum | ||
352 | * %o2: valid | ||
353 | * | ||
354 | * returns %o0: status | ||
355 | */ | ||
356 | ENTRY(pci_sun4v_msg_setvalid) | ||
357 | mov HV_FAST_PCI_MSG_SETVALID, %o5 | ||
358 | ta HV_FAST_TRAP | ||
359 | retl | ||
360 | mov %o0, %o0 | ||
361 | ENDPROC(pci_sun4v_msg_setvalid) | ||
362 | |||
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c new file mode 100644 index 000000000000..076cad7f9757 --- /dev/null +++ b/arch/sparc/kernel/power.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* power.c: Power management driver. | ||
2 | * | ||
3 | * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/reboot.h> | ||
11 | #include <linux/of_device.h> | ||
12 | |||
13 | #include <asm/prom.h> | ||
14 | #include <asm/io.h> | ||
15 | |||
16 | static void __iomem *power_reg; | ||
17 | |||
18 | static irqreturn_t power_handler(int irq, void *dev_id) | ||
19 | { | ||
20 | orderly_poweroff(true); | ||
21 | |||
22 | /* FIXME: Check registers for status... */ | ||
23 | return IRQ_HANDLED; | ||
24 | } | ||
25 | |||
26 | static int __init has_button_interrupt(unsigned int irq, struct device_node *dp) | ||
27 | { | ||
28 | if (irq == 0xffffffff) | ||
29 | return 0; | ||
30 | if (!of_find_property(dp, "button", NULL)) | ||
31 | return 0; | ||
32 | |||
33 | return 1; | ||
34 | } | ||
35 | |||
36 | static int __devinit power_probe(struct of_device *op, const struct of_device_id *match) | ||
37 | { | ||
38 | struct resource *res = &op->resource[0]; | ||
39 | unsigned int irq= op->irqs[0]; | ||
40 | |||
41 | power_reg = of_ioremap(res, 0, 0x4, "power"); | ||
42 | |||
43 | printk(KERN_INFO "%s: Control reg at %lx\n", | ||
44 | op->node->name, res->start); | ||
45 | |||
46 | if (has_button_interrupt(irq, op->node)) { | ||
47 | if (request_irq(irq, | ||
48 | power_handler, 0, "power", NULL) < 0) | ||
49 | printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static struct of_device_id __initdata power_match[] = { | ||
56 | { | ||
57 | .name = "power", | ||
58 | }, | ||
59 | {}, | ||
60 | }; | ||
61 | |||
62 | static struct of_platform_driver power_driver = { | ||
63 | .match_table = power_match, | ||
64 | .probe = power_probe, | ||
65 | .driver = { | ||
66 | .name = "power", | ||
67 | }, | ||
68 | }; | ||
69 | |||
70 | static int __init power_init(void) | ||
71 | { | ||
72 | return of_register_driver(&power_driver, &of_platform_bus_type); | ||
73 | } | ||
74 | |||
75 | device_initcall(power_init); | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c new file mode 100644 index 000000000000..d5e2acef9877 --- /dev/null +++ b/arch/sparc/kernel/process_64.c | |||
@@ -0,0 +1,812 @@ | |||
1 | /* arch/sparc64/kernel/process.c | ||
2 | * | ||
3 | * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * This file handles the architecture-dependent parts of process handling.. | ||
10 | */ | ||
11 | |||
12 | #include <stdarg.h> | ||
13 | |||
14 | #include <linux/errno.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/fs.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/user.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/compat.h> | ||
27 | #include <linux/tick.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/cpu.h> | ||
30 | #include <linux/elfcore.h> | ||
31 | #include <linux/sysrq.h> | ||
32 | |||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/system.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/processor.h> | ||
39 | #include <asm/pstate.h> | ||
40 | #include <asm/elf.h> | ||
41 | #include <asm/fpumacro.h> | ||
42 | #include <asm/head.h> | ||
43 | #include <asm/cpudata.h> | ||
44 | #include <asm/mmu_context.h> | ||
45 | #include <asm/unistd.h> | ||
46 | #include <asm/hypervisor.h> | ||
47 | #include <asm/syscalls.h> | ||
48 | #include <asm/irq_regs.h> | ||
49 | #include <asm/smp.h> | ||
50 | |||
51 | #include "kstack.h" | ||
52 | |||
53 | static void sparc64_yield(int cpu) | ||
54 | { | ||
55 | if (tlb_type != hypervisor) | ||
56 | return; | ||
57 | |||
58 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
59 | smp_mb__after_clear_bit(); | ||
60 | |||
61 | while (!need_resched() && !cpu_is_offline(cpu)) { | ||
62 | unsigned long pstate; | ||
63 | |||
64 | /* Disable interrupts. */ | ||
65 | __asm__ __volatile__( | ||
66 | "rdpr %%pstate, %0\n\t" | ||
67 | "andn %0, %1, %0\n\t" | ||
68 | "wrpr %0, %%g0, %%pstate" | ||
69 | : "=&r" (pstate) | ||
70 | : "i" (PSTATE_IE)); | ||
71 | |||
72 | if (!need_resched() && !cpu_is_offline(cpu)) | ||
73 | sun4v_cpu_yield(); | ||
74 | |||
75 | /* Re-enable interrupts. */ | ||
76 | __asm__ __volatile__( | ||
77 | "rdpr %%pstate, %0\n\t" | ||
78 | "or %0, %1, %0\n\t" | ||
79 | "wrpr %0, %%g0, %%pstate" | ||
80 | : "=&r" (pstate) | ||
81 | : "i" (PSTATE_IE)); | ||
82 | } | ||
83 | |||
84 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
85 | } | ||
86 | |||
87 | /* The idle loop on sparc64. */ | ||
88 | void cpu_idle(void) | ||
89 | { | ||
90 | int cpu = smp_processor_id(); | ||
91 | |||
92 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
93 | |||
94 | while(1) { | ||
95 | tick_nohz_stop_sched_tick(1); | ||
96 | |||
97 | while (!need_resched() && !cpu_is_offline(cpu)) | ||
98 | sparc64_yield(cpu); | ||
99 | |||
100 | tick_nohz_restart_sched_tick(); | ||
101 | |||
102 | preempt_enable_no_resched(); | ||
103 | |||
104 | #ifdef CONFIG_HOTPLUG_CPU | ||
105 | if (cpu_is_offline(cpu)) | ||
106 | cpu_play_dead(); | ||
107 | #endif | ||
108 | |||
109 | schedule(); | ||
110 | preempt_disable(); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | #ifdef CONFIG_COMPAT | ||
115 | static void show_regwindow32(struct pt_regs *regs) | ||
116 | { | ||
117 | struct reg_window32 __user *rw; | ||
118 | struct reg_window32 r_w; | ||
119 | mm_segment_t old_fs; | ||
120 | |||
121 | __asm__ __volatile__ ("flushw"); | ||
122 | rw = compat_ptr((unsigned)regs->u_regs[14]); | ||
123 | old_fs = get_fs(); | ||
124 | set_fs (USER_DS); | ||
125 | if (copy_from_user (&r_w, rw, sizeof(r_w))) { | ||
126 | set_fs (old_fs); | ||
127 | return; | ||
128 | } | ||
129 | |||
130 | set_fs (old_fs); | ||
131 | printk("l0: %08x l1: %08x l2: %08x l3: %08x " | ||
132 | "l4: %08x l5: %08x l6: %08x l7: %08x\n", | ||
133 | r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3], | ||
134 | r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]); | ||
135 | printk("i0: %08x i1: %08x i2: %08x i3: %08x " | ||
136 | "i4: %08x i5: %08x i6: %08x i7: %08x\n", | ||
137 | r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3], | ||
138 | r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]); | ||
139 | } | ||
140 | #else | ||
141 | #define show_regwindow32(regs) do { } while (0) | ||
142 | #endif | ||
143 | |||
144 | static void show_regwindow(struct pt_regs *regs) | ||
145 | { | ||
146 | struct reg_window __user *rw; | ||
147 | struct reg_window *rwk; | ||
148 | struct reg_window r_w; | ||
149 | mm_segment_t old_fs; | ||
150 | |||
151 | if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) { | ||
152 | __asm__ __volatile__ ("flushw"); | ||
153 | rw = (struct reg_window __user *) | ||
154 | (regs->u_regs[14] + STACK_BIAS); | ||
155 | rwk = (struct reg_window *) | ||
156 | (regs->u_regs[14] + STACK_BIAS); | ||
157 | if (!(regs->tstate & TSTATE_PRIV)) { | ||
158 | old_fs = get_fs(); | ||
159 | set_fs (USER_DS); | ||
160 | if (copy_from_user (&r_w, rw, sizeof(r_w))) { | ||
161 | set_fs (old_fs); | ||
162 | return; | ||
163 | } | ||
164 | rwk = &r_w; | ||
165 | set_fs (old_fs); | ||
166 | } | ||
167 | } else { | ||
168 | show_regwindow32(regs); | ||
169 | return; | ||
170 | } | ||
171 | printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n", | ||
172 | rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]); | ||
173 | printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n", | ||
174 | rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]); | ||
175 | printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n", | ||
176 | rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]); | ||
177 | printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", | ||
178 | rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); | ||
179 | if (regs->tstate & TSTATE_PRIV) | ||
180 | printk("I7: <%pS>\n", (void *) rwk->ins[7]); | ||
181 | } | ||
182 | |||
183 | void show_regs(struct pt_regs *regs) | ||
184 | { | ||
185 | printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, | ||
186 | regs->tpc, regs->tnpc, regs->y, print_tainted()); | ||
187 | printk("TPC: <%pS>\n", (void *) regs->tpc); | ||
188 | printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", | ||
189 | regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], | ||
190 | regs->u_regs[3]); | ||
191 | printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n", | ||
192 | regs->u_regs[4], regs->u_regs[5], regs->u_regs[6], | ||
193 | regs->u_regs[7]); | ||
194 | printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n", | ||
195 | regs->u_regs[8], regs->u_regs[9], regs->u_regs[10], | ||
196 | regs->u_regs[11]); | ||
197 | printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", | ||
198 | regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], | ||
199 | regs->u_regs[15]); | ||
200 | printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); | ||
201 | show_regwindow(regs); | ||
202 | } | ||
203 | |||
204 | struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; | ||
205 | static DEFINE_SPINLOCK(global_reg_snapshot_lock); | ||
206 | |||
207 | static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, | ||
208 | int this_cpu) | ||
209 | { | ||
210 | flushw_all(); | ||
211 | |||
212 | global_reg_snapshot[this_cpu].tstate = regs->tstate; | ||
213 | global_reg_snapshot[this_cpu].tpc = regs->tpc; | ||
214 | global_reg_snapshot[this_cpu].tnpc = regs->tnpc; | ||
215 | global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7]; | ||
216 | |||
217 | if (regs->tstate & TSTATE_PRIV) { | ||
218 | struct reg_window *rw; | ||
219 | |||
220 | rw = (struct reg_window *) | ||
221 | (regs->u_regs[UREG_FP] + STACK_BIAS); | ||
222 | if (kstack_valid(tp, (unsigned long) rw)) { | ||
223 | global_reg_snapshot[this_cpu].i7 = rw->ins[7]; | ||
224 | rw = (struct reg_window *) | ||
225 | (rw->ins[6] + STACK_BIAS); | ||
226 | if (kstack_valid(tp, (unsigned long) rw)) | ||
227 | global_reg_snapshot[this_cpu].rpc = rw->ins[7]; | ||
228 | } | ||
229 | } else { | ||
230 | global_reg_snapshot[this_cpu].i7 = 0; | ||
231 | global_reg_snapshot[this_cpu].rpc = 0; | ||
232 | } | ||
233 | global_reg_snapshot[this_cpu].thread = tp; | ||
234 | } | ||
235 | |||
236 | /* In order to avoid hangs we do not try to synchronize with the | ||
237 | * global register dump client cpus. The last store they make is to | ||
238 | * the thread pointer, so do a short poll waiting for that to become | ||
239 | * non-NULL. | ||
240 | */ | ||
241 | static void __global_reg_poll(struct global_reg_snapshot *gp) | ||
242 | { | ||
243 | int limit = 0; | ||
244 | |||
245 | while (!gp->thread && ++limit < 100) { | ||
246 | barrier(); | ||
247 | udelay(1); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | void __trigger_all_cpu_backtrace(void) | ||
252 | { | ||
253 | struct thread_info *tp = current_thread_info(); | ||
254 | struct pt_regs *regs = get_irq_regs(); | ||
255 | unsigned long flags; | ||
256 | int this_cpu, cpu; | ||
257 | |||
258 | if (!regs) | ||
259 | regs = tp->kregs; | ||
260 | |||
261 | spin_lock_irqsave(&global_reg_snapshot_lock, flags); | ||
262 | |||
263 | memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); | ||
264 | |||
265 | this_cpu = raw_smp_processor_id(); | ||
266 | |||
267 | __global_reg_self(tp, regs, this_cpu); | ||
268 | |||
269 | smp_fetch_global_regs(); | ||
270 | |||
271 | for_each_online_cpu(cpu) { | ||
272 | struct global_reg_snapshot *gp = &global_reg_snapshot[cpu]; | ||
273 | |||
274 | __global_reg_poll(gp); | ||
275 | |||
276 | tp = gp->thread; | ||
277 | printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n", | ||
278 | (cpu == this_cpu ? '*' : ' '), cpu, | ||
279 | gp->tstate, gp->tpc, gp->tnpc, | ||
280 | ((tp && tp->task) ? tp->task->comm : "NULL"), | ||
281 | ((tp && tp->task) ? tp->task->pid : -1)); | ||
282 | |||
283 | if (gp->tstate & TSTATE_PRIV) { | ||
284 | printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", | ||
285 | (void *) gp->tpc, | ||
286 | (void *) gp->o7, | ||
287 | (void *) gp->i7, | ||
288 | (void *) gp->rpc); | ||
289 | } else { | ||
290 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", | ||
291 | gp->tpc, gp->o7, gp->i7, gp->rpc); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); | ||
296 | |||
297 | spin_unlock_irqrestore(&global_reg_snapshot_lock, flags); | ||
298 | } | ||
299 | |||
300 | #ifdef CONFIG_MAGIC_SYSRQ | ||
301 | |||
302 | static void sysrq_handle_globreg(int key, struct tty_struct *tty) | ||
303 | { | ||
304 | __trigger_all_cpu_backtrace(); | ||
305 | } | ||
306 | |||
307 | static struct sysrq_key_op sparc_globalreg_op = { | ||
308 | .handler = sysrq_handle_globreg, | ||
309 | .help_msg = "Globalregs", | ||
310 | .action_msg = "Show Global CPU Regs", | ||
311 | }; | ||
312 | |||
313 | static int __init sparc_globreg_init(void) | ||
314 | { | ||
315 | return register_sysrq_key('y', &sparc_globalreg_op); | ||
316 | } | ||
317 | |||
318 | core_initcall(sparc_globreg_init); | ||
319 | |||
320 | #endif | ||
321 | |||
322 | unsigned long thread_saved_pc(struct task_struct *tsk) | ||
323 | { | ||
324 | struct thread_info *ti = task_thread_info(tsk); | ||
325 | unsigned long ret = 0xdeadbeefUL; | ||
326 | |||
327 | if (ti && ti->ksp) { | ||
328 | unsigned long *sp; | ||
329 | sp = (unsigned long *)(ti->ksp + STACK_BIAS); | ||
330 | if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL && | ||
331 | sp[14]) { | ||
332 | unsigned long *fp; | ||
333 | fp = (unsigned long *)(sp[14] + STACK_BIAS); | ||
334 | if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL) | ||
335 | ret = fp[15]; | ||
336 | } | ||
337 | } | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | /* Free current thread data structures etc.. */ | ||
342 | void exit_thread(void) | ||
343 | { | ||
344 | struct thread_info *t = current_thread_info(); | ||
345 | |||
346 | if (t->utraps) { | ||
347 | if (t->utraps[0] < 2) | ||
348 | kfree (t->utraps); | ||
349 | else | ||
350 | t->utraps[0]--; | ||
351 | } | ||
352 | |||
353 | if (test_and_clear_thread_flag(TIF_PERFCTR)) { | ||
354 | t->user_cntd0 = t->user_cntd1 = NULL; | ||
355 | t->pcr_reg = 0; | ||
356 | write_pcr(0); | ||
357 | } | ||
358 | } | ||
359 | |||
360 | void flush_thread(void) | ||
361 | { | ||
362 | struct thread_info *t = current_thread_info(); | ||
363 | struct mm_struct *mm; | ||
364 | |||
365 | if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { | ||
366 | clear_ti_thread_flag(t, TIF_ABI_PENDING); | ||
367 | if (test_ti_thread_flag(t, TIF_32BIT)) | ||
368 | clear_ti_thread_flag(t, TIF_32BIT); | ||
369 | else | ||
370 | set_ti_thread_flag(t, TIF_32BIT); | ||
371 | } | ||
372 | |||
373 | mm = t->task->mm; | ||
374 | if (mm) | ||
375 | tsb_context_switch(mm); | ||
376 | |||
377 | set_thread_wsaved(0); | ||
378 | |||
379 | /* Turn off performance counters if on. */ | ||
380 | if (test_and_clear_thread_flag(TIF_PERFCTR)) { | ||
381 | t->user_cntd0 = t->user_cntd1 = NULL; | ||
382 | t->pcr_reg = 0; | ||
383 | write_pcr(0); | ||
384 | } | ||
385 | |||
386 | /* Clear FPU register state. */ | ||
387 | t->fpsaved[0] = 0; | ||
388 | |||
389 | if (get_thread_current_ds() != ASI_AIUS) | ||
390 | set_fs(USER_DS); | ||
391 | } | ||
392 | |||
393 | /* It's a bit more tricky when 64-bit tasks are involved... */ | ||
394 | static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) | ||
395 | { | ||
396 | unsigned long fp, distance, rval; | ||
397 | |||
398 | if (!(test_thread_flag(TIF_32BIT))) { | ||
399 | csp += STACK_BIAS; | ||
400 | psp += STACK_BIAS; | ||
401 | __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); | ||
402 | fp += STACK_BIAS; | ||
403 | } else | ||
404 | __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); | ||
405 | |||
406 | /* Now 8-byte align the stack as this is mandatory in the | ||
407 | * Sparc ABI due to how register windows work. This hides | ||
408 | * the restriction from thread libraries etc. -DaveM | ||
409 | */ | ||
410 | csp &= ~7UL; | ||
411 | |||
412 | distance = fp - psp; | ||
413 | rval = (csp - distance); | ||
414 | if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) | ||
415 | rval = 0; | ||
416 | else if (test_thread_flag(TIF_32BIT)) { | ||
417 | if (put_user(((u32)csp), | ||
418 | &(((struct reg_window32 __user *)rval)->ins[6]))) | ||
419 | rval = 0; | ||
420 | } else { | ||
421 | if (put_user(((u64)csp - STACK_BIAS), | ||
422 | &(((struct reg_window __user *)rval)->ins[6]))) | ||
423 | rval = 0; | ||
424 | else | ||
425 | rval = rval - STACK_BIAS; | ||
426 | } | ||
427 | |||
428 | return rval; | ||
429 | } | ||
430 | |||
431 | /* Standard stuff. */ | ||
432 | static inline void shift_window_buffer(int first_win, int last_win, | ||
433 | struct thread_info *t) | ||
434 | { | ||
435 | int i; | ||
436 | |||
437 | for (i = first_win; i < last_win; i++) { | ||
438 | t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1]; | ||
439 | memcpy(&t->reg_window[i], &t->reg_window[i+1], | ||
440 | sizeof(struct reg_window)); | ||
441 | } | ||
442 | } | ||
443 | |||
/* Flush the cpu's user register windows, then opportunistically push
 * any windows still buffered in the thread_info spill area out to the
 * user stack.  A failing copy_to_user() (stack page not resident) is
 * not an error here -- that window simply stays buffered; the
 * must-succeed variant is fault_in_user_windows().
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		if (test_thread_flag(TIF_32BIT))
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;	/* 64-bit stacks are biased */

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			/* On success, compact the buffer over the slot
			 * we just wrote out and shrink the count. */
			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
471 | |||
/* Send the current task SIGBUS with code BUS_ADRALN for a user stack
 * pointer that violates the mandatory 8-byte alignment. */
static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;	/* faulting stack address */
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
483 | |||
/* Push all buffered register windows out to the user stack, faulting
 * in stack pages as needed.  Unlike synchronize_user_stack(), a copy
 * failure here is fatal: the task is killed with SIGILL because its
 * register window state can no longer be made consistent.
 */
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	if (test_thread_flag(TIF_32BIT))
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;	/* 64-bit stacks are biased */

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			/* Misaligned stack: queue SIGBUS, but still
			 * attempt the copy below. */
			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	/* Record how many windows remain unwritten before dying. */
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}
520 | |||
/* Common entry point for the fork/vfork/clone system calls, reached
 * from the syscall trap code with the parent's full pt_regs.  The TID
 * pointers (CLONE_PARENT_SETTID / CLONE_CHILD_*TID) are fetched from
 * %i2 and %i4, as compat pointers for 32-bit tasks.
 */
asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start,
		      regs, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1. So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}
555 | |||
/* Copy a Sparc thread. The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == childs pid, %o1 == 0
 * Child --> %o0 == parents pid, %o1 == 1
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *t = task_thread_info(p);
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;
	int kernel_thread;

	/* A trap taken from privileged mode means we are spawning a
	 * kernel thread. */
	kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
	parent_sf = ((struct sparc_stackf *) regs) - 1;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
			  (kernel_thread ? STACKFRAME_SZ : 0));
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));
	memcpy(child_trap_frame, parent_sf, child_stack_sz);

	/* Record the child's CWP (the parent's next window) in the
	 * thread flags; the current_ds field is filled in below once
	 * we know whether this is a kernel or user thread. */
	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
				 (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;	/* child starts with no saved FPU state */

	if (kernel_thread) {
		struct sparc_stackf *child_sf = (struct sparc_stackf *)
			(child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));

		/* Zero terminate the stack backtrace. */
		child_sf->fp = NULL;
		t->kregs->u_regs[UREG_FP] =
		  ((unsigned long) child_sf) - STACK_BIAS;

		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (usermode helper, NFS or similar), we
		 * must disable performance counters in the child because
		 * the address space and protection realm are changing.
		 */
		if (t->flags & _TIF_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~_TIF_PERFCTR;
		}
		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
	} else {
		if (t->flags & _TIF_32BIT) {
			/* 32-bit task: truncate stack pointers. */
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			/* clone() with a new stack: replicate the
			 * parent's top frame onto it. */
			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		if (t->utraps)
			t->utraps[0]++;	/* bump utrap table refcount */
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
643 | |||
644 | /* | ||
645 | * This is the mechanism for creating a new kernel thread. | ||
646 | * | ||
647 | * NOTE! Only a kernel-only process(ie the swapper or direct descendants | ||
648 | * who haven't done an "execve()") should use this: it will work within | ||
649 | * a system call from a "real" process, but the process memory space will | ||
650 | * not be freed until both the parent and the child have exited. | ||
651 | */ | ||
652 | pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | ||
653 | { | ||
654 | long retval; | ||
655 | |||
656 | /* If the parent runs before fn(arg) is called by the child, | ||
657 | * the input registers of this function can be clobbered. | ||
658 | * So we stash 'fn' and 'arg' into global registers which | ||
659 | * will not be modified by the parent. | ||
660 | */ | ||
661 | __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */ | ||
662 | "mov %5, %%g3\n\t" /* Save ARG into global */ | ||
663 | "mov %1, %%g1\n\t" /* Clone syscall nr. */ | ||
664 | "mov %2, %%o0\n\t" /* Clone flags. */ | ||
665 | "mov 0, %%o1\n\t" /* usp arg == 0 */ | ||
666 | "t 0x6d\n\t" /* Linux/Sparc clone(). */ | ||
667 | "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */ | ||
668 | " mov %%o0, %0\n\t" | ||
669 | "jmpl %%g2, %%o7\n\t" /* Call the function. */ | ||
670 | " mov %%g3, %%o0\n\t" /* Set arg in delay. */ | ||
671 | "mov %3, %%g1\n\t" | ||
672 | "t 0x6d\n\t" /* Linux/Sparc exit(). */ | ||
673 | /* Notreached by child. */ | ||
674 | "1:" : | ||
675 | "=r" (retval) : | ||
676 | "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), | ||
677 | "i" (__NR_exit), "r" (fn), "r" (arg) : | ||
678 | "g1", "g2", "g3", "o0", "o1", "memory", "cc"); | ||
679 | return retval; | ||
680 | } | ||
681 | |||
/* 32-bit layout of the FPU register dump used when core-dumping a
 * compat (TIF_32BIT) task; filled in by dump_fpu() below. */
typedef struct {
	union {
		unsigned int pr_regs[32];	/* %f0-%f31 as 32-bit words */
		unsigned long pr_dregs[16];	/* same storage as doubles */
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;		/* FPU status register (low 32 bits) */
	unsigned char pr_qcnt;		/* valid entries in pr_q (always 0 here) */
	unsigned char pr_q_entrysize;	/* size of each pr_q entry (set to 8) */
	unsigned char pr_en;		/* 1 if the FPU was enabled (FPRS_FEF) */
	unsigned int pr_q[64];		/* FPU queue; zeroed by dump_fpu() */
} elf_fpregset_t32;
694 | |||
695 | /* | ||
696 | * fill in the fpu structure for a core dump. | ||
697 | */ | ||
698 | int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) | ||
699 | { | ||
700 | unsigned long *kfpregs = current_thread_info()->fpregs; | ||
701 | unsigned long fprs = current_thread_info()->fpsaved[0]; | ||
702 | |||
703 | if (test_thread_flag(TIF_32BIT)) { | ||
704 | elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs; | ||
705 | |||
706 | if (fprs & FPRS_DL) | ||
707 | memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs, | ||
708 | sizeof(unsigned int) * 32); | ||
709 | else | ||
710 | memset(&fpregs32->pr_fr.pr_regs[0], 0, | ||
711 | sizeof(unsigned int) * 32); | ||
712 | fpregs32->pr_qcnt = 0; | ||
713 | fpregs32->pr_q_entrysize = 8; | ||
714 | memset(&fpregs32->pr_q[0], 0, | ||
715 | (sizeof(unsigned int) * 64)); | ||
716 | if (fprs & FPRS_FEF) { | ||
717 | fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0]; | ||
718 | fpregs32->pr_en = 1; | ||
719 | } else { | ||
720 | fpregs32->pr_fsr = 0; | ||
721 | fpregs32->pr_en = 0; | ||
722 | } | ||
723 | } else { | ||
724 | if(fprs & FPRS_DL) | ||
725 | memcpy(&fpregs->pr_regs[0], kfpregs, | ||
726 | sizeof(unsigned int) * 32); | ||
727 | else | ||
728 | memset(&fpregs->pr_regs[0], 0, | ||
729 | sizeof(unsigned int) * 32); | ||
730 | if(fprs & FPRS_DU) | ||
731 | memcpy(&fpregs->pr_regs[16], kfpregs+16, | ||
732 | sizeof(unsigned int) * 32); | ||
733 | else | ||
734 | memset(&fpregs->pr_regs[16], 0, | ||
735 | sizeof(unsigned int) * 32); | ||
736 | if(fprs & FPRS_FEF) { | ||
737 | fpregs->pr_fsr = current_thread_info()->xfsr[0]; | ||
738 | fpregs->pr_gsr = current_thread_info()->gsr[0]; | ||
739 | } else { | ||
740 | fpregs->pr_fsr = fpregs->pr_gsr = 0; | ||
741 | } | ||
742 | fpregs->pr_fprs = fprs; | ||
743 | } | ||
744 | return 1; | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * sparc_execve() executes a new program after the asm stub has set | ||
749 | * things up for us. This should basically do what I want it to. | ||
750 | */ | ||
751 | asmlinkage int sparc_execve(struct pt_regs *regs) | ||
752 | { | ||
753 | int error, base = 0; | ||
754 | char *filename; | ||
755 | |||
756 | /* User register window flush is done by entry.S */ | ||
757 | |||
758 | /* Check for indirect call. */ | ||
759 | if (regs->u_regs[UREG_G1] == 0) | ||
760 | base = 1; | ||
761 | |||
762 | filename = getname((char __user *)regs->u_regs[base + UREG_I0]); | ||
763 | error = PTR_ERR(filename); | ||
764 | if (IS_ERR(filename)) | ||
765 | goto out; | ||
766 | error = do_execve(filename, | ||
767 | (char __user * __user *) | ||
768 | regs->u_regs[base + UREG_I1], | ||
769 | (char __user * __user *) | ||
770 | regs->u_regs[base + UREG_I2], regs); | ||
771 | putname(filename); | ||
772 | if (!error) { | ||
773 | fprs_write(0); | ||
774 | current_thread_info()->xfsr[0] = 0; | ||
775 | current_thread_info()->fpsaved[0] = 0; | ||
776 | regs->tstate &= ~TSTATE_PEF; | ||
777 | } | ||
778 | out: | ||
779 | return error; | ||
780 | } | ||
781 | |||
782 | unsigned long get_wchan(struct task_struct *task) | ||
783 | { | ||
784 | unsigned long pc, fp, bias = 0; | ||
785 | struct thread_info *tp; | ||
786 | struct reg_window *rw; | ||
787 | unsigned long ret = 0; | ||
788 | int count = 0; | ||
789 | |||
790 | if (!task || task == current || | ||
791 | task->state == TASK_RUNNING) | ||
792 | goto out; | ||
793 | |||
794 | tp = task_thread_info(task); | ||
795 | bias = STACK_BIAS; | ||
796 | fp = task_thread_info(task)->ksp + bias; | ||
797 | |||
798 | do { | ||
799 | if (!kstack_valid(tp, fp)) | ||
800 | break; | ||
801 | rw = (struct reg_window *) fp; | ||
802 | pc = rw->ins[7]; | ||
803 | if (!in_sched_functions(pc)) { | ||
804 | ret = pc; | ||
805 | goto out; | ||
806 | } | ||
807 | fp = rw->ins[6] + bias; | ||
808 | } while (++count < 16); | ||
809 | |||
810 | out: | ||
811 | return ret; | ||
812 | } | ||
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c new file mode 100644 index 000000000000..dbba82f9b142 --- /dev/null +++ b/arch/sparc/kernel/prom_64.c | |||
@@ -0,0 +1,1684 @@ | |||
1 | /* | ||
2 | * Procedures for creating, accessing and interpreting the device tree. | ||
3 | * | ||
4 | * Paul Mackerras August 1996. | ||
5 | * Copyright (C) 1996-2005 Paul Mackerras. | ||
6 | * | ||
7 | * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. | ||
8 | * {engebret|bergner}@us.ibm.com | ||
9 | * | ||
10 | * Adapted for sparc64 by David S. Miller davem@davemloft.net | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/lmb.h> | ||
24 | #include <linux/of_device.h> | ||
25 | |||
26 | #include <asm/prom.h> | ||
27 | #include <asm/oplib.h> | ||
28 | #include <asm/irq.h> | ||
29 | #include <asm/asi.h> | ||
30 | #include <asm/upa.h> | ||
31 | #include <asm/smp.h> | ||
32 | |||
33 | extern struct device_node *allnodes; /* temporary while merging */ | ||
34 | |||
35 | extern rwlock_t devtree_lock; /* temporary while merging */ | ||
36 | |||
/* Look up a device node by its firmware phandle.  Walks the flat
 * 'allnodes' list; returns NULL when no node carries the handle. */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	for (np = allnodes; np; np = np->allnext)
		if (np->node == handle)
			break;

	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
48 | |||
/* Fetch a 32-bit integer property from 'np'; return 'def' when the
 * property is missing or is not exactly 4 bytes long. */
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
	struct property *prop;
	int len;

	prop = of_find_property(np, name, &len);
	if (!prop || len != 4)
		return def;

	return *(int *) prop->value;
}
EXPORT_SYMBOL(of_getintprop_default);
61 | |||
62 | DEFINE_MUTEX(of_set_property_mutex); | ||
63 | EXPORT_SYMBOL(of_set_property_mutex); | ||
64 | |||
65 | int of_set_property(struct device_node *dp, const char *name, void *val, int len) | ||
66 | { | ||
67 | struct property **prevp; | ||
68 | void *new_val; | ||
69 | int err; | ||
70 | |||
71 | new_val = kmalloc(len, GFP_KERNEL); | ||
72 | if (!new_val) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | memcpy(new_val, val, len); | ||
76 | |||
77 | err = -ENODEV; | ||
78 | |||
79 | write_lock(&devtree_lock); | ||
80 | prevp = &dp->properties; | ||
81 | while (*prevp) { | ||
82 | struct property *prop = *prevp; | ||
83 | |||
84 | if (!strcasecmp(prop->name, name)) { | ||
85 | void *old_val = prop->value; | ||
86 | int ret; | ||
87 | |||
88 | mutex_lock(&of_set_property_mutex); | ||
89 | ret = prom_setprop(dp->node, name, val, len); | ||
90 | mutex_unlock(&of_set_property_mutex); | ||
91 | |||
92 | err = -EINVAL; | ||
93 | if (ret >= 0) { | ||
94 | prop->value = new_val; | ||
95 | prop->length = len; | ||
96 | |||
97 | if (OF_IS_DYNAMIC(prop)) | ||
98 | kfree(old_val); | ||
99 | |||
100 | OF_MARK_DYNAMIC(prop); | ||
101 | |||
102 | err = 0; | ||
103 | } | ||
104 | break; | ||
105 | } | ||
106 | prevp = &(*prevp)->next; | ||
107 | } | ||
108 | write_unlock(&devtree_lock); | ||
109 | |||
110 | /* XXX Upate procfs if necessary... */ | ||
111 | |||
112 | return err; | ||
113 | } | ||
114 | EXPORT_SYMBOL(of_set_property); | ||
115 | |||
/* Scan a firmware property consisting of NUL-terminated strings
 * packed into 'len' bytes and report whether 'match' is one of them.
 * Returns 1 on a hit, 0 otherwise.
 */
int of_find_in_proplist(const char *list, const char *match, int len)
{
	const char *p = list;
	int remaining = len;

	while (remaining > 0) {
		int entry_len;

		if (strcmp(p, match) == 0)
			return 1;
		/* Step past this entry and its terminating NUL. */
		entry_len = strlen(p) + 1;
		p += entry_len;
		remaining -= entry_len;
	}
	return 0;
}
129 | EXPORT_SYMBOL(of_find_in_proplist); | ||
130 | |||
131 | static unsigned int prom_early_allocated __initdata; | ||
132 | |||
133 | static void * __init prom_early_alloc(unsigned long size) | ||
134 | { | ||
135 | unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES); | ||
136 | void *ret; | ||
137 | |||
138 | if (!paddr) { | ||
139 | prom_printf("prom_early_alloc(%lu) failed\n"); | ||
140 | prom_halt(); | ||
141 | } | ||
142 | |||
143 | ret = __va(paddr); | ||
144 | memset(ret, 0, size); | ||
145 | prom_early_allocated += size; | ||
146 | |||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | #ifdef CONFIG_PCI | ||
/* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0	0x0c00UL
#define PSYCHO_IMAP_B_SLOT0	0x0c20UL
/* IMAP register offset for a PCI-slot interrupt: INO bit 4 selects
 * PCI bus A or B, INO bits 3:2 select the slot; each slot owns one
 * 8-byte IMAP register.
 */
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
	unsigned long base = (ino & 0x10) ? PSYCHO_IMAP_B_SLOT0
					  : PSYCHO_IMAP_A_SLOT0;
	unsigned long slot = (ino >> 2) & 0x03;

	return base + (slot * 8);
}
164 | |||
#define PSYCHO_OBIO_IMAP_BASE	0x1000UL

#define PSYCHO_ONBOARD_IRQ_BASE		0x20
/* Each onboard-device INO has its own 8-byte IMAP register. */
#define psycho_onboard_imap_offset(__ino) \
	(PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))

#define PSYCHO_ICLR_A_SLOT0	0x1400UL
#define PSYCHO_ICLR_SCSI	0x1800UL

/* ICLR offset: INO bit 5 distinguishes onboard devices (SCSI block)
 * from PCI slots (slot A block). */
#define psycho_iclr_offset(ino) \
	((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
			(PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))

/* Translate a PSYCHO INO into a virtual IRQ.  '_data' holds the
 * controller register base established by psycho_irq_trans_init().
 */
static unsigned int psycho_irq_build(struct device_node *dp,
				     unsigned int ino,
				     void *_data)
{
	unsigned long controller_regs = (unsigned long) _data;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;

	ino &= 0x3f;
	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = psycho_pcislot_imap_offset(ino);
	} else {
		/* Onboard device */
		imap_off = psycho_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	imap = controller_regs + imap_off;

	iclr_off = psycho_iclr_offset(ino);
	iclr = controller_regs + iclr_off;

	/* PCI slot interrupts share one IMAP register; the low two
	 * INO bits select the interrupt line within the slot. */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	return build_irq(inofixup, iclr, imap);
}
207 | |||
/* Install the PSYCHO IRQ translator on the controller's device node.
 * Entry [2] of the "reg" property holds the controller register base
 * used for IMAP/ICLR accesses.
 * NOTE(review): the "reg" lookup result is dereferenced without a
 * NULL check -- assumed always present on PSYCHO nodes; confirm.
 */
static void __init psycho_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = psycho_irq_build;

	regs = of_get_property(dp, "reg", NULL);
	dp->irq_trans->data = (void *) regs[2].phys_addr;
}
218 | |||
/* Read a 64-bit SABRE controller register through the physical
 * address bypass ASI (no TLB translation). */
#define sabre_read(__reg) \
({ u64 __ret; \
   __asm__ __volatile__("ldxa [%1] %2, %0" \
			: "=r" (__ret) \
			: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			: "memory"); \
   __ret; \
})

/* Per-controller state for the SABRE IRQ translator. */
struct sabre_irq_data {
	unsigned long controller_regs;	/* phys base of controller regs */
	unsigned int pci_first_busno;	/* first entry of "bus-range" */
};
#define SABRE_CONFIGSPACE	0x001000000UL
#define SABRE_WRSYNC		0x1c20UL

#define SABRE_CONFIG_BASE(CONFIG_SPACE)	\
	(CONFIG_SPACE | (1UL << 24))
#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG)	\
	(((unsigned long)(BUS) << 16) |	\
	 ((unsigned long)(DEVFN) << 8) |	\
	 ((unsigned long)(REG)))
241 | |||
/* When a device lives behind a bridge deeper in the PCI bus topology
 * than APB, a special sequence must run to make sure all pending DMA
 * transfers at the time of IRQ delivery are visible in the coherency
 * domain by the cpu. This sequence is to perform a read on the far
 * side of the non-APB bridge, then perform a read of Sabre's DMA
 * write-sync register.
 *
 * '_arg1' carries the device's config-space phys.hi cell (encodes
 * bus/devfn), '_arg2' the controller's struct sabre_irq_data.
 */
static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
	unsigned int phys_hi = (unsigned int) (unsigned long) _arg1;
	struct sabre_irq_data *irq_data = _arg2;
	unsigned long controller_regs = irq_data->controller_regs;
	unsigned long sync_reg = controller_regs + SABRE_WRSYNC;
	unsigned long config_space = controller_regs + SABRE_CONFIGSPACE;
	unsigned int bus, devfn;
	u16 _unused;

	config_space = SABRE_CONFIG_BASE(config_space);

	/* Extract bus and devfn from the phys.hi cell. */
	bus = (phys_hi >> 16) & 0xff;
	devfn = (phys_hi >> 8) & 0xff;

	config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00);

	/* Dummy config-space read of the device on the far side of
	 * the bridge, framed by membars; result is discarded. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (_unused)
			     : "r" ((u16 *) config_space),
			       "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	/* Then read Sabre's DMA write-sync register. */
	sabre_read(sync_reg);
}
276 | |||
/* SABRE IMAP/ICLR register offsets. */
#define SABRE_IMAP_A_SLOT0	0x0c00UL
#define SABRE_IMAP_B_SLOT0	0x0c20UL
#define SABRE_ICLR_A_SLOT0	0x1400UL
#define SABRE_ICLR_B_SLOT0	0x1480UL
#define SABRE_ICLR_SCSI		0x1800UL
#define SABRE_ICLR_ETH		0x1808UL
#define SABRE_ICLR_BPP		0x1810UL
#define SABRE_ICLR_AU_REC	0x1818UL
#define SABRE_ICLR_AU_PLAY	0x1820UL
#define SABRE_ICLR_PFAIL	0x1828UL
#define SABRE_ICLR_KMS		0x1830UL
#define SABRE_ICLR_FLPY		0x1838UL
#define SABRE_ICLR_SHW		0x1840UL
#define SABRE_ICLR_KBD		0x1848UL
#define SABRE_ICLR_MS		0x1850UL
#define SABRE_ICLR_SER		0x1858UL
#define SABRE_ICLR_UE		0x1870UL
#define SABRE_ICLR_CE		0x1878UL
#define SABRE_ICLR_PCIERR	0x1880UL

/* IMAP register offset for a PCI-slot interrupt: INO bit 4 selects
 * bus A/B, bits 3:2 the slot; one 8-byte IMAP per slot. */
static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
{
	unsigned int bus = (ino & 0x10) >> 4;
	unsigned int slot = (ino & 0x0c) >> 2;

	if (bus == 0)
		return SABRE_IMAP_A_SLOT0 + (slot * 8);
	else
		return SABRE_IMAP_B_SLOT0 + (slot * 8);
}
307 | |||
#define SABRE_OBIO_IMAP_BASE	0x1000UL
#define SABRE_ONBOARD_IRQ_BASE	0x20
/* Each onboard-device INO has its own 8-byte IMAP register. */
#define sabre_onboard_imap_offset(__ino) \
	(SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))

/* ICLR offset: INO bit 5 distinguishes onboard devices (SCSI block)
 * from PCI slots (slot A block). */
#define sabre_iclr_offset(ino) \
	((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
			(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))

/* Decide whether a device's interrupt needs the DMA write-sync
 * pre-handler.  Returns 0 when the nearest "pci" ancestor is the
 * SABRE itself or an APB (simba) bridge -- i.e. no intervening
 * PCI<->PCI bridge -- and 1 otherwise.
 */
static int sabre_device_needs_wsync(struct device_node *dp)
{
	struct device_node *parent = dp->parent;
	const char *parent_model, *parent_compat;

	/* This traversal up towards the root is meant to
	 * handle two cases:
	 *
	 * 1) non-PCI bus sitting under PCI, such as 'ebus'
	 * 2) the PCI controller interrupts themselves, which
	 *    will use the sabre_irq_build but do not need
	 *    the DMA synchronization handling
	 */
	while (parent) {
		if (!strcmp(parent->type, "pci"))
			break;
		parent = parent->parent;
	}

	if (!parent)
		return 0;

	/* SABRE or APB (simba) parent: no sync needed. */
	parent_model = of_get_property(parent,
				       "model", NULL);
	if (parent_model &&
	    (!strcmp(parent_model, "SUNW,sabre") ||
	     !strcmp(parent_model, "SUNW,simba")))
		return 0;

	parent_compat = of_get_property(parent,
					"compatible", NULL);
	if (parent_compat &&
	    (!strcmp(parent_compat, "pci108e,a000") ||
	     !strcmp(parent_compat, "pci108e,a001")))
		return 0;

	return 1;
}
355 | |||
/* Translate a SABRE INO into a virtual IRQ and, for devices sitting
 * behind a non-APB PCI<->PCI bridge, install the DMA write-sync
 * pre-handler.
 */
static unsigned int sabre_irq_build(struct device_node *dp,
				    unsigned int ino,
				    void *_data)
{
	struct sabre_irq_data *irq_data = _data;
	unsigned long controller_regs = irq_data->controller_regs;
	const struct linux_prom_pci_registers *regs;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;
	int virt_irq;

	ino &= 0x3f;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = sabre_pcislot_imap_offset(ino);
	} else {
		/* onboard device */
		imap_off = sabre_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	imap = controller_regs + imap_off;

	iclr_off = sabre_iclr_offset(ino);
	iclr = controller_regs + iclr_off;

	/* PCI slot interrupts share one IMAP register; the low two
	 * INO bits select the interrupt line within the slot. */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	virt_irq = build_irq(inofixup, iclr, imap);

	/* If the parent device is a PCI<->PCI bridge other than
	 * APB, we have to install a pre-handler to ensure that
	 * all pending DMA is drained before the interrupt handler
	 * is run.
	 */
	regs = of_get_property(dp, "reg", NULL);
	if (regs && sabre_device_needs_wsync(dp)) {
		irq_install_pre_handler(virt_irq,
					sabre_wsync_handler,
					(void *) (long) regs->phys_hi,
					(void *) irq_data);
	}

	return virt_irq;
}
403 | |||
/* Install the SABRE IRQ translator on the controller node; record the
 * controller register base (entry [0] of "reg") and the first bus
 * number from "bus-range".
 * NOTE(review): both property lookups are dereferenced without NULL
 * checks -- assumed always present on SABRE nodes; confirm.
 */
static void __init sabre_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct sabre_irq_data *irq_data;
	const u32 *busrange;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = sabre_irq_build;

	irq_data = prom_early_alloc(sizeof(struct sabre_irq_data));

	regs = of_get_property(dp, "reg", NULL);
	irq_data->controller_regs = regs[0].phys_addr;

	busrange = of_get_property(dp, "bus-range", NULL);
	irq_data->pci_first_busno = busrange[0];

	dp->irq_trans->data = irq_data;
}
423 | |||
/* SCHIZO interrupt mapping support.  Unlike Psycho, for this
 * controller the imap/iclr registers are per-PBM.
 */
#define SCHIZO_IMAP_BASE	0x1000UL
#define SCHIZO_ICLR_BASE	0x1400UL

/* Each INO owns its own 8-byte IMAP register. */
static unsigned long schizo_imap_offset(unsigned long ino)
{
	return (ino * 8UL) + SCHIZO_IMAP_BASE;
}

/* Each INO owns its own 8-byte ICLR register. */
static unsigned long schizo_iclr_offset(unsigned long ino)
{
	return (ino * 8UL) + SCHIZO_ICLR_BASE;
}
439 | |||
/* Physical address of an INO's per-PBM ICLR (interrupt clear)
 * register. */
static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
					unsigned int ino)
{
	return pbm_regs + schizo_iclr_offset(ino);
}

/* Physical address of an INO's per-PBM IMAP (interrupt map)
 * register. */
static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
					unsigned int ino)
{
	return pbm_regs + schizo_imap_offset(ino);
}
452 | |||
/* Read/write a 64-bit controller register through the physical
 * address bypass ASI (no TLB translation). */
#define schizo_read(__reg) \
({ u64 __ret; \
   __asm__ __volatile__("ldxa [%1] %2, %0" \
			: "=r" (__ret) \
			: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			: "memory"); \
   __ret; \
})
#define schizo_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")
467 | |||
/* Tomatillo DMA write-sync pre-handler.  Writes this INO's bit to the
 * sync register ('_arg2') and polls until the chip clears it, i.e.
 * all DMA pending at interrupt time has become visible.  When
 * '_arg1' is non-NULL (chip version <= 4, see schizo_irq_build) an
 * additional block-commit store from the FP register file is issued
 * as a further ordering workaround.
 */
static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
	unsigned long sync_reg = (unsigned long) _arg2;
	u64 mask = 1UL << (ino & IMAP_INO);
	u64 val;
	int limit;

	schizo_write(sync_reg, mask);

	/* Poll with a bounded spin so a wedged chip cannot hang us. */
	limit = 100000;
	val = 0;
	while (--limit) {
		val = schizo_read(sync_reg);
		if (!(val & mask))
			break;
	}
	if (limit <= 0) {
		printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
		       val, mask);
	}

	if (_arg1) {
		/* Temporarily enable the FPU (%fprs), do a 64-byte
		 * block-commit store to a scratch cache line, then
		 * restore the original %fprs value. */
		static unsigned char cacheline[64]
			__attribute__ ((aligned (64)));

		__asm__ __volatile__("rd %%fprs, %0\n\t"
				     "or %0, %4, %1\n\t"
				     "wr %1, 0x0, %%fprs\n\t"
				     "stda %%f0, [%5] %6\n\t"
				     "wr %0, 0x0, %%fprs\n\t"
				     "membar #Sync"
				     : "=&r" (mask), "=&r" (val)
				     : "0" (mask), "1" (val),
				       "i" (FPRS_FEF), "r" (&cacheline[0]),
				       "i" (ASI_BLK_COMMIT_P));
	}
}
505 | |||
/* Per-PBM state for the SCHIZO/Tomatillo IRQ translator. */
struct schizo_irq_data {
	unsigned long pbm_regs;		/* PBM register base (phys) */
	unsigned long sync_reg;		/* DMA sync reg; 0 on plain SCHIZO */
	u32 portid;			/* "portid" property */
	int chip_version;		/* "version#" property */
};
512 | |||
/* Translate a SCHIZO/Tomatillo INO into a virtual IRQ.  On Tomatillo
 * (sync_reg != 0) the DMA write-sync pre-handler is also installed.
 */
static unsigned int schizo_irq_build(struct device_node *dp,
				     unsigned int ino,
				     void *_data)
{
	struct schizo_irq_data *irq_data = _data;
	unsigned long pbm_regs = irq_data->pbm_regs;
	unsigned long imap, iclr;
	int ign_fixup;
	int virt_irq;
	int is_tomatillo;

	ino &= 0x3f;

	/* Now build the IRQ bucket. */
	imap = schizo_ino_to_imap(pbm_regs, ino);
	iclr = schizo_ino_to_iclr(pbm_regs, ino);

	/* On Schizo, no inofixup occurs. This is because each
	 * INO has it's own IMAP register. On Psycho and Sabre
	 * there is only one IMAP register for each PCI slot even
	 * though four different INOs can be generated by each
	 * PCI slot.
	 *
	 * But, for JBUS variants (essentially, Tomatillo), we have
	 * to fixup the lowest bit of the interrupt group number.
	 */
	ign_fixup = 0;

	is_tomatillo = (irq_data->sync_reg != 0UL);

	if (is_tomatillo) {
		if (irq_data->portid & 1)
			ign_fixup = (1 << 6);
	}

	virt_irq = build_irq(ign_fixup, iclr, imap);

	if (is_tomatillo) {
		/* Chip versions <= 4 get the extra block-commit
		 * workaround in the handler (non-NULL arg1). */
		irq_install_pre_handler(virt_irq,
					tomatillo_wsync_handler,
					((irq_data->chip_version <= 4) ?
					 (void *) 1 : (void *) 0),
					(void *) irq_data->sync_reg);
	}

	return virt_irq;
}
560 | |||
/* Shared translator setup for Schizo and Tomatillo controller nodes.
 *
 * NOTE(review): regs[3] is dereferenced on Tomatillo without checking
 * the "reg" property length -- presumably guaranteed by that firmware;
 * worth confirming.
 */
static void __init __schizo_irq_trans_init(struct device_node *dp,
					   int is_tomatillo)
{
	const struct linux_prom64_registers *regs;
	struct schizo_irq_data *irq_data;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = schizo_irq_build;

	irq_data = prom_early_alloc(sizeof(struct schizo_irq_data));

	regs = of_get_property(dp, "reg", NULL);
	dp->irq_trans->data = irq_data;

	irq_data->pbm_regs = regs[0].phys_addr;
	/* A zero sync_reg tells schizo_irq_build() "not Tomatillo". */
	if (is_tomatillo)
		irq_data->sync_reg = regs[3].phys_addr + 0x1a18UL;
	else
		irq_data->sync_reg = 0UL;
	irq_data->portid = of_getintprop_default(dp, "portid", 0);
	irq_data->chip_version = of_getintprop_default(dp, "version#", 0);
}
583 | |||
/* Schizo variant: no DMA write-sync register.  */
static void __init schizo_irq_trans_init(struct device_node *dp)
{
	__schizo_irq_trans_init(dp, 0);
}
588 | |||
/* Tomatillo variant: enables the write-sync pre-handler path.  */
static void __init tomatillo_irq_trans_init(struct device_node *dp)
{
	__schizo_irq_trans_init(dp, 1);
}
593 | |||
594 | static unsigned int pci_sun4v_irq_build(struct device_node *dp, | ||
595 | unsigned int devino, | ||
596 | void *_data) | ||
597 | { | ||
598 | u32 devhandle = (u32) (unsigned long) _data; | ||
599 | |||
600 | return sun4v_build_irq(devhandle, devino); | ||
601 | } | ||
602 | |||
603 | static void __init pci_sun4v_irq_trans_init(struct device_node *dp) | ||
604 | { | ||
605 | const struct linux_prom64_registers *regs; | ||
606 | |||
607 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | ||
608 | dp->irq_trans->irq_build = pci_sun4v_irq_build; | ||
609 | |||
610 | regs = of_get_property(dp, "reg", NULL); | ||
611 | dp->irq_trans->data = (void *) (unsigned long) | ||
612 | ((regs->phys_addr >> 32UL) & 0x0fffffff); | ||
613 | } | ||
614 | |||
/* Per-controller cookie for fire_irq_build(). */
struct fire_irq_data {
	unsigned long pbm_regs;		/* Physical base of the PBM register block.  */
	u32 portid;			/* "portid" property; folded into the INO fixup.  */
};
619 | |||
#define FIRE_IMAP_BASE	0x001000
#define FIRE_ICLR_BASE	0x001400

/* Each INO owns one 8-byte IMAP register in a flat bank. */
static unsigned long fire_imap_offset(unsigned long ino)
{
	return (ino << 3UL) + FIRE_IMAP_BASE;
}

/* The ICLR bank mirrors the IMAP layout at its own base. */
static unsigned long fire_iclr_offset(unsigned long ino)
{
	return (ino << 3UL) + FIRE_ICLR_BASE;
}

/* Physical address of the ICLR register for this INO. */
static unsigned long fire_ino_to_iclr(unsigned long pbm_regs,
				      unsigned int ino)
{
	return fire_iclr_offset(ino) + pbm_regs;
}

/* Physical address of the IMAP register for this INO. */
static unsigned long fire_ino_to_imap(unsigned long pbm_regs,
				      unsigned int ino)
{
	return fire_imap_offset(ino) + pbm_regs;
}
644 | |||
/* Translate a Fire (PCI-E) INO into a virtual IRQ. */
static unsigned int fire_irq_build(struct device_node *dp,
				   unsigned int ino,
				   void *_data)
{
	struct fire_irq_data *irq_data = _data;
	unsigned long pbm_regs = irq_data->pbm_regs;
	unsigned long imap, iclr;
	unsigned long int_ctrlr;

	ino &= 0x3f;

	/* Now build the IRQ bucket. */
	imap = fire_ino_to_imap(pbm_regs, ino);
	iclr = fire_ino_to_iclr(pbm_regs, ino);

	/* Set the interrupt controller number. */
	int_ctrlr = 1 << 6;
	upa_writeq(int_ctrlr, imap);

	/* The interrupt map registers do not have an INO field
	 * like other chips do.  They return zero in the INO
	 * field, and the interrupt controller number is controlled
	 * in bits 6 to 9.  So in order for build_irq() to get
	 * the INO right we pass it in as part of the fixup
	 * which will get added to the map register zero value
	 * read by build_irq().
	 */
	ino |= (irq_data->portid << 6);
	/* Subtract int_ctrlr since we just wrote it into the IMAP
	 * register and the fixup is added to what is read back.
	 */
	ino -= int_ctrlr;
	return build_irq(ino, iclr, imap);
}
676 | |||
677 | static void __init fire_irq_trans_init(struct device_node *dp) | ||
678 | { | ||
679 | const struct linux_prom64_registers *regs; | ||
680 | struct fire_irq_data *irq_data; | ||
681 | |||
682 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | ||
683 | dp->irq_trans->irq_build = fire_irq_build; | ||
684 | |||
685 | irq_data = prom_early_alloc(sizeof(struct fire_irq_data)); | ||
686 | |||
687 | regs = of_get_property(dp, "reg", NULL); | ||
688 | dp->irq_trans->data = irq_data; | ||
689 | |||
690 | irq_data->pbm_regs = regs[0].phys_addr; | ||
691 | irq_data->portid = of_getintprop_default(dp, "portid", 0); | ||
692 | } | ||
693 | #endif /* CONFIG_PCI */ | ||
694 | |||
695 | #ifdef CONFIG_SBUS | ||
696 | /* INO number to IMAP register offset for SYSIO external IRQ's. | ||
697 | * This should conform to both Sunfire/Wildfire server and Fusion | ||
698 | * desktop designs. | ||
699 | */ | ||
/* One shared IMAP per external SBUS slot.  */
#define SYSIO_IMAP_SLOT0	0x2c00UL
#define SYSIO_IMAP_SLOT1	0x2c08UL
#define SYSIO_IMAP_SLOT2	0x2c10UL
#define SYSIO_IMAP_SLOT3	0x2c18UL
/* Dedicated IMAPs for onboard devices and error sources.  */
#define SYSIO_IMAP_SCSI		0x3000UL
#define SYSIO_IMAP_ETH		0x3008UL
#define SYSIO_IMAP_BPP		0x3010UL
#define SYSIO_IMAP_AUDIO	0x3018UL
#define SYSIO_IMAP_PFAIL	0x3020UL
#define SYSIO_IMAP_KMS		0x3028UL
#define SYSIO_IMAP_FLPY		0x3030UL
#define SYSIO_IMAP_SHW		0x3038UL
#define SYSIO_IMAP_KBD		0x3040UL
#define SYSIO_IMAP_MS		0x3048UL
#define SYSIO_IMAP_SER		0x3050UL
#define SYSIO_IMAP_TIM0		0x3060UL
#define SYSIO_IMAP_TIM1		0x3068UL
#define SYSIO_IMAP_UE		0x3070UL
#define SYSIO_IMAP_CE		0x3078UL
#define SYSIO_IMAP_SBERR	0x3080UL
#define SYSIO_IMAP_PMGMT	0x3088UL
#define SYSIO_IMAP_GFX		0x3090UL
#define SYSIO_IMAP_EUPA		0x3098UL
723 | |||
#define bogon ((unsigned long) -1)
/* INO -> IMAP register offset, indexed by the (slot-adjusted) INO.
 * bogon marks INOs with no interrupt source behind them.
 */
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
	SYSIO_IMAP_GFX,
	SYSIO_IMAP_EUPA,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
768 | |||
769 | /* Convert Interrupt Mapping register pointer to associated | ||
770 | * Interrupt Clear register pointer, SYSIO specific version. | ||
771 | */ | ||
772 | #define SYSIO_ICLR_UNUSED0 0x3400UL | ||
773 | #define SYSIO_ICLR_SLOT0 0x3408UL | ||
774 | #define SYSIO_ICLR_SLOT1 0x3448UL | ||
775 | #define SYSIO_ICLR_SLOT2 0x3488UL | ||
776 | #define SYSIO_ICLR_SLOT3 0x34c8UL | ||
777 | static unsigned long sysio_imap_to_iclr(unsigned long imap) | ||
778 | { | ||
779 | unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0; | ||
780 | return imap + diff; | ||
781 | } | ||
782 | |||
/* Translate a SYSIO INO into a virtual IRQ.
 *
 * _data is the physical base of the SYSIO register block.  INOs below
 * 0x20 are per-slot SBUS interrupt levels and are rebased by the
 * device's slot number ("reg" which_io).  An INO mapping to a bogon
 * entry in sysio_irq_offsets[] halts the boot.
 */
static unsigned int sbus_of_build_irq(struct device_node *dp,
				      unsigned int ino,
				      void *_data)
{
	unsigned long reg_base = (unsigned long) _data;
	const struct linux_prom_registers *regs;
	unsigned long imap, iclr;
	int sbus_slot = 0;
	int sbus_level = 0;

	ino &= 0x3f;

	regs = of_get_property(dp, "reg", NULL);
	if (regs)
		sbus_slot = regs->which_io;

	/* Eight INOs per slot; fold the slot number in. */
	if (ino < 0x20)
		ino += (sbus_slot * 8);

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
			    ino);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		/* Each slot has one ICLR per level, 8 bytes apart,
		 * starting at level 1.
		 */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
}
839 | |||
840 | static void __init sbus_irq_trans_init(struct device_node *dp) | ||
841 | { | ||
842 | const struct linux_prom64_registers *regs; | ||
843 | |||
844 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | ||
845 | dp->irq_trans->irq_build = sbus_of_build_irq; | ||
846 | |||
847 | regs = of_get_property(dp, "reg", NULL); | ||
848 | dp->irq_trans->data = (void *) (unsigned long) regs->phys_addr; | ||
849 | } | ||
850 | #endif /* CONFIG_SBUS */ | ||
851 | |||
852 | |||
/* Build an IRQ for a child of the Sunfire central/FHC node.
 *
 * Only the "eeprom", "zs" and "clock-board" children have IMAP/ICLR
 * registers in the FHC's resources; anything else is returned
 * untranslated.
 */
static unsigned int central_build_irq(struct device_node *dp,
				      unsigned int ino,
				      void *_data)
{
	struct device_node *central_dp = _data;
	struct of_device *central_op = of_find_device_by_node(central_dp);
	struct resource *res;
	unsigned long imap, iclr;
	u32 tmp;

	if (!strcmp(dp->name, "eeprom")) {
		res = &central_op->resource[5];
	} else if (!strcmp(dp->name, "zs")) {
		res = &central_op->resource[4];
	} else if (!strcmp(dp->name, "clock-board")) {
		res = &central_op->resource[3];
	} else {
		return ino;
	}

	/* IMAP at +0x00, ICLR at +0x10 within the resource. */
	imap = res->start + 0x00UL;
	iclr = res->start + 0x10UL;

	/* Set the INO state to idle, and disable. */
	upa_writel(0, iclr);
	upa_readl(iclr);

	/* Clear bit 31 of the IMAP (presumably the enable/valid bit --
	 * confirm against the FHC docs) before handing it to build_irq().
	 */
	tmp = upa_readl(imap);
	tmp &= ~0x80000000;
	upa_writel(tmp, imap);

	return build_irq(0, iclr, imap);
}
886 | |||
887 | static void __init central_irq_trans_init(struct device_node *dp) | ||
888 | { | ||
889 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | ||
890 | dp->irq_trans->irq_build = central_build_irq; | ||
891 | |||
892 | dp->irq_trans->data = dp; | ||
893 | } | ||
894 | |||
/* Maps a controller "model"/"compatible" string to the function that
 * installs its IRQ translator.
 */
struct irq_trans {
	const char *name;
	void (*init)(struct device_node *);
};
899 | |||
#ifdef CONFIG_PCI
/* Known PCI controller models, matched by irq_trans_init(). */
static struct irq_trans __initdata pci_irq_trans_table[] = {
	{ "SUNW,sabre", sabre_irq_trans_init },
	{ "pci108e,a000", sabre_irq_trans_init },
	{ "pci108e,a001", sabre_irq_trans_init },
	{ "SUNW,psycho", psycho_irq_trans_init },
	{ "pci108e,8000", psycho_irq_trans_init },
	{ "SUNW,schizo", schizo_irq_trans_init },
	{ "pci108e,8001", schizo_irq_trans_init },
	{ "SUNW,schizo+", schizo_irq_trans_init },
	{ "pci108e,8002", schizo_irq_trans_init },
	{ "SUNW,tomatillo", tomatillo_irq_trans_init },
	{ "pci108e,a801", tomatillo_irq_trans_init },
	{ "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
	{ "pciex108e,80f0", fire_irq_trans_init },
};
#endif
917 | |||
918 | static unsigned int sun4v_vdev_irq_build(struct device_node *dp, | ||
919 | unsigned int devino, | ||
920 | void *_data) | ||
921 | { | ||
922 | u32 devhandle = (u32) (unsigned long) _data; | ||
923 | |||
924 | return sun4v_build_irq(devhandle, devino); | ||
925 | } | ||
926 | |||
927 | static void __init sun4v_vdev_irq_trans_init(struct device_node *dp) | ||
928 | { | ||
929 | const struct linux_prom64_registers *regs; | ||
930 | |||
931 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | ||
932 | dp->irq_trans->irq_build = sun4v_vdev_irq_build; | ||
933 | |||
934 | regs = of_get_property(dp, "reg", NULL); | ||
935 | dp->irq_trans->data = (void *) (unsigned long) | ||
936 | ((regs->phys_addr >> 32UL) & 0x0fffffff); | ||
937 | } | ||
938 | |||
/* Attach the appropriate IRQ translator to a device node, keyed off
 * the controller type.  Nodes that match nothing are left without a
 * translator.
 */
static void __init irq_trans_init(struct device_node *dp)
{
#ifdef CONFIG_PCI
	const char *model;
	int i;
#endif

#ifdef CONFIG_PCI
	/* PCI controllers match on "model", falling back to
	 * "compatible".
	 */
	model = of_get_property(dp, "model", NULL);
	if (!model)
		model = of_get_property(dp, "compatible", NULL);
	if (model) {
		for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) {
			struct irq_trans *t = &pci_irq_trans_table[i];

			if (!strcmp(model, t->name)) {
				t->init(dp);
				return;
			}
		}
	}
#endif
#ifdef CONFIG_SBUS
	if (!strcmp(dp->name, "sbus") ||
	    !strcmp(dp->name, "sbi")) {
		sbus_irq_trans_init(dp);
		return;
	}
#endif
	/* FHC boards hang directly off the "central" node. */
	if (!strcmp(dp->name, "fhc") &&
	    !strcmp(dp->parent->name, "central")) {
		central_irq_trans_init(dp);
		return;
	}
	if (!strcmp(dp->name, "virtual-devices") ||
	    !strcmp(dp->name, "niu")) {
		sun4v_vdev_irq_trans_init(dp);
		return;
	}
}
979 | |||
980 | static int is_root_node(const struct device_node *dp) | ||
981 | { | ||
982 | if (!dp) | ||
983 | return 0; | ||
984 | |||
985 | return (dp->parent == NULL); | ||
986 | } | ||
987 | |||
988 | /* The following routines deal with the black magic of fully naming a | ||
989 | * node. | ||
990 | * | ||
991 | * Certain well known named nodes are just the simple name string. | ||
992 | * | ||
993 | * Actual devices have an address specifier appended to the base name | ||
994 | * string, like this "foo@addr". The "addr" can be in any number of | ||
995 | * formats, and the platform plus the type of the node determine the | ||
996 | * format and how it is constructed. | ||
997 | * | ||
998 | * For children of the ROOT node, the naming convention is fixed and | ||
999 | * determined by whether this is a sun4u or sun4v system. | ||
1000 | * | ||
1001 | * For children of other nodes, it is bus type specific. So | ||
1002 | * we walk up the tree until we discover a "device_type" property | ||
1003 | * we recognize and we go from there. | ||
1004 | * | ||
1005 | * As an example, the boot device on my workstation has a full path: | ||
1006 | * | ||
1007 | * /pci@1e,600000/ide@d/disk@0,0:c | ||
1008 | */ | ||
/* Format the "name@unit" component under the sun4v convention.
 *
 * Non-root children use the generic "name@hi,lo" split of the first
 * "reg" address.  For root children, the top nibble of the address
 * selects the format: 0 -> "m" prefix, 8 -> "i" prefix, 12 -> bare
 * high bits.  Other type values leave tmp_buf untouched.
 */
static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
{
	struct linux_prom64_registers *regs;
	struct property *rprop;
	u32 high_bits, low_bits, type;

	rprop = of_find_property(dp, "reg", NULL);
	if (!rprop)
		return;

	regs = rprop->value;
	if (!is_root_node(dp->parent)) {
		sprintf(tmp_buf, "%s@%x,%x",
			dp->name,
			(unsigned int) (regs->phys_addr >> 32UL),
			(unsigned int) (regs->phys_addr & 0xffffffffUL));
		return;
	}

	type = regs->phys_addr >> 60UL;
	high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
	low_bits = (regs->phys_addr & 0xffffffffUL);

	if (type == 0 || type == 8) {
		const char *prefix = (type == 0) ? "m" : "i";

		/* A zero low word is elided entirely. */
		if (low_bits)
			sprintf(tmp_buf, "%s@%s%x,%x",
				dp->name, prefix,
				high_bits, low_bits);
		else
			sprintf(tmp_buf, "%s@%s%x",
				dp->name,
				prefix,
				high_bits);
	} else if (type == 12) {
		sprintf(tmp_buf, "%s@%x",
			dp->name, high_bits);
	}
}
1049 | |||
/* Format the "name@unit" component under the sun4u convention.
 *
 * Non-root children use the generic "name@hi,lo" split of the first
 * "reg" address.  Root children are "name@portid,offset", where the
 * offset is masked with 0x7fffff on cheetah and later, 0xffffffff
 * otherwise.  Root children without a portid leave tmp_buf untouched.
 */
static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
{
	struct linux_prom64_registers *regs;
	struct property *prop;

	prop = of_find_property(dp, "reg", NULL);
	if (!prop)
		return;

	regs = prop->value;
	if (!is_root_node(dp->parent)) {
		sprintf(tmp_buf, "%s@%x,%x",
			dp->name,
			(unsigned int) (regs->phys_addr >> 32UL),
			(unsigned int) (regs->phys_addr & 0xffffffffUL));
		return;
	}

	prop = of_find_property(dp, "upa-portid", NULL);
	if (!prop)
		prop = of_find_property(dp, "portid", NULL);
	if (prop) {
		unsigned long mask = 0xffffffffUL;

		if (tlb_type >= cheetah)
			mask = 0x7fffff;

		sprintf(tmp_buf, "%s@%x,%x",
			dp->name,
			*(u32 *)prop->value,
			(unsigned int) (regs->phys_addr & mask));
	}
}
1083 | |||
1084 | /* "name@slot,offset" */ | ||
1085 | static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) | ||
1086 | { | ||
1087 | struct linux_prom_registers *regs; | ||
1088 | struct property *prop; | ||
1089 | |||
1090 | prop = of_find_property(dp, "reg", NULL); | ||
1091 | if (!prop) | ||
1092 | return; | ||
1093 | |||
1094 | regs = prop->value; | ||
1095 | sprintf(tmp_buf, "%s@%x,%x", | ||
1096 | dp->name, | ||
1097 | regs->which_io, | ||
1098 | regs->phys_addr); | ||
1099 | } | ||
1100 | |||
1101 | /* "name@devnum[,func]" */ | ||
1102 | static void __init pci_path_component(struct device_node *dp, char *tmp_buf) | ||
1103 | { | ||
1104 | struct linux_prom_pci_registers *regs; | ||
1105 | struct property *prop; | ||
1106 | unsigned int devfn; | ||
1107 | |||
1108 | prop = of_find_property(dp, "reg", NULL); | ||
1109 | if (!prop) | ||
1110 | return; | ||
1111 | |||
1112 | regs = prop->value; | ||
1113 | devfn = (regs->phys_hi >> 8) & 0xff; | ||
1114 | if (devfn & 0x07) { | ||
1115 | sprintf(tmp_buf, "%s@%x,%x", | ||
1116 | dp->name, | ||
1117 | devfn >> 3, | ||
1118 | devfn & 0x07); | ||
1119 | } else { | ||
1120 | sprintf(tmp_buf, "%s@%x", | ||
1121 | dp->name, | ||
1122 | devfn >> 3); | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | /* "name@UPA_PORTID,offset" */ | ||
1127 | static void __init upa_path_component(struct device_node *dp, char *tmp_buf) | ||
1128 | { | ||
1129 | struct linux_prom64_registers *regs; | ||
1130 | struct property *prop; | ||
1131 | |||
1132 | prop = of_find_property(dp, "reg", NULL); | ||
1133 | if (!prop) | ||
1134 | return; | ||
1135 | |||
1136 | regs = prop->value; | ||
1137 | |||
1138 | prop = of_find_property(dp, "upa-portid", NULL); | ||
1139 | if (!prop) | ||
1140 | return; | ||
1141 | |||
1142 | sprintf(tmp_buf, "%s@%x,%x", | ||
1143 | dp->name, | ||
1144 | *(u32 *) prop->value, | ||
1145 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | ||
1146 | } | ||
1147 | |||
1148 | /* "name@reg" */ | ||
1149 | static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) | ||
1150 | { | ||
1151 | struct property *prop; | ||
1152 | u32 *regs; | ||
1153 | |||
1154 | prop = of_find_property(dp, "reg", NULL); | ||
1155 | if (!prop) | ||
1156 | return; | ||
1157 | |||
1158 | regs = prop->value; | ||
1159 | |||
1160 | sprintf(tmp_buf, "%s@%x", dp->name, *regs); | ||
1161 | } | ||
1162 | |||
1163 | /* "name@addrhi,addrlo" */ | ||
1164 | static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | ||
1165 | { | ||
1166 | struct linux_prom64_registers *regs; | ||
1167 | struct property *prop; | ||
1168 | |||
1169 | prop = of_find_property(dp, "reg", NULL); | ||
1170 | if (!prop) | ||
1171 | return; | ||
1172 | |||
1173 | regs = prop->value; | ||
1174 | |||
1175 | sprintf(tmp_buf, "%s@%x,%x", | ||
1176 | dp->name, | ||
1177 | (unsigned int) (regs->phys_addr >> 32UL), | ||
1178 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | ||
1179 | } | ||
1180 | |||
1181 | /* "name@bus,addr" */ | ||
1182 | static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) | ||
1183 | { | ||
1184 | struct property *prop; | ||
1185 | u32 *regs; | ||
1186 | |||
1187 | prop = of_find_property(dp, "reg", NULL); | ||
1188 | if (!prop) | ||
1189 | return; | ||
1190 | |||
1191 | regs = prop->value; | ||
1192 | |||
1193 | /* This actually isn't right... should look at the #address-cells | ||
1194 | * property of the i2c bus node etc. etc. | ||
1195 | */ | ||
1196 | sprintf(tmp_buf, "%s@%x,%x", | ||
1197 | dp->name, regs[0], regs[1]); | ||
1198 | } | ||
1199 | |||
1200 | /* "name@reg0[,reg1]" */ | ||
1201 | static void __init usb_path_component(struct device_node *dp, char *tmp_buf) | ||
1202 | { | ||
1203 | struct property *prop; | ||
1204 | u32 *regs; | ||
1205 | |||
1206 | prop = of_find_property(dp, "reg", NULL); | ||
1207 | if (!prop) | ||
1208 | return; | ||
1209 | |||
1210 | regs = prop->value; | ||
1211 | |||
1212 | if (prop->length == sizeof(u32) || regs[1] == 1) { | ||
1213 | sprintf(tmp_buf, "%s@%x", | ||
1214 | dp->name, regs[0]); | ||
1215 | } else { | ||
1216 | sprintf(tmp_buf, "%s@%x,%x", | ||
1217 | dp->name, regs[0], regs[1]); | ||
1218 | } | ||
1219 | } | ||
1220 | |||
1221 | /* "name@reg0reg1[,reg2reg3]" */ | ||
1222 | static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf) | ||
1223 | { | ||
1224 | struct property *prop; | ||
1225 | u32 *regs; | ||
1226 | |||
1227 | prop = of_find_property(dp, "reg", NULL); | ||
1228 | if (!prop) | ||
1229 | return; | ||
1230 | |||
1231 | regs = prop->value; | ||
1232 | |||
1233 | if (regs[2] || regs[3]) { | ||
1234 | sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", | ||
1235 | dp->name, regs[0], regs[1], regs[2], regs[3]); | ||
1236 | } else { | ||
1237 | sprintf(tmp_buf, "%s@%08x%08x", | ||
1238 | dp->name, regs[0], regs[1]); | ||
1239 | } | ||
1240 | } | ||
1241 | |||
/* Pick the bus-specific path component formatter based on the parent
 * node's type (or name, for USB hubs), falling back to the platform
 * convention (sun4v vs sun4u) when nothing matches.
 */
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
	struct device_node *parent = dp->parent;

	if (parent != NULL) {
		if (!strcmp(parent->type, "pci") ||
		    !strcmp(parent->type, "pciex")) {
			pci_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "sbus")) {
			sbus_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "upa")) {
			upa_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "ebus")) {
			ebus_path_component(dp, tmp_buf);
			return;
		}
		/* USB parents are matched by name, not type. */
		if (!strcmp(parent->name, "usb") ||
		    !strcmp(parent->name, "hub")) {
			usb_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "i2c")) {
			i2c_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "firewire")) {
			ieee1394_path_component(dp, tmp_buf);
			return;
		}
		if (!strcmp(parent->type, "virtual-devices")) {
			vdev_path_component(dp, tmp_buf);
			return;
		}
		/* "isa" is handled with platform naming */
	}

	/* Use platform naming convention. */
	if (tlb_type == hypervisor) {
		sun4v_path_component(dp, tmp_buf);
		return;
	} else {
		sun4u_path_component(dp, tmp_buf);
	}
}
1292 | |||
/* Produce the "name@unit" path component for dp in freshly allocated
 * PROM memory.  If no formatter filled the buffer, the bare node name
 * is used.
 *
 * NOTE(review): tmp_buf is a fixed 64 bytes and the formatters use
 * sprintf(); this relies on firmware node names being short -- worth
 * confirming.
 */
static char * __init build_path_component(struct device_node *dp)
{
	char tmp_buf[64], *n;

	tmp_buf[0] = '\0';
	__build_path_component(dp, tmp_buf);
	if (tmp_buf[0] == '\0')
		strcpy(tmp_buf, dp->name);

	n = prom_early_alloc(strlen(tmp_buf) + 1);
	strcpy(n, tmp_buf);

	return n;
}
1307 | |||
/* Concatenate the parent's full path and dp's path component into a
 * freshly allocated string.  No '/' separator is inserted when the
 * parent is the root node.
 */
static char * __init build_full_name(struct device_node *dp)
{
	int len, ourlen, plen;
	char *n;

	plen = strlen(dp->parent->full_name);
	ourlen = strlen(dp->path_component_name);
	/* +2 covers the optional '/' and the terminating NUL. */
	len = ourlen + plen + 2;

	n = prom_early_alloc(len);
	strcpy(n, dp->parent->full_name);
	if (!is_root_node(dp->parent)) {
		strcpy(n + plen, "/");
		plen++;
	}
	strcpy(n + plen, dp->path_component_name);

	return n;
}
1327 | |||
static unsigned int unique_id;

/* Build one struct property for `node'.
 *
 * With special_name set, the property is synthesized from
 * special_val/special_len.  Otherwise the property after `prev' (or
 * the first property when prev is NULL) is fetched from the PROM;
 * NULL is returned when the list is exhausted.
 *
 * The 32 bytes after the struct hold the property name.  On an
 * exhausted list the freshly prepared struct is cached in `tmp' and
 * reused by the next call, so the NULL return does not leak.
 */
static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
{
	static struct property *tmp = NULL;
	struct property *p;

	if (tmp) {
		p = tmp;
		memset(p, 0, sizeof(*p) + 32);
		tmp = NULL;
	} else {
		p = prom_early_alloc(sizeof(struct property) + 32);
		p->unique_id = unique_id++;
	}

	p->name = (char *) (p + 1);
	if (special_name) {
		strcpy(p->name, special_name);
		p->length = special_len;
		p->value = prom_early_alloc(special_len);
		memcpy(p->value, special_val, special_len);
	} else {
		if (prev == NULL) {
			prom_firstprop(node, p->name);
		} else {
			prom_nextprop(node, prev, p->name);
		}
		/* An empty name terminates the PROM property list. */
		if (strlen(p->name) == 0) {
			tmp = p;
			return NULL;
		}
		p->length = prom_getproplen(node, p->name);
		if (p->length <= 0) {
			p->length = 0;
		} else {
			/* One extra byte so string-valued properties
			 * are always NUL terminated.
			 */
			p->value = prom_early_alloc(p->length + 1);
			prom_getproperty(node, p->name, p->value, p->length);
			((unsigned char *)p->value)[p->length] = '\0';
		}
	}
	return p;
}
1371 | |||
/* Read the complete property list of `node' from the PROM, headed by
 * a synthetic ".node" property that holds the phandle itself.
 */
static struct property * __init build_prop_list(phandle node)
{
	struct property *head, *tail;

	head = tail = build_one_prop(node, NULL,
				     ".node", &node, sizeof(node));

	tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
	tail = tail->next;
	/* Walk the PROM list; build_one_prop() returns NULL at the end. */
	while(tail) {
		tail->next = build_one_prop(node, tail->name,
					    NULL, NULL, 0);
		tail = tail->next;
	}

	return head;
}
1389 | |||
/* Fetch one string property of `node', or the literal "<NULL>" when
 * it is absent.  The "<NULL>" case returns a string literal, so
 * callers must never modify the result.
 */
static char * __init get_one_property(phandle node, const char *name)
{
	char *buf = "<NULL>";
	int len;

	len = prom_getproplen(node, name);
	if (len > 0) {
		buf = prom_early_alloc(len);
		prom_getproperty(node, name, buf, len);
	}

	return buf;
}
1403 | |||
/* Allocate and populate a struct device_node for PROM node `node':
 * name, type, property list and IRQ translator.  Returns NULL for a
 * zero phandle (end of a sibling chain).
 */
static struct device_node * __init create_node(phandle node, struct device_node *parent)
{
	struct device_node *dp;

	if (!node)
		return NULL;

	dp = prom_early_alloc(sizeof(*dp));
	dp->unique_id = unique_id++;
	dp->parent = parent;

	kref_init(&dp->kref);

	dp->name = get_one_property(node, "name");
	dp->type = get_one_property(node, "device_type");
	dp->node = node;

	dp->properties = build_prop_list(node);

	irq_trans_init(dp);

	return dp;
}
1427 | |||
/* Recursively mirror the PROM device tree starting at `node'.
 *
 * Returns the head of the sibling list.  Siblings are chained via
 * ->sibling and children via ->child; every created node is also
 * appended to the flat allnext list through *nextp.
 */
static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
{
	struct device_node *ret = NULL, *prev_sibling = NULL;
	struct device_node *dp;

	while (1) {
		dp = create_node(node, parent);
		if (!dp)
			break;

		if (prev_sibling)
			prev_sibling->sibling = dp;

		if (!ret)
			ret = dp;
		prev_sibling = dp;

		/* Thread onto the global allnext list. */
		*(*nextp) = dp;
		*nextp = &dp->allnext;

		dp->path_component_name = build_path_component(dp);
		dp->full_name = build_full_name(dp);

		dp->child = build_tree(dp, prom_getchild(node), nextp);

		node = prom_getsibling(node);
	}

	return ret;
}
1458 | |||
1459 | static const char *get_mid_prop(void) | ||
1460 | { | ||
1461 | return (tlb_type == spitfire ? "upa-portid" : "portid"); | ||
1462 | } | ||
1463 | |||
/* Find the "cpu" device node whose id matches `cpuid'.
 *
 * The id is read from the platform mid property (get_mid_prop()),
 * falling back to "cpuid".  A cpu node lacking both is treated as a
 * fatal firmware problem.  Returns NULL if no node matches.
 */
struct device_node *of_find_node_by_cpuid(int cpuid)
{
	struct device_node *dp;
	const char *mid_prop = get_mid_prop();

	for_each_node_by_type(dp, "cpu") {
		int id = of_getintprop_default(dp, mid_prop, -1);
		const char *this_mid_prop = mid_prop;

		if (id < 0) {
			this_mid_prop = "cpuid";
			id = of_getintprop_default(dp, this_mid_prop, -1);
		}

		if (id < 0) {
			prom_printf("OF: Serious problem, cpu lacks "
				    "%s property", this_mid_prop);
			prom_halt();
		}
		if (cpuid == id)
			return dp;
	}
	return NULL;
}
1488 | |||
/* Populate the per-cpu cpu_data() entries (clock rate, cache
 * geometry, core/proc ids) from the firmware "cpu" device nodes,
 * and mark the discovered cpus present/possible on SMP.  Only called
 * when not running under the hypervisor (see prom_build_devicetree);
 * sun4v systems presumably take this data from the machine
 * description instead -- confirm in mdesc.c.
 */
static void __init of_fill_in_cpu_data(void)
{
	struct device_node *dp;
	const char *mid_prop = get_mid_prop();

	ncpus_probed = 0;
	for_each_node_by_type(dp, "cpu") {
		int cpuid = of_getintprop_default(dp, mid_prop, -1);
		const char *this_mid_prop = mid_prop;
		struct device_node *portid_parent;
		int portid = -1;

		portid_parent = NULL;
		if (cpuid < 0) {
			/* No portid-style property on the cpu node
			 * itself: fall back to "cpuid" and search up
			 * to two ancestor levels for a "portid"
			 * (multi-core layouts put it on a parent).
			 */
			this_mid_prop = "cpuid";
			cpuid = of_getintprop_default(dp, this_mid_prop, -1);
			if (cpuid >= 0) {
				int limit = 2;

				portid_parent = dp;
				while (limit--) {
					portid_parent = portid_parent->parent;
					if (!portid_parent)
						break;
					portid = of_getintprop_default(portid_parent,
								       "portid", -1);
					if (portid >= 0)
						break;
				}
			}
		}

		/* A cpu node with no ID at all is unrecoverable. */
		if (cpuid < 0) {
			prom_printf("OF: Serious problem, cpu lacks "
				    "%s property", this_mid_prop);
			prom_halt();
		}

		ncpus_probed++;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
#else
		/* On uniprocessor we only want the values for the
		 * real physical cpu the kernel booted onto, however
		 * cpu_data() only has one entry at index 0.
		 */
		if (cpuid != real_hard_smp_processor_id())
			continue;
		cpuid = 0;
#endif

		cpu_data(cpuid).clock_tick =
			of_getintprop_default(dp, "clock-frequency", 0);

		if (portid_parent) {
			/* Multi-core layout: L1 properties ("l1-...")
			 * live on the cpu node; L2 properties may be
			 * on the cpu node or on the portid ancestor,
			 * so fall back to the ancestor when absent.
			 */
			cpu_data(cpuid).dcache_size =
				of_getintprop_default(dp, "l1-dcache-size",
						      16 * 1024);
			cpu_data(cpuid).dcache_line_size =
				of_getintprop_default(dp, "l1-dcache-line-size",
						      32);
			cpu_data(cpuid).icache_size =
				of_getintprop_default(dp, "l1-icache-size",
						      8 * 1024);
			cpu_data(cpuid).icache_line_size =
				of_getintprop_default(dp, "l1-icache-line-size",
						      32);
			cpu_data(cpuid).ecache_size =
				of_getintprop_default(dp, "l2-cache-size", 0);
			cpu_data(cpuid).ecache_line_size =
				of_getintprop_default(dp, "l2-cache-line-size", 0);
			if (!cpu_data(cpuid).ecache_size ||
			    !cpu_data(cpuid).ecache_line_size) {
				cpu_data(cpuid).ecache_size =
					of_getintprop_default(portid_parent,
							      "l2-cache-size",
							      (4 * 1024 * 1024));
				cpu_data(cpuid).ecache_line_size =
					of_getintprop_default(portid_parent,
							      "l2-cache-line-size", 64);
			}

			cpu_data(cpuid).core_id = portid + 1;
			cpu_data(cpuid).proc_id = portid;
#ifdef CONFIG_SMP
			sparc64_multi_core = 1;
#endif
		} else {
			/* Traditional single-core property names. */
			cpu_data(cpuid).dcache_size =
				of_getintprop_default(dp, "dcache-size", 16 * 1024);
			cpu_data(cpuid).dcache_line_size =
				of_getintprop_default(dp, "dcache-line-size", 32);

			cpu_data(cpuid).icache_size =
				of_getintprop_default(dp, "icache-size", 16 * 1024);
			cpu_data(cpuid).icache_line_size =
				of_getintprop_default(dp, "icache-line-size", 32);

			cpu_data(cpuid).ecache_size =
				of_getintprop_default(dp, "ecache-size",
						      (4 * 1024 * 1024));
			cpu_data(cpuid).ecache_line_size =
				of_getintprop_default(dp, "ecache-line-size", 64);

			cpu_data(cpuid).core_id = 0;
			cpu_data(cpuid).proc_id = -1;
		}

#ifdef CONFIG_SMP
		cpu_set(cpuid, cpu_present_map);
		cpu_set(cpuid, cpu_possible_map);
#endif
	}

	smp_fill_in_sib_core_maps();
}
1611 | |||
/* Device node of the firmware-chosen stdout device (set below). */
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);

/* Full firmware path of the console device. */
char *of_console_path;
EXPORT_SYMBOL(of_console_path);

/* Option string following the ':' in the console path, or NULL. */
char *of_console_options;
EXPORT_SYMBOL(of_console_options);
1620 | |||
/* Resolve the firmware's stdout device: record its full path and
 * option string in the of_console_* globals, and check that it is a
 * display or serial device.  Halts via the PROM on any inconsistency.
 */
static void __init of_console_init(void)
{
	char *msg = "OF stdout device is: %s\n";
	struct device_node *dp;
	const char *type;
	phandle node;

	of_console_path = prom_early_alloc(256);
	if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
		prom_printf("Cannot obtain path of stdout.\n");
		prom_halt();
	}
	/* Anything after the last ':' in the path is an option
	 * string; an empty option string counts as none.
	 */
	of_console_options = strrchr(of_console_path, ':');
	if (of_console_options) {
		of_console_options++;
		if (*of_console_options == '\0')
			of_console_options = NULL;
	}

	/* Translate the stdout instance handle into a package node. */
	node = prom_inst2pkg(prom_stdout);
	if (!node) {
		prom_printf("Cannot resolve stdout node from "
			    "instance %08x.\n", prom_stdout);
		prom_halt();
	}

	dp = of_find_node_by_phandle(node);
	type = of_get_property(dp, "device_type", NULL);
	if (!type) {
		prom_printf("Console stdout lacks device_type property.\n");
		prom_halt();
	}

	if (strcmp(type, "display") && strcmp(type, "serial")) {
		prom_printf("Console device_type is neither display "
			    "nor serial.\n");
		prom_halt();
	}

	of_console_device = dp;

	printk(msg, of_console_path);
}
1664 | |||
/* Build the in-kernel copy of the PROM device tree starting at the
 * root node, then resolve the console device and, on non-hypervisor
 * systems, fill in per-cpu data from the "cpu" nodes.
 */
void __init prom_build_devicetree(void)
{
	struct device_node **nextp;

	allnodes = create_node(prom_root_node, NULL);
	allnodes->path_component_name = "";
	allnodes->full_name = "/";

	nextp = &allnodes->allnext;
	allnodes->child = build_tree(allnodes,
				     prom_getchild(allnodes->node),
				     &nextp);
	of_console_init();

	printk("PROM: Built device tree with %u bytes of memory.\n",
	       prom_early_allocated);

	/* Under the hypervisor, cpu data is presumably taken from
	 * the machine description rather than OBP -- see mdesc.
	 */
	if (tlb_type != hypervisor)
		of_fill_in_cpu_data();
}
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c new file mode 100644 index 000000000000..790996428c14 --- /dev/null +++ b/arch/sparc/kernel/psycho_common.c | |||
@@ -0,0 +1,470 @@ | |||
1 | /* psycho_common.c: Code common to PSYCHO and derivative PCI controllers. | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | |||
8 | #include <asm/upa.h> | ||
9 | |||
10 | #include "pci_impl.h" | ||
11 | #include "iommu_common.h" | ||
12 | #include "psycho_common.h" | ||
13 | |||
/* Streaming cache (STC) register bit definitions: control register
 * diagnostic-enable, per-entry error status, and the tag/line
 * diagnostic word fields.
 */
#define PSYCHO_STRBUF_CTRL_DENAB	0x0000000000000002UL
#define PSYCHO_STCERR_WRITE		0x0000000000000002UL
#define PSYCHO_STCERR_READ		0x0000000000000001UL
#define PSYCHO_STCTAG_PPN		0x0fffffff00000000UL
#define PSYCHO_STCTAG_VPN		0x00000000ffffe000UL
#define PSYCHO_STCTAG_VALID		0x0000000000000002UL
#define PSYCHO_STCTAG_WRITE		0x0000000000000001UL
#define PSYCHO_STCLINE_LINDX		0x0000000001e00000UL
#define PSYCHO_STCLINE_SPTR		0x00000000001f8000UL
#define PSYCHO_STCLINE_LADDR		0x0000000000007f00UL
#define PSYCHO_STCLINE_EPTR		0x00000000000000fcUL
#define PSYCHO_STCLINE_VALID		0x0000000000000002UL
#define PSYCHO_STCLINE_FOFN		0x0000000000000001UL

/* Snapshot buffers for STC diagnostic state; shared by all
 * controllers, hence serialized with stc_buf_lock.
 */
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
32 | |||
/* Snapshot, report and clear the streaming cache (STC) error, tag
 * and line diagnostic state of @pbm.  Called from the IOMMU error
 * path (with the IOMMU lock held by the caller); the shared snapshot
 * buffers above are protected by stc_buf_lock.
 */
static void psycho_check_stc_error(struct pci_pbm_info *pbm)
{
	unsigned long err_base, tag_base, line_base;
	struct strbuf *strbuf = &pbm->stc;
	u64 control;
	int i;

	/* No streaming cache on this PBM, nothing to check. */
	if (!strbuf->strbuf_control)
		return;

	err_base = strbuf->strbuf_err_stat;
	tag_base = strbuf->strbuf_tag_diag;
	line_base = strbuf->strbuf_line_diag;

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous.  When we put the streaming
	 * buffer into diagnostic mode to probe it's tags and error
	 * status, we _must_ clear all of the line tag valid bits
	 * before re-enabling the streaming buffer.  If any dirty data
	 * lives in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach main
	 * memory.
	 */
	control = upa_readq(strbuf->strbuf_control);
	upa_writeq(control | PSYCHO_STRBUF_CTRL_DENAB, strbuf->strbuf_control);
	/* Snapshot and clear the 128 per-entry error status words. */
	for (i = 0; i < 128; i++) {
		u64 val;

		val = upa_readq(err_base + (i * 8UL));
		upa_writeq(0UL, err_base + (i * 8UL));
		stc_error_buf[i] = val;
	}
	/* Snapshot and clear the 16 tag/line diagnostic entries. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
		stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
		upa_writeq(0UL, tag_base + (i * 8UL));
		upa_writeq(0UL, line_base + (i * 8UL));
	}

	/* OK, state is logged, exit diagnostic mode. */
	upa_writeq(control, strbuf->strbuf_control);

	/* Each of the 16 tag entries covers 8 error-status words;
	 * only dump tag/line state for groups that saw an error.
	 */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			u64 errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk(KERN_ERR "%s: STC_ERR(%d)[wr(%d)"
				       "rd(%d)]\n",
				       pbm->name,
				       j,
				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			u64 tagval = stc_tag_buf[i];
			u64 lineval = stc_line_buf[i];
			printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)"
			       "V(%d)W(%d)]\n",
			       pbm->name,
			       i,
			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
			       (tagval & PSYCHO_STCTAG_VPN),
			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
			printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)"
			       "LADDR(%lx)EP(%lx)V(%d)FOFN(%d)]\n",
			       pbm->name,
			       i,
			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
120 | |||
/* IOMMU TLB diagnostic register offsets (16 entries of 8 bytes). */
#define PSYCHO_IOMMU_TAG		0xa580UL
#define PSYCHO_IOMMU_DATA		0xa600UL
123 | |||
124 | static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm, | ||
125 | u64 *tag, u64 *data) | ||
126 | { | ||
127 | int i; | ||
128 | |||
129 | for (i = 0; i < 16; i++) { | ||
130 | unsigned long base = pbm->controller_regs; | ||
131 | unsigned long off = i * 8UL; | ||
132 | |||
133 | tag[i] = upa_readq(base + PSYCHO_IOMMU_TAG+off); | ||
134 | data[i] = upa_readq(base + PSYCHO_IOMMU_DATA+off); | ||
135 | |||
136 | /* Now clear out the entry. */ | ||
137 | upa_writeq(0, base + PSYCHO_IOMMU_TAG + off); | ||
138 | upa_writeq(0, base + PSYCHO_IOMMU_DATA + off); | ||
139 | } | ||
140 | } | ||
141 | |||
/* IOMMU TLB tag diagnostic word fields. */
#define PSYCHO_IOMMU_TAG_ERRSTS	(0x3UL << 23UL)
#define PSYCHO_IOMMU_TAG_ERR	(0x1UL << 22UL)
#define PSYCHO_IOMMU_TAG_WRITE	(0x1UL << 21UL)
#define PSYCHO_IOMMU_TAG_STREAM	(0x1UL << 20UL)
#define PSYCHO_IOMMU_TAG_SIZE	(0x1UL << 19UL)
#define PSYCHO_IOMMU_TAG_VPAGE	0x7ffffUL
/* IOMMU TLB data diagnostic word fields. */
#define PSYCHO_IOMMU_DATA_VALID	(1UL << 30UL)
#define PSYCHO_IOMMU_DATA_CACHE	(1UL << 28UL)
#define PSYCHO_IOMMU_DATA_PPAGE	0xfffffffUL
152 | static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm, | ||
153 | u64 *tag, u64 *data) | ||
154 | { | ||
155 | int i; | ||
156 | |||
157 | for (i = 0; i < 16; i++) { | ||
158 | u64 tag_val, data_val; | ||
159 | const char *type_str; | ||
160 | tag_val = tag[i]; | ||
161 | if (!(tag_val & PSYCHO_IOMMU_TAG_ERR)) | ||
162 | continue; | ||
163 | |||
164 | data_val = data[i]; | ||
165 | switch((tag_val & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) { | ||
166 | case 0: | ||
167 | type_str = "Protection Error"; | ||
168 | break; | ||
169 | case 1: | ||
170 | type_str = "Invalid Error"; | ||
171 | break; | ||
172 | case 2: | ||
173 | type_str = "TimeOut Error"; | ||
174 | break; | ||
175 | case 3: | ||
176 | default: | ||
177 | type_str = "ECC Error"; | ||
178 | break; | ||
179 | } | ||
180 | |||
181 | printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) " | ||
182 | "str(%d) sz(%dK) vpg(%08lx)]\n", | ||
183 | pbm->name, i, type_str, | ||
184 | ((tag_val & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0), | ||
185 | ((tag_val & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0), | ||
186 | ((tag_val & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8), | ||
187 | (tag_val & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT); | ||
188 | printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) " | ||
189 | "ppg(%016lx)]\n", | ||
190 | pbm->name, i, | ||
191 | ((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0), | ||
192 | ((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0), | ||
193 | (data_val & PSYCHO_IOMMU_DATA_PPAGE)<<IOMMU_PAGE_SHIFT); | ||
194 | } | ||
195 | } | ||
196 | |||
/* IOMMU control register translation-error status and flag bits. */
#define PSYCHO_IOMMU_CTRL_XLTESTAT	0x0000000006000000UL
#define PSYCHO_IOMMU_CTRL_XLTEERR	0x0000000001000000UL
199 | |||
200 | void psycho_check_iommu_error(struct pci_pbm_info *pbm, | ||
201 | unsigned long afsr, | ||
202 | unsigned long afar, | ||
203 | enum psycho_error_type type) | ||
204 | { | ||
205 | u64 control, iommu_tag[16], iommu_data[16]; | ||
206 | struct iommu *iommu = pbm->iommu; | ||
207 | unsigned long flags; | ||
208 | |||
209 | spin_lock_irqsave(&iommu->lock, flags); | ||
210 | control = upa_readq(iommu->iommu_control); | ||
211 | if (control & PSYCHO_IOMMU_CTRL_XLTEERR) { | ||
212 | const char *type_str; | ||
213 | |||
214 | control &= ~PSYCHO_IOMMU_CTRL_XLTEERR; | ||
215 | upa_writeq(control, iommu->iommu_control); | ||
216 | |||
217 | switch ((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) { | ||
218 | case 0: | ||
219 | type_str = "Protection Error"; | ||
220 | break; | ||
221 | case 1: | ||
222 | type_str = "Invalid Error"; | ||
223 | break; | ||
224 | case 2: | ||
225 | type_str = "TimeOut Error"; | ||
226 | break; | ||
227 | case 3: | ||
228 | default: | ||
229 | type_str = "ECC Error"; | ||
230 | break; | ||
231 | }; | ||
232 | printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", | ||
233 | pbm->name, type_str); | ||
234 | |||
235 | /* It is very possible for another DVMA to occur while | ||
236 | * we do this probe, and corrupt the system further. | ||
237 | * But we are so screwed at this point that we are | ||
238 | * likely to crash hard anyways, so get as much | ||
239 | * diagnostic information to the console as we can. | ||
240 | */ | ||
241 | psycho_record_iommu_tags_and_data(pbm, iommu_tag, iommu_data); | ||
242 | psycho_dump_iommu_tags_and_data(pbm, iommu_tag, iommu_data); | ||
243 | } | ||
244 | psycho_check_stc_error(pbm); | ||
245 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
246 | } | ||
247 | |||
/* PCI control/status register error bits. */
#define PSYCHO_PCICTRL_SBH_ERR	0x0000000800000000UL
#define PSYCHO_PCICTRL_SERR	0x0000000400000000UL
250 | |||
/* Handle a PCI error interrupt for which no primary AFSR error bit
 * was set: check the PCI control register error bits and the
 * bridge's own config-space PCI status register.  Returns
 * IRQ_HANDLED if any error condition was found and cleared, else
 * IRQ_NONE.
 */
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	irqreturn_t ret = IRQ_NONE;
	u64 csr, csr_error_bits;
	u16 stat, *addr;

	csr = upa_readq(pbm->pci_csr);
	csr_error_bits = csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors. */
		upa_writeq(csr, pbm->pci_csr);

		/* Log 'em. */
		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
			printk(KERN_ERR "%s: PCI streaming byte hole "
			       "error asserted.\n", pbm->name);
		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
			printk(KERN_ERR "%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Check the bridge's config-space PCI status register; error
	 * bits are cleared by writing them back (hence the 0xffff).
	 */
	addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
					0, PCI_STATUS);
	pci_config_read16(addr, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk(KERN_ERR "%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_config_write16(addr, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
287 | |||
/* PCI AFSR (asynchronous fault status register) layout: primary and
 * secondary error bits, plus fault-description fields.
 */
#define PSYCHO_PCIAFSR_PMA	0x8000000000000000UL
#define PSYCHO_PCIAFSR_PTA	0x4000000000000000UL
#define PSYCHO_PCIAFSR_PRTRY	0x2000000000000000UL
#define PSYCHO_PCIAFSR_PPERR	0x1000000000000000UL
#define PSYCHO_PCIAFSR_SMA	0x0800000000000000UL
#define PSYCHO_PCIAFSR_STA	0x0400000000000000UL
#define PSYCHO_PCIAFSR_SRTRY	0x0200000000000000UL
#define PSYCHO_PCIAFSR_SPERR	0x0100000000000000UL
#define PSYCHO_PCIAFSR_RESV1	0x00ff000000000000UL
#define PSYCHO_PCIAFSR_BMSK	0x0000ffff00000000UL
#define PSYCHO_PCIAFSR_BLK	0x0000000080000000UL
#define PSYCHO_PCIAFSR_RESV2	0x0000000040000000UL
#define PSYCHO_PCIAFSR_MID	0x000000003e000000UL
#define PSYCHO_PCIAFSR_RESV3	0x0000000001ffffffUL
/* Top-level handler for the PBM's PCI error interrupt (@dev_id is
 * the struct pci_pbm_info).  Decodes and clears the primary and
 * secondary AFSR error bits, logs the fault address, and triggers
 * follow-up bus/IOMMU scans appropriate to the error class.
 */
irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	u64 afsr, afar, error_bits;
	int reported;

	afsr = upa_readq(pbm->pci_afsr);
	afar = upa_readq(pbm->pci_afar);
	/* Gather both the primary (P*) and secondary (S*) bits. */
	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	/* No AFSR error: check the secondary error sources instead. */
	if (!error_bits)
		return psycho_pcierr_intr_other(pbm);
	/* Clear the errors. */
	upa_writeq(error_bits, pbm->pci_afsr);
	printk(KERN_ERR "%s: PCI Error, primary error type[%s]\n",
	       pbm->name,
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk(KERN_ERR "%s: bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
	       pbm->name,
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk(KERN_ERR "%s: PCI AFAR [%016lx]\n", pbm->name, afar);
	printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name);
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Target aborts can stem from IOMMU translation problems, so
	 * check the IOMMU as well as the devices on the bus.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(pbm, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(pbm, pbm->pci_bus);

	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
369 | |||
370 | static void psycho_iommu_flush(struct pci_pbm_info *pbm) | ||
371 | { | ||
372 | int i; | ||
373 | |||
374 | for (i = 0; i < 16; i++) { | ||
375 | unsigned long off = i * 8; | ||
376 | |||
377 | upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_TAG + off); | ||
378 | upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_DATA + off); | ||
379 | } | ||
380 | } | ||
381 | |||
/* IOMMU register offsets and control register layout; the TSBSZ
 * field encodes the translation table size in 1K-entry steps.
 */
#define PSYCHO_IOMMU_CONTROL	0x0200UL
#define PSYCHO_IOMMU_CTRL_TSBSZ	0x0000000000070000UL
#define PSYCHO_IOMMU_TSBSZ_1K	0x0000000000000000UL
#define PSYCHO_IOMMU_TSBSZ_2K	0x0000000000010000UL
#define PSYCHO_IOMMU_TSBSZ_4K	0x0000000000020000UL
#define PSYCHO_IOMMU_TSBSZ_8K	0x0000000000030000UL
#define PSYCHO_IOMMU_TSBSZ_16K	0x0000000000040000UL
#define PSYCHO_IOMMU_TSBSZ_32K	0x0000000000050000UL
#define PSYCHO_IOMMU_TSBSZ_64K	0x0000000000060000UL
#define PSYCHO_IOMMU_TSBSZ_128K	0x0000000000070000UL
#define PSYCHO_IOMMU_CTRL_TBWSZ	0x0000000000000004UL
#define PSYCHO_IOMMU_CTRL_DENAB	0x0000000000000002UL
#define PSYCHO_IOMMU_CTRL_ENAB	0x0000000000000001UL
#define PSYCHO_IOMMU_FLUSH	0x0210UL
#define PSYCHO_IOMMU_TSBBASE	0x0208UL
397 | |||
398 | int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, | ||
399 | u32 dvma_offset, u32 dma_mask, | ||
400 | unsigned long write_complete_offset) | ||
401 | { | ||
402 | struct iommu *iommu = pbm->iommu; | ||
403 | u64 control; | ||
404 | int err; | ||
405 | |||
406 | iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL; | ||
407 | iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE; | ||
408 | iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH; | ||
409 | iommu->iommu_tags = pbm->controller_regs + PSYCHO_IOMMU_TAG; | ||
410 | iommu->write_complete_reg = (pbm->controller_regs + | ||
411 | write_complete_offset); | ||
412 | |||
413 | iommu->iommu_ctxflush = 0; | ||
414 | |||
415 | control = upa_readq(iommu->iommu_control); | ||
416 | control |= PSYCHO_IOMMU_CTRL_DENAB; | ||
417 | upa_writeq(control, iommu->iommu_control); | ||
418 | |||
419 | psycho_iommu_flush(pbm); | ||
420 | |||
421 | /* Leave diag mode enabled for full-flushing done in pci_iommu.c */ | ||
422 | err = iommu_table_init(iommu, tsbsize * 1024 * 8, | ||
423 | dvma_offset, dma_mask, pbm->numa_node); | ||
424 | if (err) | ||
425 | return err; | ||
426 | |||
427 | upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); | ||
428 | |||
429 | control = upa_readq(iommu->iommu_control); | ||
430 | control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); | ||
431 | control |= PSYCHO_IOMMU_CTRL_ENAB; | ||
432 | |||
433 | switch (tsbsize) { | ||
434 | case 64: | ||
435 | control |= PSYCHO_IOMMU_TSBSZ_64K; | ||
436 | break; | ||
437 | case 128: | ||
438 | control |= PSYCHO_IOMMU_TSBSZ_128K; | ||
439 | break; | ||
440 | default: | ||
441 | return -EINVAL; | ||
442 | } | ||
443 | |||
444 | upa_writeq(control, iommu->iommu_control); | ||
445 | |||
446 | return 0; | ||
447 | |||
448 | } | ||
449 | |||
/* Fill in the fields of @pbm common to PSYCHO-family controllers:
 * naming, chip identification from device-tree properties, the
 * sun4u config-space access method, and the probed bus properties
 * and memory/IO resource ranges.
 */
void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct of_device *op,
			    const char *chip_name, int chip_type)
{
	struct device_node *dp = op->node;

	pbm->name = dp->full_name;
	pbm->numa_node = -1;	/* no NUMA affinity information */
	pbm->chip_type = chip_type;
	pbm->chip_version = of_getintprop_default(dp, "version#", 0);
	pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
	pbm->op = op;
	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 8;
	pbm->index = pci_num_pbms++;
	pci_get_pbm_props(pbm);
	pci_determine_mem_io_space(pbm);

	printk(KERN_INFO "%s: %s PCI Bus Module ver[%x:%x]\n",
	       pbm->name, chip_name,
	       pbm->chip_version, pbm->chip_revision);
}
diff --git a/arch/sparc/kernel/psycho_common.h b/arch/sparc/kernel/psycho_common.h new file mode 100644 index 000000000000..092c278ef28d --- /dev/null +++ b/arch/sparc/kernel/psycho_common.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _PSYCHO_COMMON_H | ||
2 | #define _PSYCHO_COMMON_H | ||
3 | |||
4 | /* U2P Programmer's Manual, page 13-55, configuration space | ||
5 | * address format: | ||
6 | * | ||
7 | * 32 24 23 16 15 11 10 8 7 2 1 0 | ||
8 | * --------------------------------------------------------- | ||
9 | * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 | | ||
10 | * --------------------------------------------------------- | ||
11 | */ | ||
12 | #define PSYCHO_CONFIG_BASE(PBM) \ | ||
13 | ((PBM)->config_space | (1UL << 24)) | ||
14 | #define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \ | ||
15 | (((unsigned long)(BUS) << 16) | \ | ||
16 | ((unsigned long)(DEVFN) << 8) | \ | ||
17 | ((unsigned long)(REG))) | ||
18 | |||
/* Form the address used to access config space for (@bus, @devfn,
 * @where) on @pbm, per the layout diagram above.
 */
static inline void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
					     unsigned char bus,
					     unsigned int devfn,
					     int where)
{
	return (void *)
		(PSYCHO_CONFIG_BASE(pbm) |
		 PSYCHO_CONFIG_ENCODE(bus, devfn, where));
}
28 | |||
/* Classes of error event passed into psycho_check_iommu_error(). */
enum psycho_error_type {
	UE_ERR, CE_ERR, PCI_ERR
};

/* Probe, report and clear IOMMU translation errors on @pbm. */
extern void psycho_check_iommu_error(struct pci_pbm_info *pbm,
				     unsigned long afsr,
				     unsigned long afar,
				     enum psycho_error_type type);

/* Interrupt handler for the PBM's PCI error interrupt. */
extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);

/* Set up and enable the IOMMU; tsbsize must be 64 or 128. */
extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
			     u32 dvma_offset, u32 dma_mask,
			     unsigned long write_complete_offset);

/* Fill in the chip-independent fields of @pbm. */
extern void psycho_pbm_init_common(struct pci_pbm_info *pbm,
				   struct of_device *op,
				   const char *chip_name, int chip_type);
47 | |||
48 | #endif /* _PSYCHO_COMMON_H */ | ||
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c new file mode 100644 index 000000000000..a941c610e7ce --- /dev/null +++ b/arch/sparc/kernel/ptrace_64.c | |||
@@ -0,0 +1,1090 @@ | |||
1 | /* ptrace.c: Sparc process tracing support. | ||
2 | * | ||
3 | * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
5 | * | ||
6 | * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson, | ||
7 | * and David Mosberger. | ||
8 | * | ||
9 | * Added Linux support -miguel (weird, eh?, the original code was meant | ||
10 | * to emulate SunOS). | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/user.h> | ||
19 | #include <linux/smp.h> | ||
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/security.h> | ||
22 | #include <linux/seccomp.h> | ||
23 | #include <linux/audit.h> | ||
24 | #include <linux/signal.h> | ||
25 | #include <linux/regset.h> | ||
26 | #include <linux/tracehook.h> | ||
27 | #include <linux/compat.h> | ||
28 | #include <linux/elf.h> | ||
29 | |||
30 | #include <asm/asi.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/psrcompat.h> | ||
35 | #include <asm/visasm.h> | ||
36 | #include <asm/spitfire.h> | ||
37 | #include <asm/page.h> | ||
38 | #include <asm/cpudata.h> | ||
39 | #include <asm/cacheflush.h> | ||
40 | |||
41 | #include "entry.h" | ||
42 | |||
43 | /* #define ALLOW_INIT_TRACING */ | ||
44 | |||
45 | /* | ||
46 | * Called by kernel/ptrace.c when detaching.. | ||
47 | * | ||
48 | * Make sure single step bits etc are not set. | ||
49 | */ | ||
/* Called by kernel/ptrace.c when detaching from a tracee; sparc64
 * keeps no per-task single-step state, so there is nothing to undo.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}
54 | |||
55 | /* To get the necessary page struct, access_process_vm() first calls | ||
56 | * get_user_pages(). This has done a flush_dcache_page() on the | ||
57 | * accessed page. Then our caller (copy_{to,from}_user_page()) did | ||
58 | * to memcpy to read/write the data from that page. | ||
59 | * | ||
60 | * Now, the only thing we have to do is: | ||
61 | * 1) flush the D-cache if it's possible than an illegal alias | ||
62 | * has been created | ||
63 | * 2) flush the I-cache if this is pre-cheetah and we did a write | ||
64 | */ | ||
/* Flush caches after copy_{to,from}_user_page() touched a tracee page
 * through a kernel alias 'kaddr'.  See the comment above for why only
 * the D-cache (aliasing) and, on pre-cheetah chips after a write, the
 * I-cache need attention.  Hypervisor (sun4v) platforms need nothing.
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	if (tlb_type == hypervisor)
		return;

	/* local_cpu_data() below requires us to stay on this CPU. */
	preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			/* Spitfire: invalidate by writing the D-cache tag
			 * for each line covering the physical range.
			 */
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			/* Cheetah and later: dedicated invalidate ASI,
			 * operating on line-aligned physical addresses.
			 */
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	/* Pre-cheetah chips have no I-cache snooping of stores, so a
	 * write into the tracee's text requires explicit flushes.
	 */
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}

	preempt_enable();
}
117 | |||
118 | static int get_from_target(struct task_struct *target, unsigned long uaddr, | ||
119 | void *kbuf, int len) | ||
120 | { | ||
121 | if (target == current) { | ||
122 | if (copy_from_user(kbuf, (void __user *) uaddr, len)) | ||
123 | return -EFAULT; | ||
124 | } else { | ||
125 | int len2 = access_process_vm(target, uaddr, kbuf, len, 0); | ||
126 | if (len2 != len) | ||
127 | return -EFAULT; | ||
128 | } | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int set_to_target(struct task_struct *target, unsigned long uaddr, | ||
133 | void *kbuf, int len) | ||
134 | { | ||
135 | if (target == current) { | ||
136 | if (copy_to_user((void __user *) uaddr, kbuf, len)) | ||
137 | return -EFAULT; | ||
138 | } else { | ||
139 | int len2 = access_process_vm(target, uaddr, kbuf, len, 1); | ||
140 | if (len2 != len) | ||
141 | return -EFAULT; | ||
142 | } | ||
143 | return 0; | ||
144 | } | ||
145 | |||
/* Fetch the register window saved on the tracee's user stack (%i6 of
 * 'regs') into 'wbuf'.  For 32-bit tasks the on-stack window holds
 * 32-bit words, so it is widened element by element; for 64-bit tasks
 * the stack pointer must first be de-biased with STACK_BIAS.
 *
 * NOTE(review): the compat check tests TIF_32BIT on 'current', not on
 * 'target' — for a 64-bit tracer peeking a 32-bit tracee (or vice
 * versa) this looks like it picks the wrong layout; confirm intent.
 */
static int regwindow64_get(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (test_tsk_thread_flag(current, TIF_32BIT)) {
		struct reg_window32 win32;
		int i;

		if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
		for (i = 0; i < 8; i++)
			wbuf->locals[i] = win32.locals[i];
		for (i = 0; i < 8; i++)
			wbuf->ins[i] = win32.ins[i];
	} else {
		rw_addr += STACK_BIAS;
		if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}
170 | |||
/* Write 'wbuf' back to the register window saved on the tracee's user
 * stack (inverse of regwindow64_get()).  32-bit tasks get the values
 * narrowed into a reg_window32; 64-bit tasks need the STACK_BIAS
 * adjustment on %i6 before storing the full window.
 *
 * NOTE(review): as in regwindow64_get(), TIF_32BIT is tested on
 * 'current' rather than 'target' — confirm this is intended.
 */
static int regwindow64_set(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (test_tsk_thread_flag(current, TIF_32BIT)) {
		struct reg_window32 win32;
		int i;

		for (i = 0; i < 8; i++)
			win32.locals[i] = wbuf->locals[i];
		for (i = 0; i < 8; i++)
			win32.ins[i] = wbuf->ins[i];

		if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
	} else {
		rw_addr += STACK_BIAS;
		if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}
196 | |||
/* Indices into the sparc64_regsets[]/sparc32_regsets[] tables below. */
enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};
201 | |||
/* regset 'get' for the 64-bit general registers.  Layout (u64 slots):
 * 0-15 = %g0-%g7/%o0-%o7 from pt_regs, 16-31 = %l0-%l7/%i0-%i7 read
 * from the tracee's on-stack register window, 32-34 = TSTATE/TPC/TNPC,
 * 35 = %y; anything beyond slot 36 is zero-filled.
 */
static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* Make sure the current window is spilled to the stack before
	 * we read it back from there.
	 */
	if (target == current)
		flushw_user();

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs,
				  0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		if (regwindow64_get(target, regs, &window))
			return -EFAULT;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &window,
					  16 * sizeof(u64),
					  32 * sizeof(u64));
	}

	if (!ret) {
		/* TSTATE, TPC, TNPC — contiguous in pt_regs, so one copy. */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->tstate,
					  32 * sizeof(u64),
					  35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y = regs->y;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &y,
					  35 * sizeof(u64),
					  36 * sizeof(u64));
	}

	if (!ret) {
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       36 * sizeof(u64), -1);

	}
	return ret;
}
251 | |||
/* regset 'set' for the 64-bit general registers (layout as in
 * genregs64_get()).  The window slots are read-modify-written through
 * the tracee's stack, and %tstate updates are restricted to the
 * condition codes and the in-syscall bit.
 */
static int genregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs,
				 0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		/* Fetch the existing window so a partial update leaves
		 * the untouched slots intact.
		 */
		if (regwindow64_get(target, regs, &window))
			return -EFAULT;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &window,
					 16 * sizeof(u64),
					 32 * sizeof(u64));

		if (!ret &&
		    regwindow64_set(target, regs, &window))
			return -EFAULT;
	}

	if (!ret && count > 0) {
		unsigned long tstate;

		/* TSTATE */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &tstate,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
		if (!ret) {
			/* Only the condition codes and the "in syscall"
			 * state can be modified in the %tstate register.
			 */
			tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate |= tstate;
		}
	}

	if (!ret) {
		/* TPC, TNPC — contiguous in pt_regs, written directly. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
					 33 * sizeof(u64),
					 35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &y,
					 35 * sizeof(u64),
					 36 * sizeof(u64));
		if (!ret)
			regs->y = y;
	}

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						36 * sizeof(u64), -1);

	return ret;
}
325 | |||
/* regset 'get' for the 64-bit FPU state.  Layout (u64 slots):
 * 0-31 = %f0-%f63 (as 32 doubles), 32 = FSR, 33 = GSR, 34 = FPRS.
 * Halves of the register file that were never saved (per the FPRS
 * DL/DU bits) read back as zero, as do FSR/GSR when the FPU was off.
 */
static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs, fsr, gsr;
	int ret;

	/* Dump live FPU state into the thread_info save area first. */
	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	if (fprs & FPRS_DL)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  fpregs,
					  0, 16 * sizeof(u64));
	else
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0,
					       16 * sizeof(u64));

	if (!ret) {
		if (fprs & FPRS_DU)
			ret = user_regset_copyout(&pos, &count,
						  &kbuf, &ubuf,
						  fpregs + 16,
						  16 * sizeof(u64),
						  32 * sizeof(u64));
		else
			ret = user_regset_copyout_zero(&pos, &count,
						       &kbuf, &ubuf,
						       16 * sizeof(u64),
						       32 * sizeof(u64));
	}

	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		gsr = task_thread_info(target)->gsr[0];
	} else {
		fsr = gsr = 0;
	}

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  32 * sizeof(u64),
					  33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &gsr,
					  33 * sizeof(u64),
					  34 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fprs,
					  34 * sizeof(u64),
					  35 * sizeof(u64));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u64), -1);

	return ret;
}
392 | |||
/* regset 'set' for the 64-bit FPU state (layout as fpregs64_get()).
 * After a write, fpsaved[0] is forced to FEF|DL|DU so the whole saved
 * image is considered valid and gets restored to the hardware.
 */
static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	/* Mark both halves and FPU-enable as valid regardless of how
	 * much of the image the caller actually supplied.
	 */
	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						35 * sizeof(u64), -1);
	return ret;
}
435 | |||
/* Native (64-bit) regset table; indexed by enum sparc_regset. */
static const struct user_regset sparc64_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 36,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = genregs64_get, .set = genregs64_set
	},
	/* Format is:
	 *	F0 --> F63
	 *	FSR
	 *	GSR
	 *	FPRS
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 35,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = fpregs64_get, .set = fpregs64_set
	},
};
463 | |||
/* regset view presented to ptrace/core-dump code for 64-bit tasks. */
static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};
468 | |||
469 | #ifdef CONFIG_COMPAT | ||
/* Compat regset 'get' for 32-bit general registers.  Layout (u32
 * slots): 0-15 = globals/outs from pt_regs, 16-31 = window
 * locals/ins read from the tracee's user stack, 32 = PSR (synthesized
 * from %tstate), 33 = PC, 34 = nPC, 35 = Y, 36-37 = WIM/TBR (always
 * zero on sparc64).  Unlike the 64-bit path this walks the window one
 * word at a time instead of using regwindow64_get().
 */
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	/* Work in register-index units; converted back at 'finish'. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			*k++ = regs->u_regs[pos++];

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(*k++, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      k, sizeof(*k), 0)
				    != sizeof(*k))
					return -EFAULT;
				k++;
				pos++;
			}
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
				return -EFAULT;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, &reg_window[pos++]) ||
				    put_user(reg, u++))
					return -EFAULT;
			}
		} else {
			/* NOTE(review): the second access_process_vm() here
			 * writes through 'u' — a pointer into the *tracer's*
			 * address space — but targets the *tracee*; this
			 * looks wrong for the cross-process case.  Confirm
			 * against later upstream rework of this function.
			 */
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg), 0)
				    != sizeof(reg))
					return -EFAULT;
				if (access_process_vm(target,
						      (unsigned long) u,
						      &reg, sizeof(reg), 1)
				    != sizeof(reg))
					return -EFAULT;
				pos++;
				u++;
			}
		}
	}
	while (count > 0) {
		switch (pos) {
		case 32: /* PSR */
			reg = tstate_to_psr(regs->tstate);
			break;
		case 33: /* PC */
			reg = regs->tpc;
			break;
		case 34: /* NPC */
			reg = regs->tnpc;
			break;
		case 35: /* Y */
			reg = regs->y;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			reg = 0;
			break;
		default:
			goto finish;
		}

		if (kbuf)
			*k++ = reg;
		else if (put_user(reg, u++))
			return -EFAULT;
		pos++;
		count--;
	}
finish:
	/* Convert back to byte offsets for the generic zero-fill helper. */
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					38 * sizeof(reg), -1);
}
576 | |||
/* Compat regset 'set' for 32-bit general registers (layout as in
 * genregs32_get()).  PSR writes only affect the condition codes and
 * syscall bit of %tstate; WIM/TBR writes are accepted and discarded.
 */
static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	/* Work in register-index units; converted back at 'finish'. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (put_user(*k++, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      (void *) k,
						      sizeof(*k), 1)
				    != sizeof(*k))
					return -EFAULT;
				k++;
				pos++;
			}
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, u++) ||
				    put_user(reg, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			/* NOTE(review): the first access_process_vm() reads
			 * through 'u' — a pointer into the *tracer's* address
			 * space — but targets the *tracee*; this mirrors the
			 * suspicious pattern in genregs32_get().  Confirm
			 * against later upstream rework.
			 */
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      u,
						      &reg, sizeof(reg), 0)
				    != sizeof(reg))
					return -EFAULT;
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg), 1)
				    != sizeof(reg))
					return -EFAULT;
				pos++;
				u++;
			}
		}
	}
	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			tstate |= psr_to_tstate_icc(reg);
			if (reg & PSR_SYSCALL)
				tstate |= TSTATE_SYSCALL;
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	/* Convert back to byte offsets for the generic ignore helper. */
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 38 * sizeof(reg), -1);
}
693 | |||
/* Compat regset 'get' for 32-bit FPU state.  Layout (u32 slots):
 * 0-31 = %f0-%f31, 32 = pad, 33 = FSR (low word), 34 = packed status
 * word (enabled flag in bits 8-15, queue entry size 8 in bits 16-23),
 * 35+ = FPU queue, always zero-filled here.
 */
static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	compat_ulong_t enabled;
	unsigned long fprs;
	compat_ulong_t fsr;
	int ret = 0;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		enabled = 1;
	} else {
		fsr = 0;
		enabled = 0;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs,
				  0, 32 * sizeof(u32));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       32 * sizeof(u32),
					       33 * sizeof(u32));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  33 * sizeof(u32),
					  34 * sizeof(u32));

	if (!ret) {
		compat_ulong_t val;

		/* Pack "FPU enabled" and a queue entry size of 8 into the
		 * status word the 32-bit ABI expects.
		 */
		val = (enabled << 8) | (8 << 16);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &val,
					  34 * sizeof(u32),
					  35 * sizeof(u32));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u32), -1);

	return ret;
}
747 | |||
/* Compat regset 'set' for 32-bit FPU state (layout as fpregs32_get()).
 * Only the low word of FSR is replaced; after a write, fpsaved[0] is
 * forced to FEF|DL so the lower register half is treated as valid.
 */
static int fpregs32_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	/* Skip the pad slot; copyin_ignore only advances pos/count, its
	 * return value is intentionally unused here.
	 */
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  32 * sizeof(u32),
					  33 * sizeof(u32));
	if (!ret && count > 0) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 33 * sizeof(u32),
					 34 * sizeof(u32));
		if (!ret) {
			/* Merge the 32-bit FSR into the low half of the
			 * 64-bit XFSR, preserving the upper half.
			 */
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						34 * sizeof(u32), -1);
	return ret;
}
793 | |||
/* Compat (32-bit) regset table; indexed by enum sparc_regset. */
static const struct user_regset sparc32_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	PSR, PC, nPC, Y, WIM, TBR
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 38,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	/* Format is:
	 *	F0 --> F31
	 *	empty 32-bit word
	 *	FSR (32--bit word)
	 *	FPU QUEUE COUNT (8-bit char)
	 *	FPU QUEUE ENTRYSIZE (8-bit char)
	 *	FPU ENABLED (8-bit char)
	 *	empty 8-bit char
	 *	FPU QUEUE (64 32-bit ints)
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 99,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = fpregs32_get, .set = fpregs32_set
	},
};
825 | |||
/* regset view presented for 32-bit compat tasks. */
static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
830 | #endif /* CONFIG_COMPAT */ | ||
831 | |||
832 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
833 | { | ||
834 | #ifdef CONFIG_COMPAT | ||
835 | if (test_tsk_thread_flag(task, TIF_32BIT)) | ||
836 | return &user_sparc32_view; | ||
837 | #endif | ||
838 | return &user_sparc64_view; | ||
839 | } | ||
840 | |||
841 | #ifdef CONFIG_COMPAT | ||
/* User-space layout for the 32-bit PTRACE_{GET,SET}FPREGS requests:
 * %f0-%f31, FSR, status flags, and the (unused here) FPU queue.
 */
struct compat_fps {
	unsigned int regs[32];
	unsigned int fsr;
	unsigned int flags;
	unsigned int extra;
	unsigned int fpqd;
	struct compat_fq {
		unsigned int insnaddr;
		unsigned int insn;
	} fpq[16];
};
853 | |||
/* Arch hook for ptrace requests from 32-bit tracers.  The sparc ABI
 * passes a second address in the tracer's %i4 (addr2), used by the
 * READ/WRITE TEXT/DATA requests.  Register requests are implemented
 * on top of the regset view of the *tracer* (current), using the
 * byte offsets of the 32-bit layouts documented above.
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	const struct user_regset_view *view = task_user_regset_view(current);
	compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs32 __user *pregs;
	struct compat_fps __user *fps;
	unsigned long addr2 = caddr2;
	unsigned long addr = caddr;
	unsigned long data = cdata;
	int ret;

	pregs = (struct pt_regs32 __user *) addr;
	fps = (struct compat_fps __user *) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* Only offset 0 is valid; nothing is actually read. */
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS:
		/* pt_regs32 is psr/pc/npc/y followed by u_regs[1..15]
		 * (%g0 is hardwired zero and not transferred).
		 */
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  1 * sizeof(u32),
						  15 * sizeof(u32),
						  &pregs->u_regs[0]);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    1 * sizeof(u32),
						    15 * sizeof(u32),
						    &pregs->u_regs[0]);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);
		if (!ret) {
			/* No FPU queue is maintained; zero the ancillary
			 * fields the 32-bit ABI exposes.
			 */
			if (__put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    __put_user(0, &fps->fpqd) ||
			    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
				ret = -EFAULT;
		}
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		/* These helpers return bytes transferred; the ABI wants
		 * 0 on full success, -EIO on a short transfer.
		 */
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		/* Sparc's historical detach request maps to the generic one. */
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
958 | #endif /* CONFIG_COMPAT */ | ||
959 | |||
/* User-space layout for the 64-bit PTRACE_{GET,SET}FPREGS64 requests:
 * the full %f0-%f63 file (as 64 32-bit words) followed by the FSR.
 */
struct fps {
	unsigned int regs[64];
	unsigned long fsr;
};
964 | |||
/* Arch hook for ptrace requests from 64-bit tracers.  As in the compat
 * path, a second address comes in via the tracer's %i4 (addr2), and
 * register requests are layered on the regset view of 'current'.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	const struct user_regset_view *view = task_user_regset_view(current);
	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	int ret;

	pregs = (struct pt_regs __user *) (unsigned long) addr;
	fps = (struct fps __user *) (unsigned long) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* Only offset 0 is valid; nothing is actually read. */
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS64:
		/* u_regs[1..15] (%g0 is hardwired zero) followed by
		 * tstate/tpc/tnpc/y from slots 32-35.
		 */
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  1 * sizeof(u64),
					  15 * sizeof(u64),
					  &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  32 * sizeof(u64),
						  4 * sizeof(u64),
						  &pregs->tstate);
		}
		break;

	case PTRACE_SETREGS64:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    1 * sizeof(u64),
					    15 * sizeof(u64),
					    &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    32 * sizeof(u64),
						    4 * sizeof(u64),
						    &pregs->tstate);
		}
		break;

	case PTRACE_GETFPREGS64:
		/* struct fps matches regset slots 0-32 exactly. */
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u64),
					  33 * sizeof(u64),
					  fps);
		break;

	case PTRACE_SETFPREGS64:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u64),
					    33 * sizeof(u64),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		/* Helpers return bytes transferred; the ABI wants 0 on
		 * full success, -EIO on a short transfer.
		 */
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		/* Sparc's historical detach request maps to the generic one. */
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
1052 | |||
/* Called from the syscall entry path when tracing/auditing is active.
 * Runs seccomp first, then the ptrace/tracehook entry report, then
 * audit.  Returns nonzero if the tracer asked to abort the syscall.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	/* do the secure computing check first */
	secure_computing(regs->u_regs[UREG_G1]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = tracehook_report_syscall_entry(regs);

	/* Audit with the ABI the task is actually using: %g1 holds the
	 * syscall number, %i0-%i3 the first four arguments.
	 */
	if (unlikely(current->audit_context) && !ret)
		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
				     AUDIT_ARCH_SPARC :
				     AUDIT_ARCH_SPARC64),
				    regs->u_regs[UREG_G1],
				    regs->u_regs[UREG_I0],
				    regs->u_regs[UREG_I1],
				    regs->u_regs[UREG_I2],
				    regs->u_regs[UREG_I3]);

	return ret;
}
1075 | |||
/* Called from the syscall exit path: reports the result to audit
 * (sparc signals syscall failure via the carry bit in %tstate, not a
 * negative return value) and then to the ptrace/tracehook machinery.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context)) {
		unsigned long tstate = regs->tstate;
		int result = AUDITSC_SUCCESS;

		if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
			result = AUDITSC_FAILURE;

		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
	}

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c new file mode 100644 index 000000000000..ef89d3d69748 --- /dev/null +++ b/arch/sparc/kernel/reboot.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* reboot.c: reboot/shutdown/halt/poweroff handling | ||
2 | * | ||
3 | * Copyright (C) 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/reboot.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/pm.h> | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <asm/oplib.h> | ||
12 | #include <asm/prom.h> | ||
13 | |||
/* sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off().  When 0, a serial-console box
 * halts into the PROM instead of dropping power.
 */
int scons_pwroff = 1;

/* This isn't actually used, it exists merely to satisfy the
 * reference in kernel/sys.c
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
24 | |||
25 | void machine_power_off(void) | ||
26 | { | ||
27 | if (strcmp(of_console_device->type, "serial") || scons_pwroff) | ||
28 | prom_halt_power_off(); | ||
29 | |||
30 | prom_halt(); | ||
31 | } | ||
32 | |||
/* Halt: drop back into the PROM.  prom_halt() should never return;
 * the panic() is a last-resort backstop if it somehow does.
 */
void machine_halt(void)
{
	prom_halt();
	panic("Halt failed!");
}
38 | |||
39 | void machine_restart(char *cmd) | ||
40 | { | ||
41 | char *p; | ||
42 | |||
43 | p = strchr(reboot_command, '\n'); | ||
44 | if (p) | ||
45 | *p = 0; | ||
46 | if (cmd) | ||
47 | prom_reboot(cmd); | ||
48 | if (*reboot_command) | ||
49 | prom_reboot(reboot_command); | ||
50 | prom_reboot(""); | ||
51 | panic("Reboot failed!"); | ||
52 | } | ||
53 | |||
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S new file mode 100644 index 000000000000..fd3cee4d117c --- /dev/null +++ b/arch/sparc/kernel/rtrap_64.S | |||
@@ -0,0 +1,450 @@ | |||
/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

/* %pstate values used on the return path: TSO memory model, FPU
 * enabled, privileged; with and without interrupt enable (PSTATE_IE),
 * plus the pre-sun4v variant selecting the alternate globals
 * (PSTATE_AG).
 */
#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

		.text
		.align			32
/* Pending softirqs were observed on the way out; run them, then
 * resume the normal rtrap processing.
 */
__handle_softirq:
		call			do_softirq
		 nop
		ba,a,pt			%xcc, __handle_softirq_continue
		 nop
/* TIF_NEED_RESCHED was set: call schedule() with IRQs re-enabled via
 * the delay slot, then disable them again before redoing the checks.
 */
__handle_preemption:
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		ba,pt			%xcc, __handle_preemption_continue
		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
33 | |||
/* Flush user register windows to the stack, then redo the resched
 * and notify-resume checks since IRQs were enabled across the call.
 */
__handle_user_windows:
		call			fault_in_user_windows
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		/* Redo sched+sig checks */
		ldx			[%g6 + TI_FLAGS], %l0
		andcc			%l0, _TIF_NEED_RESCHED, %g0

		be,pt			%xcc, 1f
		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0

1:		andcc			%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
		be,pt			%xcc, __handle_user_windows_continue
		 nop
		mov			%l5, %o1
		add			%sp, PTREGS_OFF, %o0
		mov			%l0, %o2

		call			do_notify_resume
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		ba,pt			%xcc, __handle_user_windows_continue

		 andn			%l1, %l4, %l1
/* TIF_PERFCTR was set: update the performance counters, then redo
 * the user-window, resched and notify-resume checks because IRQs
 * were enabled across the call.
 */
__handle_perfctrs:
		call			update_perfctrs
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldub			[%g6 + TI_WSAVED], %o2
		brz,pt			%o2, 1f
		 nop
		/* Redo userwin+sched+sig checks */
		call			fault_in_user_windows

		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0
		andcc			%l0, _TIF_NEED_RESCHED, %g0
		be,pt			%xcc, 1f

		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldx			[%g6 + TI_FLAGS], %l0
1:		andcc			%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0

		be,pt			%xcc, __handle_perfctrs_continue
		 sethi			%hi(TSTATE_PEF), %o0
		mov			%l5, %o1
		add			%sp, PTREGS_OFF, %o0
		mov			%l0, %o2
		call			do_notify_resume

		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		andn			%l1, %l4, %l1
		ba,pt			%xcc, __handle_perfctrs_continue

		 sethi			%hi(TSTATE_PEF), %o0
/* User FPU state handling: if %fprs says the FPU was not touched
 * (FPRS_FEF clear), drop TSTATE_PEF from the saved %tstate in %l1
 * via the annulled delay slot before continuing.
 */
__handle_userfpu:
		rd			%fprs, %l5
		andcc			%l5, FPRS_FEF, %g0
		sethi			%hi(TSTATE_PEF), %o0
		be,a,pn			%icc, __handle_userfpu_continue
		 andn			%l1, %o0, %l1
		ba,a,pt			%xcc, __handle_userfpu_continue

/* Pending signal/notify work: call do_notify_resume with IRQs
 * enabled (delay slot), then re-disable and reload %tstate.
 */
__handle_signal:
		mov			%l5, %o1
		add			%sp, PTREGS_OFF, %o0
		mov			%l0, %o2
		call			do_notify_resume
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate

		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		ba,pt			%xcc, __handle_signal_continue
		 andn			%l1, %l4, %l1
134 | |||
/* When returning from a NMI (%pil==15) interrupt we want to
 * avoid running softirqs, doing IRQ tracing, preempting, etc.
 */
		.globl			rtrap_nmi
rtrap_nmi:	ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		andn			%l1, %l4, %l1
		srl			%l4, 20, %l4
		ba,pt			%xcc, rtrap_no_irq_enable
		 wrpr			%l4, %pil

		.align			64
		.globl			rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
		/* Check the per-cpu softirq-pending word first. */
#ifndef CONFIG_SMP
		sethi			%hi(per_cpu____cpu_data), %l0
		lduw			[%l0 + %lo(per_cpu____cpu_data)], %l1
#else
		sethi			%hi(per_cpu____cpu_data), %l0
		or			%l0, %lo(per_cpu____cpu_data), %l0
		lduw			[%l0 + %g5], %l1
#endif
		cmp			%l1, 0

		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
		bne,pn			%icc, __handle_softirq
		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
		/* Split saved %tstate: %l4 gets the PIL field, %l1 the rest. */
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		andn			%l1, %l4, %l1
		srl			%l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
		brnz,pn			%l4, rtrap_no_irq_enable
		 nop
		call			trace_hardirqs_on
		 nop
		wrpr			%l4, %pil
#endif
rtrap_no_irq_enable:
		andcc			%l1, TSTATE_PRIV, %l3
		bne,pn			%icc, to_kernel
		 nop

		/* We must hold IRQs off and atomically test schedule+signal
		 * state, then hold them off all the way back to userspace.
		 * If we are returning to kernel, none of this matters.  Note
		 * that we are disabling interrupts via PSTATE_IE, not using
		 * %pil.
		 *
		 * If we do not do this, there is a window where we would do
		 * the tests, later the signal/resched event arrives but we do
		 * not process it since we are still in kernel mode.  It would
		 * take until the next local IRQ before the signal/resched
		 * event would be handled.
		 *
		 * This also means that if we have to deal with performance
		 * counters or user windows, we have to redo all of these
		 * sched+signal checks with IRQs disabled.
		 */
to_user:	wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		wrpr			0, %pil
__handle_preemption_continue:
		ldx			[%g6 + TI_FLAGS], %l0
		sethi			%hi(_TIF_USER_WORK_MASK), %o0
		or			%o0, %lo(_TIF_USER_WORK_MASK), %o0
		andcc			%l0, %o0, %g0
		sethi			%hi(TSTATE_PEF), %o0
		be,pt			%xcc, user_nowork
		 andcc			%l1, %o0, %g0
		andcc			%l0, _TIF_NEED_RESCHED, %g0
		bne,pn			%xcc, __handle_preemption
		 andcc			%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
		bne,pn			%xcc, __handle_signal
__handle_signal_continue:
		ldub			[%g6 + TI_WSAVED], %o2
		brnz,pn			%o2, __handle_user_windows
		 nop
__handle_user_windows_continue:
		ldx			[%g6 + TI_FLAGS], %l5
		andcc			%l5, _TIF_PERFCTR, %g0
		sethi			%hi(TSTATE_PEF), %o0
		bne,pn			%xcc, __handle_perfctrs
__handle_perfctrs_continue:
		andcc			%l1, %o0, %g0

		/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
		bne,pn			%xcc, __handle_userfpu
		 stb			%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
229 | |||
/* Restore all registers from the saved pt_regs and return via retry,
 * either to userspace (with context-register and window fixups) or
 * to the kernel (kern_rtt).  %l3 is nonzero for a kernel return.
 */
rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
		ldx			[%sp + PTREGS_OFF + PT_V9_G2], %g2

		ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
		brz,pt			%l3, 1f
		 mov			%g6, %l2

		/* Must do this before thread reg is clobbered below. */
		LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7

		/* Normal globals are restored, go to trap globals. */
661:		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
		nop
		.section		.sun4v_2insn_patch, "ax"
		.word			661b
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		SET_GL(1)
		.previous

		mov			%l2, %g6

		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1

		ldx			[%sp + PTREGS_OFF + PT_V9_I2], %i2
		ldx			[%sp + PTREGS_OFF + PT_V9_I3], %i3
		ldx			[%sp + PTREGS_OFF + PT_V9_I4], %i4
		ldx			[%sp + PTREGS_OFF + PT_V9_I5], %i5
		ldx			[%sp + PTREGS_OFF + PT_V9_I6], %i6
		ldx			[%sp + PTREGS_OFF + PT_V9_I7], %i7
		ldx			[%sp + PTREGS_OFF + PT_V9_TPC], %l2
		ldx			[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

		/* Rebuild trap state: %y, %pil, TL=1, then %tstate/%tpc/%tnpc. */
		ld			[%sp + PTREGS_OFF + PT_V9_Y], %o3
		wr			%o3, %g0, %y
		wrpr			%l4, 0x0, %pil
		wrpr			%g0, 0x1, %tl
		andn			%l1, TSTATE_SYSCALL, %l1
		wrpr			%l1, %g0, %tstate
		wrpr			%l2, %g0, %tpc
		wrpr			%o2, %g0, %tnpc

		brnz,pn			%l3, kern_rtt
		 mov			PRIMARY_CONTEXT, %l7

661:		ldxa			[%l7 + %l7] ASI_DMMU, %l0
		.section		.sun4v_1insn_patch, "ax"
		.word			661b
		ldxa			[%l7 + %l7] ASI_MMU, %l0
		.previous

		sethi			%hi(sparc64_kern_pri_nuc_bits), %l1
		ldx			[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
		or			%l0, %l1, %l0

661:		stxa			%l0, [%l7] ASI_DMMU
		.section		.sun4v_1insn_patch, "ax"
		.word			661b
		stxa			%l0, [%l7] ASI_MMU
		.previous

		sethi			%hi(KERNBASE), %l7
		flush			%l7
		rdpr			%wstate, %l1
		rdpr			%otherwin, %l2
		srl			%l1, 3, %l1

		wrpr			%l2, %g0, %canrestore
		wrpr			%l1, %g0, %wstate
		brnz,pt			%l2, user_rtt_restore
		 wrpr			%g0, %g0, %otherwin

		/* No restorable windows: refill one from the user stack,
		 * 32- or 64-bit layout depending on _TIF_32BIT.
		 */
		ldx			[%g6 + TI_FLAGS], %g3
		wr			%g0, ASI_AIUP, %asi
		rdpr			%cwp, %g1
		andcc			%g3, _TIF_32BIT, %g0
		sub			%g1, 1, %g1
		bne,pt			%xcc, user_rtt_fill_32bit
		 wrpr			%g1, %cwp
		ba,a,pt			%xcc, user_rtt_fill_64bit
315 | |||
/* The user window refill faulted: back out the window rotation,
 * switch to the kernel MMU context, record a WINFIXUP fault and hand
 * it to do_sparc64_fault(), then restart the whole rtrap sequence.
 */
user_rtt_fill_fixup:
		rdpr			%cwp, %g1
		add			%g1, 1, %g1
		wrpr			%g1, 0x0, %cwp

		rdpr			%wstate, %g2
		sll			%g2, 3, %g2
		wrpr			%g2, 0x0, %wstate

		/* We know %canrestore and %otherwin are both zero. */

		sethi			%hi(sparc64_kern_pri_context), %g2
		ldx			[%g2 + %lo(sparc64_kern_pri_context)], %g2
		mov			PRIMARY_CONTEXT, %g1

661:		stxa			%g2, [%g1] ASI_DMMU
		.section		.sun4v_1insn_patch, "ax"
		.word			661b
		stxa			%g2, [%g1] ASI_MMU
		.previous

		sethi			%hi(KERNBASE), %g1
		flush			%g1

		or			%g4, FAULT_CODE_WINFIXUP, %g4
		stb			%g4, [%g6 + TI_FAULT_CODE]
		stx			%g5, [%g6 + TI_FAULT_ADDR]

		mov			%g6, %l1
		wrpr			%g0, 0x0, %tl

661:		nop
		.section		.sun4v_1insn_patch, "ax"
		.word			661b
		SET_GL(0)
		.previous

		wrpr			%g0, RTRAP_PSTATE, %pstate

		mov			%l1, %g6
		ldx			[%g6 + TI_TASK], %g4
		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
		call			do_sparc64_fault
		 add			%sp, PTREGS_OFF, %o0
		ba,pt			%xcc, rtrap
		 nop

/* Re-rotate into the window we backed out of, then fall into the
 * normal restore path.
 */
user_rtt_pre_restore:
		add			%g1, 1, %g1
		wrpr			%g1, 0x0, %cwp

user_rtt_restore:
		restore
		rdpr			%canrestore, %g1
		wrpr			%g1, 0x0, %cleanwin
		retry
		nop

/* Kernel-mode return: restore a window (filling from the kernel
 * stack if none are restorable) and retry the trapped instruction.
 */
kern_rtt:	rdpr			%canrestore, %g1
		brz,pn			%g1, kern_rtt_fill
		 nop
kern_rtt_restore:
		stw			%g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
		restore
		retry
381 | |||
/* Return-to-kernel path: optionally run kernel preemption, then
 * restore any saved FPU state (tracked by TI_FPDEPTH / TI_FPSAVED)
 * before falling back into rt_continue.
 */
to_kernel:
#ifdef CONFIG_PREEMPT
		/* Preempt only when preempt_count is zero, resched is
		 * requested, and the saved PIL (%l4) is zero.
		 */
		ldsw			[%g6 + TI_PRE_COUNT], %l5
		brnz			%l5, kern_fpucheck
		 ldx			[%g6 + TI_FLAGS], %l5
		andcc			%l5, _TIF_NEED_RESCHED, %g0
		be,pt			%xcc, kern_fpucheck
		 nop
		cmp			%l4, 0
		bne,pn			%xcc, kern_fpucheck
		 sethi			%hi(PREEMPT_ACTIVE), %l6
		stw			%l6, [%g6 + TI_PRE_COUNT]
		call			schedule
		 nop
		ba,pt			%xcc, rtrap
		 stw			%g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck:	ldub			[%g6 + TI_FPDEPTH], %l5
		brz,pt			%l5, rt_continue
		 srl			%l5, 1, %o0
		add			%g6, TI_FPSAVED, %l6
		ldub			[%l6 + %o0], %l2
		sub			%l5, 2, %l5

		add			%g6, TI_GSR, %o1
		andcc			%l2, (FPRS_FEF|FPRS_DU), %g0
		be,pt			%icc, 2f
		 and			%l2, FPRS_DL, %l6
		andcc			%l2, FPRS_FEF, %g0
		be,pn			%icc, 5f
		 sll			%o0, 3, %o5
		rd			%fprs, %g1

		/* Restore %gsr, lower and/or upper FP register banks
		 * from the per-thread save area.
		 */
		wr			%g1, FPRS_FEF, %fprs
		ldx			[%o1 + %o5], %g1
		add			%g6, TI_XFSR, %o1
		sll			%o0, 8, %o2
		add			%g6, TI_FPREGS, %o3
		brz,pn			%l6, 1f
		 add			%g6, TI_FPREGS+0x40, %o4

		membar			#Sync
		ldda			[%o3 + %o2] ASI_BLK_P, %f0
		ldda			[%o4 + %o2] ASI_BLK_P, %f16
		membar			#Sync
1:		andcc			%l2, FPRS_DU, %g0
		be,pn			%icc, 1f
		 wr			%g1, 0, %gsr
		add			%o2, 0x80, %o2
		membar			#Sync
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48
1:		membar			#Sync
		ldx			[%o1 + %o5], %fsr
2:		stb			%l5, [%g6 + TI_FPDEPTH]
		ba,pt			%xcc, rt_continue
		 nop
5:		wr			%g0, FPRS_FEF, %fprs
		sll			%o0, 8, %o2

		add			%g6, TI_FPREGS+0x80, %o3
		add			%g6, TI_FPREGS+0xc0, %o4
		membar			#Sync
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48
		membar			#Sync
		wr			%g0, FPRS_DU, %fprs
		ba,pt			%xcc, rt_continue
		 stb			%l5, [%g6 + TI_FPDEPTH]
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c new file mode 100644 index 000000000000..2ead310066d1 --- /dev/null +++ b/arch/sparc/kernel/sbus.c | |||
@@ -0,0 +1,674 @@ | |||
1 | /* | ||
2 | * sbus.c: UltraSparc SBUS controller support. | ||
3 | * | ||
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_device.h> | ||
16 | |||
17 | #include <asm/page.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/upa.h> | ||
20 | #include <asm/cache.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/prom.h> | ||
24 | #include <asm/oplib.h> | ||
25 | #include <asm/starfire.h> | ||
26 | |||
27 | #include "iommu_common.h" | ||
28 | |||
29 | #define MAP_BASE ((u32)0xc0000000) | ||
30 | |||
31 | /* Offsets from iommu_regs */ | ||
32 | #define SYSIO_IOMMUREG_BASE 0x2400UL | ||
33 | #define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */ | ||
34 | #define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */ | ||
35 | #define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */ | ||
36 | #define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */ | ||
37 | #define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */ | ||
38 | #define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */ | ||
39 | #define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */ | ||
40 | #define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */ | ||
41 | |||
42 | #define IOMMU_DRAM_VALID (1UL << 30UL) | ||
43 | |||
44 | /* Offsets from strbuf_regs */ | ||
45 | #define SYSIO_STRBUFREG_BASE 0x2800UL | ||
46 | #define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */ | ||
47 | #define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */ | ||
48 | #define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */ | ||
49 | #define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */ | ||
50 | #define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */ | ||
51 | #define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */ | ||
52 | #define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */ | ||
53 | |||
54 | #define STRBUF_TAG_VALID 0x02UL | ||
55 | |||
56 | /* Enable 64-bit DVMA mode for the given device. */ | ||
57 | void sbus_set_sbus64(struct device *dev, int bursts) | ||
58 | { | ||
59 | struct iommu *iommu = dev->archdata.iommu; | ||
60 | struct of_device *op = to_of_device(dev); | ||
61 | const struct linux_prom_registers *regs; | ||
62 | unsigned long cfg_reg; | ||
63 | int slot; | ||
64 | u64 val; | ||
65 | |||
66 | regs = of_get_property(op->node, "reg", NULL); | ||
67 | if (!regs) { | ||
68 | printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n", | ||
69 | op->node->full_name); | ||
70 | return; | ||
71 | } | ||
72 | slot = regs->which_io; | ||
73 | |||
74 | cfg_reg = iommu->write_complete_reg; | ||
75 | switch (slot) { | ||
76 | case 0: | ||
77 | cfg_reg += 0x20UL; | ||
78 | break; | ||
79 | case 1: | ||
80 | cfg_reg += 0x28UL; | ||
81 | break; | ||
82 | case 2: | ||
83 | cfg_reg += 0x30UL; | ||
84 | break; | ||
85 | case 3: | ||
86 | cfg_reg += 0x38UL; | ||
87 | break; | ||
88 | case 13: | ||
89 | cfg_reg += 0x40UL; | ||
90 | break; | ||
91 | case 14: | ||
92 | cfg_reg += 0x48UL; | ||
93 | break; | ||
94 | case 15: | ||
95 | cfg_reg += 0x50UL; | ||
96 | break; | ||
97 | |||
98 | default: | ||
99 | return; | ||
100 | }; | ||
101 | |||
102 | val = upa_readq(cfg_reg); | ||
103 | if (val & (1UL << 14UL)) { | ||
104 | /* Extended transfer mode already enabled. */ | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | val |= (1UL << 14UL); | ||
109 | |||
110 | if (bursts & DMA_BURST8) | ||
111 | val |= (1UL << 1UL); | ||
112 | if (bursts & DMA_BURST16) | ||
113 | val |= (1UL << 2UL); | ||
114 | if (bursts & DMA_BURST32) | ||
115 | val |= (1UL << 3UL); | ||
116 | if (bursts & DMA_BURST64) | ||
117 | val |= (1UL << 4UL); | ||
118 | upa_writeq(val, cfg_reg); | ||
119 | } | ||
120 | |||
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c00UL
#define SYSIO_IMAP_SLOT1	0x2c08UL
#define SYSIO_IMAP_SLOT2	0x2c10UL
#define SYSIO_IMAP_SLOT3	0x2c18UL
#define SYSIO_IMAP_SCSI		0x3000UL
#define SYSIO_IMAP_ETH		0x3008UL
#define SYSIO_IMAP_BPP		0x3010UL
#define SYSIO_IMAP_AUDIO	0x3018UL
#define SYSIO_IMAP_PFAIL	0x3020UL
#define SYSIO_IMAP_KMS		0x3028UL
#define SYSIO_IMAP_FLPY		0x3030UL
#define SYSIO_IMAP_SHW		0x3038UL
#define SYSIO_IMAP_KBD		0x3040UL
#define SYSIO_IMAP_MS		0x3048UL
#define SYSIO_IMAP_SER		0x3050UL
#define SYSIO_IMAP_TIM0		0x3060UL
#define SYSIO_IMAP_TIM1		0x3068UL
#define SYSIO_IMAP_UE		0x3070UL
#define SYSIO_IMAP_CE		0x3078UL
#define SYSIO_IMAP_SBERR	0x3080UL
#define SYSIO_IMAP_PMGMT	0x3088UL
#define SYSIO_IMAP_GFX		0x3090UL
#define SYSIO_IMAP_EUPA		0x3098UL

#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x3408UL
#define SYSIO_ICLR_SLOT1	0x3448UL
#define SYSIO_ICLR_SLOT2	0x3488UL
#define SYSIO_ICLR_SLOT3	0x34c8UL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
	/* IMAP and ICLR register banks are laid out at a constant
	 * distance from each other.
	 */
	return imap + (SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0);
}
205 | |||
/* Build a virtual IRQ for the SYSIO interrupt numbered 'ino'.
 *
 * Translates the INO into the matching interrupt-map (IMAP) and
 * interrupt-clear (ICLR) register addresses, located at fixed offsets
 * from the controller register block, and hands them to the generic
 * build_irq().  An INO with no IMAP entry halts the machine via
 * prom_halt().
 */
static unsigned int sbus_build_irq(struct of_device *op, unsigned int ino)
{
	struct iommu *iommu = op->dev.archdata.iommu;
	/* write_complete_reg sits 0x2000 into the register block. */
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long imap, iclr;
	int sbus_level = 0;

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
			    ino);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency. For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		/* INO bits [4:3] select the slot, [2:0] the level. */
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		/* Eight ICLR registers per slot, one per SBUS level,
		 * starting at level 1.
		 */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
}
252 | |||
253 | /* Error interrupt handling. */ | ||
254 | #define SYSIO_UE_AFSR 0x0030UL | ||
255 | #define SYSIO_UE_AFAR 0x0038UL | ||
256 | #define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ | ||
257 | #define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ | ||
258 | #define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ | ||
259 | #define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ | ||
260 | #define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ | ||
261 | #define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ | ||
262 | #define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ | ||
263 | #define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */ | ||
264 | #define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ | ||
265 | #define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ | ||
266 | #define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ | ||
/* Interrupt handler for SYSIO uncorrectable ECC errors.
 *
 * Latches and clears the UE AFSR/AFAR state, then logs the primary
 * error type (PIO vs. DVMA read/write), transfer details and address,
 * plus any secondary error bits.  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
	struct of_device *op = dev_id;
	struct iommu *iommu = op->dev.archdata.iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported, portid;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	portid = of_getintprop_default(op->node, "portid", -1);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
	printk("SYSIO[%x]: Secondary UE errors [", portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
326 | |||
327 | #define SYSIO_CE_AFSR 0x0040UL | ||
328 | #define SYSIO_CE_AFAR 0x0048UL | ||
329 | #define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ | ||
330 | #define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ | ||
331 | #define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ | ||
332 | #define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */ | ||
333 | #define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ | ||
334 | #define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ | ||
335 | #define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */ | ||
336 | #define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */ | ||
337 | #define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */ | ||
338 | #define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ | ||
339 | #define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ | ||
340 | #define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ | ||
/* Interrupt handler for SYSIO correctable ECC errors.
 *
 * Same structure as sysio_ue_handler(): latch and clear the CE
 * AFSR/AFAR state, log the primary error type, syndrome/transfer
 * details and address, plus any secondary error bits.  Always
 * returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
	struct of_device *op = dev_id;
	struct iommu *iommu = op->dev.archdata.iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported, portid;

	afsr_reg = reg_base + SYSIO_CE_AFSR;
	afar_reg = reg_base + SYSIO_CE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	portid = of_getintprop_default(op->node, "portid", -1);

	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
	       portid,
	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
	       portid,
	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);

	printk("SYSIO[%x]: Secondary CE errors [", portid);
	reported = 0;
	if (afsr & SYSIO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_CEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_CEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
405 | |||
406 | #define SYSIO_SBUS_AFSR 0x2010UL | ||
407 | #define SYSIO_SBUS_AFAR 0x2018UL | ||
408 | #define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */ | ||
409 | #define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */ | ||
410 | #define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */ | ||
411 | #define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */ | ||
412 | #define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */ | ||
413 | #define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */ | ||
414 | #define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */ | ||
415 | #define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */ | ||
416 | #define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */ | ||
417 | #define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */ | ||
418 | #define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */ | ||
419 | #define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */ | ||
/* Interrupt handler for SBUS errors (late PIO error, timeout, error ack).
 *
 * Mirrors sysio_ce_handler(): latch AFSR/AFAR, acknowledge by writing
 * the status bits back, then log a decoded report.
 */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
	struct of_device *op = dev_id;
	struct iommu *iommu = op->dev.archdata.iommu;
	unsigned long afsr_reg, afar_reg, reg_base;
	unsigned long afsr, afar, error_bits;
	int reported, portid;

	/* SYSIO register block is 0x2000 below the SBUS control reg. */
	reg_base = iommu->write_complete_reg - 0x2000UL;
	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
	afar_reg = reg_base + SYSIO_SBUS_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
	upa_writeq(error_bits, afsr_reg);

	portid = of_getintprop_default(op->node, "portid", -1);

	/* Log the error. */
	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
	       portid,
	       (((error_bits & SYSIO_SBAFSR_PLE) ?
		 "Late PIO Error" :
		 ((error_bits & SYSIO_SBAFSR_PTO) ?
		  "Time Out" :
		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
		   "Error Ack" : "???")))),
	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
	/* Shift amounts match field positions in the SYSIO_SBAFSR_* masks. */
	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
	       portid,
	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
	printk("SYSIO[%x]: Secondary SBUS errors [", portid);
	reported = 0;
	if (afsr & SYSIO_SBAFSR_SLE) {
		reported++;
		printk("(Late PIO Error)");
	}
	if (afsr & SYSIO_SBAFSR_STO) {
		reported++;
		printk("(Time Out)");
	}
	if (afsr & SYSIO_SBAFSR_SBERR) {
		reported++;
		printk("(Error Ack)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* XXX check iommu/strbuf for further error status XXX */

	return IRQ_HANDLED;
}
480 | |||
481 | #define ECC_CONTROL 0x0020UL | ||
482 | #define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */ | ||
483 | #define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */ | ||
484 | #define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */ | ||
485 | |||
486 | #define SYSIO_UE_INO 0x34 | ||
487 | #define SYSIO_CE_INO 0x35 | ||
488 | #define SYSIO_SBUSERR_INO 0x36 | ||
489 | |||
/* Boot-time registration of the three SYSIO error interrupts (UE, CE,
 * SBUS error), followed by enabling ECC checking and error interrupt
 * delivery.  Any registration failure is fatal at this point, so we
 * report it via the PROM and halt.
 */
static void __init sysio_register_error_handlers(struct of_device *op)
{
	struct iommu *iommu = op->dev.archdata.iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned int irq;
	u64 control;
	int portid;

	portid = of_getintprop_default(op->node, "portid", -1);

	irq = sbus_build_irq(op, SYSIO_UE_INO);
	if (request_irq(irq, sysio_ue_handler, 0,
			"SYSIO_UE", op) < 0) {
		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
			    portid);
		prom_halt();
	}

	irq = sbus_build_irq(op, SYSIO_CE_INO);
	if (request_irq(irq, sysio_ce_handler, 0,
			"SYSIO_CE", op) < 0) {
		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
			    portid);
		prom_halt();
	}

	irq = sbus_build_irq(op, SYSIO_SBUSERR_INO);
	if (request_irq(irq, sysio_sbus_error_handler, 0,
			"SYSIO_SBERR", op) < 0) {
		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
			    portid);
		prom_halt();
	}

	/* Now turn the error interrupts on and also enable ECC checking. */
	upa_writeq((SYSIO_ECNTRL_ECCEN |
		    SYSIO_ECNTRL_UEEN |
		    SYSIO_ECNTRL_CEEN),
		   reg_base + ECC_CONTROL);

	/* Finally flip the SBUS Error Interrupt Enable bit in the SBUS
	 * control register itself.
	 */
	control = upa_readq(iommu->write_complete_reg);
	control |= 0x100UL; /* SBUS Error Interrupt Enable */
	upa_writeq(control, iommu->write_complete_reg);
}
534 | |||
535 | /* Boot time initialization. */ | ||
536 | static void __init sbus_iommu_init(struct of_device *op) | ||
537 | { | ||
538 | const struct linux_prom64_registers *pr; | ||
539 | struct device_node *dp = op->node; | ||
540 | struct iommu *iommu; | ||
541 | struct strbuf *strbuf; | ||
542 | unsigned long regs, reg_base; | ||
543 | int i, portid; | ||
544 | u64 control; | ||
545 | |||
546 | pr = of_get_property(dp, "reg", NULL); | ||
547 | if (!pr) { | ||
548 | prom_printf("sbus_iommu_init: Cannot map SYSIO " | ||
549 | "control registers.\n"); | ||
550 | prom_halt(); | ||
551 | } | ||
552 | regs = pr->phys_addr; | ||
553 | |||
554 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); | ||
555 | if (!iommu) | ||
556 | goto fatal_memory_error; | ||
557 | strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); | ||
558 | if (!strbuf) | ||
559 | goto fatal_memory_error; | ||
560 | |||
561 | op->dev.archdata.iommu = iommu; | ||
562 | op->dev.archdata.stc = strbuf; | ||
563 | op->dev.archdata.numa_node = -1; | ||
564 | |||
565 | reg_base = regs + SYSIO_IOMMUREG_BASE; | ||
566 | iommu->iommu_control = reg_base + IOMMU_CONTROL; | ||
567 | iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE; | ||
568 | iommu->iommu_flush = reg_base + IOMMU_FLUSH; | ||
569 | iommu->iommu_tags = iommu->iommu_control + | ||
570 | (IOMMU_TAGDIAG - IOMMU_CONTROL); | ||
571 | |||
572 | reg_base = regs + SYSIO_STRBUFREG_BASE; | ||
573 | strbuf->strbuf_control = reg_base + STRBUF_CONTROL; | ||
574 | strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH; | ||
575 | strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC; | ||
576 | |||
577 | strbuf->strbuf_enabled = 1; | ||
578 | |||
579 | strbuf->strbuf_flushflag = (volatile unsigned long *) | ||
580 | ((((unsigned long)&strbuf->__flushflag_buf[0]) | ||
581 | + 63UL) | ||
582 | & ~63UL); | ||
583 | strbuf->strbuf_flushflag_pa = (unsigned long) | ||
584 | __pa(strbuf->strbuf_flushflag); | ||
585 | |||
586 | /* The SYSIO SBUS control register is used for dummy reads | ||
587 | * in order to ensure write completion. | ||
588 | */ | ||
589 | iommu->write_complete_reg = regs + 0x2000UL; | ||
590 | |||
591 | portid = of_getintprop_default(op->node, "portid", -1); | ||
592 | printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n", | ||
593 | portid, regs); | ||
594 | |||
595 | /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ | ||
596 | if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1)) | ||
597 | goto fatal_memory_error; | ||
598 | |||
599 | control = upa_readq(iommu->iommu_control); | ||
600 | control = ((7UL << 16UL) | | ||
601 | (0UL << 2UL) | | ||
602 | (1UL << 1UL) | | ||
603 | (1UL << 0UL)); | ||
604 | upa_writeq(control, iommu->iommu_control); | ||
605 | |||
606 | /* Clean out any cruft in the IOMMU using | ||
607 | * diagnostic accesses. | ||
608 | */ | ||
609 | for (i = 0; i < 16; i++) { | ||
610 | unsigned long dram, tag; | ||
611 | |||
612 | dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL); | ||
613 | tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); | ||
614 | |||
615 | dram += (unsigned long)i * 8UL; | ||
616 | tag += (unsigned long)i * 8UL; | ||
617 | upa_writeq(0, dram); | ||
618 | upa_writeq(0, tag); | ||
619 | } | ||
620 | upa_readq(iommu->write_complete_reg); | ||
621 | |||
622 | /* Give the TSB to SYSIO. */ | ||
623 | upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); | ||
624 | |||
625 | /* Setup streaming buffer, DE=1 SB_EN=1 */ | ||
626 | control = (1UL << 1UL) | (1UL << 0UL); | ||
627 | upa_writeq(control, strbuf->strbuf_control); | ||
628 | |||
629 | /* Clear out the tags using diagnostics. */ | ||
630 | for (i = 0; i < 16; i++) { | ||
631 | unsigned long ptag, ltag; | ||
632 | |||
633 | ptag = strbuf->strbuf_control + | ||
634 | (STRBUF_PTAGDIAG - STRBUF_CONTROL); | ||
635 | ltag = strbuf->strbuf_control + | ||
636 | (STRBUF_LTAGDIAG - STRBUF_CONTROL); | ||
637 | ptag += (unsigned long)i * 8UL; | ||
638 | ltag += (unsigned long)i * 8UL; | ||
639 | |||
640 | upa_writeq(0UL, ptag); | ||
641 | upa_writeq(0UL, ltag); | ||
642 | } | ||
643 | |||
644 | /* Enable DVMA arbitration for all devices/slots. */ | ||
645 | control = upa_readq(iommu->write_complete_reg); | ||
646 | control |= 0x3fUL; | ||
647 | upa_writeq(control, iommu->write_complete_reg); | ||
648 | |||
649 | /* Now some Xfire specific grot... */ | ||
650 | if (this_is_starfire) | ||
651 | starfire_hookup(portid); | ||
652 | |||
653 | sysio_register_error_handlers(op); | ||
654 | return; | ||
655 | |||
656 | fatal_memory_error: | ||
657 | prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); | ||
658 | } | ||
659 | |||
660 | static int __init sbus_init(void) | ||
661 | { | ||
662 | struct device_node *dp; | ||
663 | |||
664 | for_each_node_by_name(dp, "sbus") { | ||
665 | struct of_device *op = of_find_device_by_node(dp); | ||
666 | |||
667 | sbus_iommu_init(op); | ||
668 | of_propagate_archdata(op); | ||
669 | } | ||
670 | |||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | subsys_initcall(sbus_init); | ||
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c new file mode 100644 index 000000000000..c8b03a4f68bf --- /dev/null +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * linux/arch/sparc64/kernel/setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | */ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/stddef.h> | ||
13 | #include <linux/unistd.h> | ||
14 | #include <linux/ptrace.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <asm/smp.h> | ||
17 | #include <linux/user.h> | ||
18 | #include <linux/screen_info.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/syscalls.h> | ||
23 | #include <linux/kdev_t.h> | ||
24 | #include <linux/major.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/inet.h> | ||
28 | #include <linux/console.h> | ||
29 | #include <linux/root_dev.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/cpu.h> | ||
32 | #include <linux/initrd.h> | ||
33 | |||
34 | #include <asm/system.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/processor.h> | ||
37 | #include <asm/oplib.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/idprom.h> | ||
41 | #include <asm/head.h> | ||
42 | #include <asm/starfire.h> | ||
43 | #include <asm/mmu_context.h> | ||
44 | #include <asm/timer.h> | ||
45 | #include <asm/sections.h> | ||
46 | #include <asm/setup.h> | ||
47 | #include <asm/mmu.h> | ||
48 | #include <asm/ns87303.h> | ||
49 | |||
50 | #ifdef CONFIG_IP_PNP | ||
51 | #include <net/ipconfig.h> | ||
52 | #endif | ||
53 | |||
54 | #include "entry.h" | ||
55 | |||
56 | /* Used to synchronize accesses to NatSemi SUPER I/O chip configure | ||
57 | * operations in asm/ns87303.h | ||
58 | */ | ||
59 | DEFINE_SPINLOCK(ns87303_lock); | ||
60 | |||
61 | struct screen_info screen_info = { | ||
62 | 0, 0, /* orig-x, orig-y */ | ||
63 | 0, /* unused */ | ||
64 | 0, /* orig-video-page */ | ||
65 | 0, /* orig-video-mode */ | ||
66 | 128, /* orig-video-cols */ | ||
67 | 0, 0, 0, /* unused, ega_bx, unused */ | ||
68 | 54, /* orig-video-lines */ | ||
69 | 0, /* orig-video-isVGA */ | ||
70 | 16 /* orig-video-points */ | ||
71 | }; | ||
72 | |||
/* Early console output callback: forward the buffer straight to the
 * PROM's write service.  The console argument is unused.
 */
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
	prom_write(s, n);
}
78 | |||
79 | /* Exported for mm/init.c:paging_init. */ | ||
80 | unsigned long cmdline_memory_size = 0; | ||
81 | |||
/* Boot console backed by the PROM; CON_BOOT makes it unregister itself
 * once a real console driver takes over.
 */
static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};
88 | |||
89 | /* | ||
90 | * Process kernel command line switches that are specific to the | ||
91 | * SPARC or that require special low-level processing. | ||
92 | */ | ||
93 | static void __init process_switch(char c) | ||
94 | { | ||
95 | switch (c) { | ||
96 | case 'd': | ||
97 | case 's': | ||
98 | break; | ||
99 | case 'h': | ||
100 | prom_printf("boot_flags_init: Halt!\n"); | ||
101 | prom_halt(); | ||
102 | break; | ||
103 | case 'p': | ||
104 | /* Just ignore, this behavior is now the default. */ | ||
105 | break; | ||
106 | case 'P': | ||
107 | /* Force UltraSPARC-III P-Cache on. */ | ||
108 | if (tlb_type != cheetah) { | ||
109 | printk("BOOT: Ignoring P-Cache force option.\n"); | ||
110 | break; | ||
111 | } | ||
112 | cheetah_pcache_forced_on = 1; | ||
113 | add_taint(TAINT_MACHINE_CHECK); | ||
114 | cheetah_enable_pcache(); | ||
115 | break; | ||
116 | |||
117 | default: | ||
118 | printk("Unknown boot switch (-%c)\n", c); | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | |||
/* Scan the boot command line for "-xyz" switch groups and the "mem="
 * override.  `commands` is advanced in place through the string; any
 * argument that is neither handled here nor a switch group is skipped.
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands && *commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			/* Each character after '-' is its own switch. */
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmM]" overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
							     &commands, 0);
			/* Optional size suffix scales the value. */
			if (*commands == 'K' || *commands == 'k') {
				cmdline_memory_size <<= 10;
				commands++;
			} else if (*commands=='M' || *commands=='m') {
				cmdline_memory_size <<= 20;
				commands++;
			}
		}
		/* Skip the remainder of this argument. */
		while (*commands && *commands != ' ')
			commands++;
	}
}
158 | |||
159 | extern unsigned short root_flags; | ||
160 | extern unsigned short root_dev; | ||
161 | extern unsigned short ram_flags; | ||
162 | #define RAMDISK_IMAGE_START_MASK 0x07FF | ||
163 | #define RAMDISK_PROMPT_FLAG 0x8000 | ||
164 | #define RAMDISK_LOAD_FLAG 0x4000 | ||
165 | |||
166 | extern int root_mountflags; | ||
167 | |||
168 | char reboot_command[COMMAND_LINE_SIZE]; | ||
169 | |||
170 | static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; | ||
171 | |||
172 | void __init per_cpu_patch(void) | ||
173 | { | ||
174 | struct cpuid_patch_entry *p; | ||
175 | unsigned long ver; | ||
176 | int is_jbus; | ||
177 | |||
178 | if (tlb_type == spitfire && !this_is_starfire) | ||
179 | return; | ||
180 | |||
181 | is_jbus = 0; | ||
182 | if (tlb_type != hypervisor) { | ||
183 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
184 | is_jbus = ((ver >> 32UL) == __JALAPENO_ID || | ||
185 | (ver >> 32UL) == __SERRANO_ID); | ||
186 | } | ||
187 | |||
188 | p = &__cpuid_patch; | ||
189 | while (p < &__cpuid_patch_end) { | ||
190 | unsigned long addr = p->addr; | ||
191 | unsigned int *insns; | ||
192 | |||
193 | switch (tlb_type) { | ||
194 | case spitfire: | ||
195 | insns = &p->starfire[0]; | ||
196 | break; | ||
197 | case cheetah: | ||
198 | case cheetah_plus: | ||
199 | if (is_jbus) | ||
200 | insns = &p->cheetah_jbus[0]; | ||
201 | else | ||
202 | insns = &p->cheetah_safari[0]; | ||
203 | break; | ||
204 | case hypervisor: | ||
205 | insns = &p->sun4v[0]; | ||
206 | break; | ||
207 | default: | ||
208 | prom_printf("Unknown cpu type, halting.\n"); | ||
209 | prom_halt(); | ||
210 | }; | ||
211 | |||
212 | *(unsigned int *) (addr + 0) = insns[0]; | ||
213 | wmb(); | ||
214 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
215 | |||
216 | *(unsigned int *) (addr + 4) = insns[1]; | ||
217 | wmb(); | ||
218 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
219 | |||
220 | *(unsigned int *) (addr + 8) = insns[2]; | ||
221 | wmb(); | ||
222 | __asm__ __volatile__("flush %0" : : "r" (addr + 8)); | ||
223 | |||
224 | *(unsigned int *) (addr + 12) = insns[3]; | ||
225 | wmb(); | ||
226 | __asm__ __volatile__("flush %0" : : "r" (addr + 12)); | ||
227 | |||
228 | p++; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | void __init sun4v_patch(void) | ||
233 | { | ||
234 | extern void sun4v_hvapi_init(void); | ||
235 | struct sun4v_1insn_patch_entry *p1; | ||
236 | struct sun4v_2insn_patch_entry *p2; | ||
237 | |||
238 | if (tlb_type != hypervisor) | ||
239 | return; | ||
240 | |||
241 | p1 = &__sun4v_1insn_patch; | ||
242 | while (p1 < &__sun4v_1insn_patch_end) { | ||
243 | unsigned long addr = p1->addr; | ||
244 | |||
245 | *(unsigned int *) (addr + 0) = p1->insn; | ||
246 | wmb(); | ||
247 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
248 | |||
249 | p1++; | ||
250 | } | ||
251 | |||
252 | p2 = &__sun4v_2insn_patch; | ||
253 | while (p2 < &__sun4v_2insn_patch_end) { | ||
254 | unsigned long addr = p2->addr; | ||
255 | |||
256 | *(unsigned int *) (addr + 0) = p2->insns[0]; | ||
257 | wmb(); | ||
258 | __asm__ __volatile__("flush %0" : : "r" (addr + 0)); | ||
259 | |||
260 | *(unsigned int *) (addr + 4) = p2->insns[1]; | ||
261 | wmb(); | ||
262 | __asm__ __volatile__("flush %0" : : "r" (addr + 4)); | ||
263 | |||
264 | p2++; | ||
265 | } | ||
266 | |||
267 | sun4v_hvapi_init(); | ||
268 | } | ||
269 | |||
270 | #ifdef CONFIG_SMP | ||
/* Fatal: the firmware-assigned boot cpu id does not fit in the kernel's
 * NR_CPUS-sized per-cpu structures.  Report via the PROM and halt.
 */
void __init boot_cpu_id_too_large(int cpu)
{
	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
		    cpu, NR_CPUS);
	prom_halt();
}
277 | #endif | ||
278 | |||
/* Architecture-specific boot setup: command line, early console, root
 * device / initrd flags, optional IP autoconfig from PROM properties,
 * boot-cpu trap block, and finally paging.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(boot_command_line, *cmdline_p);
	parse_early_param();

	boot_flags_init(*cmdline_p);
	register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
	else
		printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#elif defined(CONFIG_PROM_CONSOLE)
	conswitchp = &prom_con;
#endif

	idprom_init();

	/* root_flags/root_dev/ram_flags are patched into the image by
	 * the boot tools (see the externs above).
	 */
	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	/* Seed IP autoconfiguration from PROM /chosen properties unless
	 * the user configured it manually on the command line.
	 */
	if (!ic_set_manually) {
		int chosen = prom_finddevice ("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			/* Addresses are known; disable dynamic protocols. */
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup. */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
}
338 | |||
339 | /* BUFFER is PAGE_SIZE bytes long. */ | ||
340 | |||
341 | extern void smp_info(struct seq_file *); | ||
342 | extern void smp_bogo(struct seq_file *); | ||
343 | extern void mmu_info(struct seq_file *); | ||
344 | |||
345 | unsigned int dcache_parity_tl1_occurred; | ||
346 | unsigned int icache_parity_tl1_occurred; | ||
347 | |||
348 | int ncpus_probed; | ||
349 | |||
/* /proc/cpuinfo show callback.  Note the format string and the argument
 * list carry matching #ifndef CONFIG_SMP fragments: the Cpu0ClkTck line
 * and its argument appear together only on UP builds (SMP prints
 * per-cpu clocks via smp_bogo() instead).
 */
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
	seq_printf(m,
		   "cpu\t\t: %s\n"
		   "fpu\t\t: %s\n"
		   "prom\t\t: %s\n"
		   "type\t\t: %s\n"
		   "ncpus probed\t: %d\n"
		   "ncpus active\t: %d\n"
		   "D$ parity tl1\t: %u\n"
		   "I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
		   "Cpu0ClkTck\t: %016lx\n"
#endif
		   ,
		   sparc_cpu_type,
		   sparc_fpu_type,
		   prom_version,
		   ((tlb_type == hypervisor) ?
		    "sun4v" :
		    "sun4u"),
		   ncpus_probed,
		   num_online_cpus(),
		   dcache_parity_tl1_occurred,
		   icache_parity_tl1_occurred
#ifndef CONFIG_SMP
		   , cpu_data(0).clock_tick
#endif
		   );
#ifdef CONFIG_SMP
	smp_bogo(m);
#endif
	mmu_info(m);
#ifdef CONFIG_SMP
	smp_info(m);
#endif
	return 0;
}
388 | |||
389 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
390 | { | ||
391 | /* The pointer we are returning is arbitrary, | ||
392 | * it just has to be non-NULL and not IS_ERR | ||
393 | * in the success case. | ||
394 | */ | ||
395 | return *pos == 0 ? &c_start : NULL; | ||
396 | } | ||
397 | |||
398 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
399 | { | ||
400 | ++*pos; | ||
401 | return c_start(m, pos); | ||
402 | } | ||
403 | |||
/* Nothing to tear down; the iterator holds no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
407 | |||
408 | const struct seq_operations cpuinfo_op = { | ||
409 | .start =c_start, | ||
410 | .next = c_next, | ||
411 | .stop = c_stop, | ||
412 | .show = show_cpuinfo, | ||
413 | }; | ||
414 | |||
415 | extern int stop_a_enabled; | ||
416 | |||
/* Drop into the PROM command prompt in response to Stop-A / BREAK,
 * unless the stop_a_enabled sysctl has disabled it.  User register
 * windows are flushed first so the PROM sees consistent stack state.
 */
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
427 | |||
428 | int stop_a_enabled = 1; | ||
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c new file mode 100644 index 000000000000..ba5b09ad6666 --- /dev/null +++ b/arch/sparc/kernel/signal32.c | |||
@@ -0,0 +1,899 @@ | |||
1 | /* arch/sparc64/kernel/signal32.c | ||
2 | * | ||
3 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | ||
6 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) | ||
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
8 | */ | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/signal.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/wait.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/unistd.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/tty.h> | ||
19 | #include <linux/binfmts.h> | ||
20 | #include <linux/compat.h> | ||
21 | #include <linux/bitops.h> | ||
22 | #include <linux/tracehook.h> | ||
23 | |||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/ptrace.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/psrcompat.h> | ||
28 | #include <asm/fpumacro.h> | ||
29 | #include <asm/visasm.h> | ||
30 | #include <asm/compat_signal.h> | ||
31 | |||
32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
33 | |||
34 | /* This magic should be in g_upper[0] for all upper parts | ||
35 | * to be valid. | ||
36 | */ | ||
37 | #define SIGINFO_EXTRA_V8PLUS_MAGIC 0x130e269 | ||
/* Upper 32 bits of the 64-bit %g and %o registers plus the %asi, saved
 * alongside a 32-bit signal frame for v8plus tasks.
 */
typedef struct {
	unsigned int g_upper[8];	/* upper halves of %g0-%g7 */
	unsigned int o_upper[8];	/* upper halves of %o0-%o7 */
	unsigned int asi;		/* saved %asi */
} siginfo_extra_v8plus_t;
43 | |||
/* Layout of the old-style (non-RT) 32-bit signal frame pushed on the
 * user stack.  This is userspace ABI: do not reorder or resize fields.
 */
struct signal_frame32 {
	struct sparc_stackf32	ss;
	__siginfo32_t		info;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int		insns[2];
	unsigned int		extramask[_COMPAT_NSIG_WORDS - 1];
	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t	v8plus;
	__siginfo_fpu_t		fpu_state;
};
55 | |||
/* 32-bit mirror of siginfo_t: three leading ints plus a union whose
 * member depends on si_code.  Field widths use compat_* types so the
 * layout matches what 32-bit userspace expects.  ABI: do not change.
 */
typedef struct compat_siginfo{
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[SI_PAD_SIZE32];

		/* kill() */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;		/* timer id */
			int _overrun;			/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			compat_pid_t _pid;		/* sender's pid */
			unsigned int _uid;		/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			compat_pid_t _pid;		/* which child */
			unsigned int _uid;		/* sender's uid */
			int _status;			/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
		struct {
			u32 _addr; /* faulting insn/memory ref. */
			int _trapno;
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
}compat_siginfo_t;
107 | |||
/* Layout of the RT 32-bit signal frame pushed on the user stack.
 * Userspace ABI: do not reorder or resize fields.
 */
struct rt_signal_frame32 {
	struct sparc_stackf32	ss;
	compat_siginfo_t	info;
	struct pt_regs32	regs;
	compat_sigset_t		mask;
	/* __siginfo_fpu32_t * */ u32 fpu_save;
	unsigned int		insns[2];
	stack_t32		stack;
	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t	v8plus;
	__siginfo_fpu_t		fpu_state;
};
121 | |||
122 | /* Align macros */ | ||
123 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) | ||
124 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) | ||
125 | |||
/* Convert a native 64-bit siginfo_t to the 32-bit compat layout and
 * copy it to user space.  Returns 0 on success, nonzero (accumulated
 * __put_user error) on fault.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time. */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		/* Negative si_code: raw pass-through of the pad area. */
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* Deliberate fallthrough into default: SIGCHLD
			 * also carries pid/uid, which default provides.
			 */
		default:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_FAULT >> 16:
			err |= __put_user(from->si_trapno, &to->si_trapno);
			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}
178 | |||
179 | /* CAUTION: This is just a very minimalist implementation for the | ||
180 | * sake of compat_sys_rt_sigqueueinfo() | ||
181 | */ | ||
182 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | ||
183 | { | ||
184 | if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t))) | ||
185 | return -EFAULT; | ||
186 | |||
187 | if (copy_from_user(to, from, 3*sizeof(int)) || | ||
188 | copy_from_user(to->_sifields._pad, from->_sifields._pad, | ||
189 | SI_PAD_SIZE)) | ||
190 | return -EFAULT; | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
/* Reload the FPU image saved in a compat signal frame into the
 * thread's software FPU save area.
 *
 * The frame stores 64 single-precision words; fpregs[] holds them as
 * unsigned longs (two 32-bit values each), so the upper bank
 * (si_float_regs[32..63]) lands at fpregs+16.  Returns non-zero if
 * any user access faulted.
 */
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	/* Disable the FPU (clear %fprs and TSTATE_PEF) so the next FP
	 * use traps and reloads the state we stash below.
	 */
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	/* Record which register banks are now valid in the save area. */
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
213 | |||
214 | void do_sigreturn32(struct pt_regs *regs) | ||
215 | { | ||
216 | struct signal_frame32 __user *sf; | ||
217 | unsigned int psr; | ||
218 | unsigned pc, npc, fpu_save; | ||
219 | sigset_t set; | ||
220 | unsigned seta[_COMPAT_NSIG_WORDS]; | ||
221 | int err, i; | ||
222 | |||
223 | /* Always make any pending restarted system calls return -EINTR */ | ||
224 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
225 | |||
226 | synchronize_user_stack(); | ||
227 | |||
228 | regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; | ||
229 | sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; | ||
230 | |||
231 | /* 1. Make sure we are not getting garbage from the user */ | ||
232 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | ||
233 | (((unsigned long) sf) & 3)) | ||
234 | goto segv; | ||
235 | |||
236 | get_user(pc, &sf->info.si_regs.pc); | ||
237 | __get_user(npc, &sf->info.si_regs.npc); | ||
238 | |||
239 | if ((pc | npc) & 3) | ||
240 | goto segv; | ||
241 | |||
242 | if (test_thread_flag(TIF_32BIT)) { | ||
243 | pc &= 0xffffffff; | ||
244 | npc &= 0xffffffff; | ||
245 | } | ||
246 | regs->tpc = pc; | ||
247 | regs->tnpc = npc; | ||
248 | |||
249 | /* 2. Restore the state */ | ||
250 | err = __get_user(regs->y, &sf->info.si_regs.y); | ||
251 | err |= __get_user(psr, &sf->info.si_regs.psr); | ||
252 | |||
253 | for (i = UREG_G1; i <= UREG_I7; i++) | ||
254 | err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]); | ||
255 | if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) { | ||
256 | err |= __get_user(i, &sf->v8plus.g_upper[0]); | ||
257 | if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) { | ||
258 | unsigned long asi; | ||
259 | |||
260 | for (i = UREG_G1; i <= UREG_I7; i++) | ||
261 | err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]); | ||
262 | err |= __get_user(asi, &sf->v8plus.asi); | ||
263 | regs->tstate &= ~TSTATE_ASI; | ||
264 | regs->tstate |= ((asi & 0xffUL) << 24UL); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | /* User can only change condition codes in %tstate. */ | ||
269 | regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC); | ||
270 | regs->tstate |= psr_to_tstate_icc(psr); | ||
271 | |||
272 | /* Prevent syscall restart. */ | ||
273 | pt_regs_clear_syscall(regs); | ||
274 | |||
275 | err |= __get_user(fpu_save, &sf->fpu_save); | ||
276 | if (fpu_save) | ||
277 | err |= restore_fpu_state32(regs, &sf->fpu_state); | ||
278 | err |= __get_user(seta[0], &sf->info.si_mask); | ||
279 | err |= copy_from_user(seta+1, &sf->extramask, | ||
280 | (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); | ||
281 | if (err) | ||
282 | goto segv; | ||
283 | switch (_NSIG_WORDS) { | ||
284 | case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32); | ||
285 | case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32); | ||
286 | case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32); | ||
287 | case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); | ||
288 | } | ||
289 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
290 | spin_lock_irq(¤t->sighand->siglock); | ||
291 | current->blocked = set; | ||
292 | recalc_sigpending(); | ||
293 | spin_unlock_irq(¤t->sighand->siglock); | ||
294 | return; | ||
295 | |||
296 | segv: | ||
297 | force_sig(SIGSEGV, current); | ||
298 | } | ||
299 | |||
300 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | ||
301 | { | ||
302 | struct rt_signal_frame32 __user *sf; | ||
303 | unsigned int psr, pc, npc, fpu_save, u_ss_sp; | ||
304 | mm_segment_t old_fs; | ||
305 | sigset_t set; | ||
306 | compat_sigset_t seta; | ||
307 | stack_t st; | ||
308 | int err, i; | ||
309 | |||
310 | /* Always make any pending restarted system calls return -EINTR */ | ||
311 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
312 | |||
313 | synchronize_user_stack(); | ||
314 | regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; | ||
315 | sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; | ||
316 | |||
317 | /* 1. Make sure we are not getting garbage from the user */ | ||
318 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | ||
319 | (((unsigned long) sf) & 3)) | ||
320 | goto segv; | ||
321 | |||
322 | get_user(pc, &sf->regs.pc); | ||
323 | __get_user(npc, &sf->regs.npc); | ||
324 | |||
325 | if ((pc | npc) & 3) | ||
326 | goto segv; | ||
327 | |||
328 | if (test_thread_flag(TIF_32BIT)) { | ||
329 | pc &= 0xffffffff; | ||
330 | npc &= 0xffffffff; | ||
331 | } | ||
332 | regs->tpc = pc; | ||
333 | regs->tnpc = npc; | ||
334 | |||
335 | /* 2. Restore the state */ | ||
336 | err = __get_user(regs->y, &sf->regs.y); | ||
337 | err |= __get_user(psr, &sf->regs.psr); | ||
338 | |||
339 | for (i = UREG_G1; i <= UREG_I7; i++) | ||
340 | err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]); | ||
341 | if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) { | ||
342 | err |= __get_user(i, &sf->v8plus.g_upper[0]); | ||
343 | if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) { | ||
344 | unsigned long asi; | ||
345 | |||
346 | for (i = UREG_G1; i <= UREG_I7; i++) | ||
347 | err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]); | ||
348 | err |= __get_user(asi, &sf->v8plus.asi); | ||
349 | regs->tstate &= ~TSTATE_ASI; | ||
350 | regs->tstate |= ((asi & 0xffUL) << 24UL); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /* User can only change condition codes in %tstate. */ | ||
355 | regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC); | ||
356 | regs->tstate |= psr_to_tstate_icc(psr); | ||
357 | |||
358 | /* Prevent syscall restart. */ | ||
359 | pt_regs_clear_syscall(regs); | ||
360 | |||
361 | err |= __get_user(fpu_save, &sf->fpu_save); | ||
362 | if (fpu_save) | ||
363 | err |= restore_fpu_state32(regs, &sf->fpu_state); | ||
364 | err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); | ||
365 | err |= __get_user(u_ss_sp, &sf->stack.ss_sp); | ||
366 | st.ss_sp = compat_ptr(u_ss_sp); | ||
367 | err |= __get_user(st.ss_flags, &sf->stack.ss_flags); | ||
368 | err |= __get_user(st.ss_size, &sf->stack.ss_size); | ||
369 | if (err) | ||
370 | goto segv; | ||
371 | |||
372 | /* It is more difficult to avoid calling this function than to | ||
373 | call it and ignore errors. */ | ||
374 | old_fs = get_fs(); | ||
375 | set_fs(KERNEL_DS); | ||
376 | do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); | ||
377 | set_fs(old_fs); | ||
378 | |||
379 | switch (_NSIG_WORDS) { | ||
380 | case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); | ||
381 | case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); | ||
382 | case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32); | ||
383 | case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); | ||
384 | } | ||
385 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
386 | spin_lock_irq(¤t->sighand->siglock); | ||
387 | current->blocked = set; | ||
388 | recalc_sigpending(); | ||
389 | spin_unlock_irq(¤t->sighand->siglock); | ||
390 | return; | ||
391 | segv: | ||
392 | force_sig(SIGSEGV, current); | ||
393 | } | ||
394 | |||
395 | /* Checks if the fp is valid */ | ||
396 | static int invalid_frame_pointer(void __user *fp, int fplen) | ||
397 | { | ||
398 | if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) | ||
399 | return 1; | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize) | ||
404 | { | ||
405 | unsigned long sp; | ||
406 | |||
407 | regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; | ||
408 | sp = regs->u_regs[UREG_FP]; | ||
409 | |||
410 | /* | ||
411 | * If we are on the alternate signal stack and would overflow it, don't. | ||
412 | * Return an always-bogus address instead so we will die with SIGSEGV. | ||
413 | */ | ||
414 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) | ||
415 | return (void __user *) -1L; | ||
416 | |||
417 | /* This is the X/Open sanctioned signal stack switching. */ | ||
418 | if (sa->sa_flags & SA_ONSTACK) { | ||
419 | if (sas_ss_flags(sp) == 0) | ||
420 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
421 | } | ||
422 | |||
423 | /* Always align the stack frame. This handles two cases. First, | ||
424 | * sigaltstack need not be mindful of platform specific stack | ||
425 | * alignment. Second, if we took this signal because the stack | ||
426 | * is not aligned properly, we'd like to take the signal cleanly | ||
427 | * and report that. | ||
428 | */ | ||
429 | sp &= ~7UL; | ||
430 | |||
431 | return (void __user *)(sp - framesize); | ||
432 | } | ||
433 | |||
/* Dump the thread's software-saved FPU image into a compat signal
 * frame.  Mirror of restore_fpu_state32(): the lower 32 singles come
 * from fpregs[0..15], the upper bank from fpregs+16.  Returns
 * non-zero if any user write faulted.
 */
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err = 0;

	/* fpsaved[0] tells which banks hold live data (set by
	 * save_and_clear_fpu() before we get here).
	 */
	fprs = current_thread_info()->fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
453 | |||
/* Build a classic (non-RT) 32-bit signal frame on the user stack and
 * redirect the task so it resumes in the signal handler.  An unusable
 * stack pointer or pending unsaved register windows kill the task
 * with SIGILL; a faulting frame write forces SIGSEGV instead.
 */
static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			  int signo, sigset_t *oldset)
{
	struct signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	unsigned int seta[_COMPAT_NSIG_WORDS];

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* Omit the FPU area from the frame when the FPU is unused. */
	sigframe_size = SF_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	/* User windows still parked in the kernel would be lost. */
	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* First put_user performs the access_ok check; the rest may
	 * use the unchecked __put_user variants.
	 */
	err = put_user(regs->tpc, &sf->info.si_regs.pc);
	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
	err |= __put_user(regs->y, &sf->info.si_regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->info.si_regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
	/* v8plus extra state: upper halves of the 64-bit globals plus
	 * the current %asi, tagged with a magic number so sigreturn
	 * can tell whether the handler preserved it.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Split the 64-bit sigset into 32-bit words for the frame. */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));

	/* Duplicate the caller's register window at the base of the
	 * frame so the handler sees valid stack linkage.
	 */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));

	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer) {
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	} else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;
		pte_t pte;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret" lands exactly on sf->insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
		if (err)
			goto sigsegv;

		/* The trampoline was stored through the D-cache; flush
		 * the I-cache via the kernel mapping of the same page.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		pte = *ptep;
		if (pte_present(pte)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(pte));

			wmb();
			__asm__ __volatile__("flush %0 + %1"
					     : /* no outputs */
					     : "r" (page),
					       "r" (address & (PAGE_SIZE - 1))
					     : "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
587 | |||
/* Build an RT 32-bit signal frame (with siginfo and sigaltstack
 * record) on the user stack and redirect the task into the handler.
 * An unusable stack pointer or pending unsaved register windows kill
 * the task with SIGILL; a faulting frame write forces SIGSEGV.
 */
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			     unsigned long signr, sigset_t *oldset,
			     siginfo_t *info)
{
	struct rt_signal_frame32 __user *sf;
	int sigframe_size;
	u32 psr;
	int i, err;
	compat_sigset_t seta;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* Omit the FPU area from the frame when the FPU is unused. */
	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	/* User windows still parked in the kernel would be lost. */
	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* First put_user performs the access_ok check; the rest may
	 * use the unchecked __put_user variants.
	 */
	err = put_user(regs->tpc, &sf->regs.pc);
	err |= __put_user(regs->tnpc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
	/* v8plus extra state: upper halves of the 64-bit globals plus
	 * the current %asi, tagged with a magic number so sigreturn
	 * can tell whether the handler preserved it.
	 */
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		err |= save_fpu_state32(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Update the siginfo structure. */
	err |= copy_siginfo_to_user32(&sf->info, info);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	/* Split the 64-bit sigset into 32-bit words for the frame. */
	switch (_NSIG_WORDS) {
	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
		seta.sig[6] = oldset->sig[3];
	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
		seta.sig[4] = oldset->sig[2];
	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
		seta.sig[2] = oldset->sig[1];
	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
		seta.sig[0] = oldset->sig[0];
	}
	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));

	/* Duplicate the caller's register window at the base of the
	 * frame so the handler sees valid stack linkage.
	 */
	err |= copy_in_user((u32 __user *)sf,
			    (u32 __user *)(regs->u_regs[UREG_FP]),
			    sizeof(struct reg_window32));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signr;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* Flush instruction space. */
		unsigned long address = ((unsigned long)&(sf->insns[0]));
		pgd_t *pgdp = pgd_offset(current->mm, address);
		pud_t *pudp = pud_offset(pgdp, address);
		pmd_t *pmdp = pmd_offset(pudp, address);
		pte_t *ptep;

		/* %i7 points two instructions before the trampoline so
		 * the handler's "ret" lands exactly on sf->insns[0].
		 */
		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		/* mov __NR_rt_sigreturn, %g1 */
		err |= __put_user(0x82102065, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* The trampoline was stored through the D-cache; flush
		 * the I-cache via the kernel mapping of the same page.
		 */
		preempt_disable();
		ptep = pte_offset_map(pmdp, address);
		if (pte_present(*ptep)) {
			unsigned long page = (unsigned long)
				page_address(pte_page(*ptep));

			wmb();
			__asm__ __volatile__("flush %0 + %1"
					     : /* no outputs */
					     : "r" (page),
					       "r" (address & (PAGE_SIZE - 1))
					     : "memory");
		}
		pte_unmap(ptep);
		preempt_enable();
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}
728 | |||
729 | static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, | ||
730 | siginfo_t *info, | ||
731 | sigset_t *oldset, struct pt_regs *regs) | ||
732 | { | ||
733 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
734 | setup_rt_frame32(ka, regs, signr, oldset, info); | ||
735 | else | ||
736 | setup_frame32(ka, regs, signr, oldset); | ||
737 | |||
738 | spin_lock_irq(¤t->sighand->siglock); | ||
739 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
740 | if (!(ka->sa.sa_flags & SA_NOMASK)) | ||
741 | sigaddset(¤t->blocked,signr); | ||
742 | recalc_sigpending(); | ||
743 | spin_unlock_irq(¤t->sighand->siglock); | ||
744 | } | ||
745 | |||
746 | static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, | ||
747 | struct sigaction *sa) | ||
748 | { | ||
749 | switch (regs->u_regs[UREG_I0]) { | ||
750 | case ERESTART_RESTARTBLOCK: | ||
751 | case ERESTARTNOHAND: | ||
752 | no_system_call_restart: | ||
753 | regs->u_regs[UREG_I0] = EINTR; | ||
754 | regs->tstate |= TSTATE_ICARRY; | ||
755 | break; | ||
756 | case ERESTARTSYS: | ||
757 | if (!(sa->sa_flags & SA_RESTART)) | ||
758 | goto no_system_call_restart; | ||
759 | /* fallthrough */ | ||
760 | case ERESTARTNOINTR: | ||
761 | regs->u_regs[UREG_I0] = orig_i0; | ||
762 | regs->tpc -= 4; | ||
763 | regs->tnpc -= 4; | ||
764 | } | ||
765 | } | ||
766 | |||
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void do_signal32(sigset_t *oldset, struct pt_regs * regs,
		 int restart_syscall, unsigned long orig_i0)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* If the debugger messes with the program counter, it clears
	 * the "in syscall" bit, directing us to not perform a syscall
	 * restart.
	 */
	if (restart_syscall && !pt_regs_is_syscall(regs))
		restart_syscall = 0;

	if (signr > 0) {
		/* Apply restart policy first so the frame we build
		 * captures the (possibly rewound) pc/npc.
		 */
		if (restart_syscall)
			syscall_restart32(orig_i0, regs, &ka.sa);
		handle_signal32(signr, &ka, &info, oldset, regs);

		/* A signal was successfully delivered; the saved
		 * sigmask will have been stored in the signal frame,
		 * and will be restored by sigreturn, so we can simply
		 * clear the TS_RESTORE_SIGMASK flag.
		 */
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;

		tracehook_signal_handler(signr, &info, &ka, regs, 0);
		return;
	}
	/* No signal delivered: replay restartable syscalls directly. */
	if (restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	/* ERESTART_RESTARTBLOCK is re-issued as restart_syscall(2). */
	if (restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}

	/* If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
826 | |||
/* 32-bit view of the legacy SunOS-style sigstack: just a stack top
 * pointer and an on-stack flag — unlike sigaltstack there is no size.
 */
struct sigstack32 {
	u32 the_stack;		/* user address of the stack top */
	int cur_status;		/* non-zero if currently on it */
};
831 | |||
/* Compat implementation of the legacy sigstack(2) call, layered on
 * the task's sigaltstack state.  Returns 0, -EFAULT on a bad user
 * pointer, or -EPERM when trying to switch stacks while running on
 * the current alternate stack.
 */
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
	struct sigstack32 __user *ssptr =
		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
	struct sigstack32 __user *ossptr =
		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		/* sigstack reports the stack *top*, hence sp + size. */
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		u32 ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;

		/* If the current stack was set with sigaltstack, don't
		 * swap stacks while we are on it.
		 */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		 * track onstack-ness, but rather calculate it, we must
		 * presume a size.  Ho hum this interface is lossy.
		 */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	ret = 0;
out:
	return ret;
}
874 | |||
/* Compat sigaltstack(2): convert the 32-bit stack_t32 records to and
 * from native stack_t, then call do_sigaltstack() under a temporary
 * KERNEL_DS override so it accepts our kernel-space stack_t copies.
 */
asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
{
	stack_t uss, uoss;
	u32 u_ss_sp = 0;
	int ret;
	mm_segment_t old_fs;
	stack_t32 __user *uss32 = compat_ptr(ussa);
	stack_t32 __user *uoss32 = compat_ptr(uossa);

	/* Pull in the new settings (if any) from the 32-bit layout;
	 * the leading get_user performs the access check.
	 */
	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
		     __get_user(uss.ss_flags, &uss32->ss_flags) ||
		     __get_user(uss.ss_size, &uss32->ss_size)))
		return -EFAULT;
	uss.ss_sp = compat_ptr(u_ss_sp);
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
			     uossa ? (stack_t __user *) &uoss : NULL, sp);
	set_fs(old_fs);
	/* Write back the previous settings in 32-bit form on success. */
	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss32->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss32->ss_size)))
		return -EFAULT;
	return ret;
}
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c new file mode 100644 index 000000000000..ec82d76dc6f2 --- /dev/null +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -0,0 +1,617 @@ | |||
1 | /* | ||
2 | * arch/sparc64/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net) | ||
6 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | ||
7 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) | ||
8 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
9 | */ | ||
10 | |||
11 | #ifdef CONFIG_COMPAT | ||
12 | #include <linux/compat.h> /* for compat_old_sigset_t */ | ||
13 | #endif | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/signal.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/wait.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/tracehook.h> | ||
21 | #include <linux/unistd.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/tty.h> | ||
24 | #include <linux/binfmts.h> | ||
25 | #include <linux/bitops.h> | ||
26 | |||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/ptrace.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/fpumacro.h> | ||
31 | #include <asm/uctx.h> | ||
32 | #include <asm/siginfo.h> | ||
33 | #include <asm/visasm.h> | ||
34 | |||
35 | #include "entry.h" | ||
36 | #include "systbls.h" | ||
37 | |||
38 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
39 | |||
40 | /* {set, get}context() needed for 64-bit SparcLinux userland. */ | ||
/* setcontext() for 64-bit SparcLinux userland.
 *
 * %i0 holds a user pointer to the ucontext to restore; a non-zero %i1
 * requests that the signal mask also be restored from it.  Any failure
 * (bad pointer, misalignment, pending saved register windows, or a
 * faulting user access) results in SIGSEGV being forced on the task.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	/* Flush live user windows to the stack; we refuse to proceed if
	 * any windows remain buffered in the thread (wsaved != 0), or if
	 * the ucontext pointer is misaligned or not a valid user range.
	 */
	flush_user_windows();
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	/* PC/NPC must be 4-byte aligned instruction addresses. */
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	/* %i1 != 0: also restore the blocked signal mask from ucontext. */
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		/* Never allow SIGKILL/SIGSTOP to be blocked. */
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	/* 32-bit tasks only have a 32-bit program counter space. */
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* Userland may only influence %asi and the condition codes in
	 * %tstate; everything else is preserved from the kernel's view.
	 */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));

	/* Skip %g7 as that's the thread register in userspace. */

	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	/* The saved frame pointer and return address are written back
	 * into the register window save area on the user stack (%fp is
	 * biased on sparc64, hence the STACK_BIAS offset).
	 */
	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	/* Optionally restore the FPU state if the context carries one. */
	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		/* Clear PEF so the FPU state is reloaded lazily on next use. */
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
140 | |||
/* getcontext() for 64-bit SparcLinux userland.
 *
 * %i0 holds a user pointer to the ucontext to fill in with the current
 * CPU state, signal mask, and (in principle) FPU state.  On any failed
 * user access, SIGSEGV is forced on the task.
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	/* Push user windows to the stack first; bail if any windows are
	 * still buffered in the thread, or if we cannot zero the target.
	 */
	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	/* Record the current blocked-signal mask in the context. */
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	/* Dump the general registers into the mcontext gregset. */
	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	/* Read the caller's saved %fp and return address out of the
	 * register window on the (biased) user stack and record them.
	 */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	/* fenab is forced to 0 above, so this branch is currently dead;
	 * kept for the alternate policy of capturing live FPU state.
	 */
	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
                          ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
			  (sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
231 | |||
/* Layout of the rt signal frame pushed onto the user stack by
 * setup_rt_frame() and consumed by do_rt_sigreturn().
 */
struct rt_signal_frame {
	struct sparc_stackf	ss;		/* register window save area */
	siginfo_t		info;		/* signal information */
	struct pt_regs		regs;		/* saved CPU register state */
	__siginfo_fpu_t __user	*fpu_save;	/* NULL, or points at fpu_state below */
	stack_t			stack;		/* sigaltstack state at delivery */
	sigset_t		mask;		/* signal mask to restore on return */
	__siginfo_fpu_t		fpu_state;	/* FPU state (only if FPRS_FEF was set) */
};
241 | |||
/* Common implementation for sigpause()/sigsuspend(): atomically install
 * the given (blockable subset of the) signal mask, sleep until a signal
 * is delivered, and arrange for the original mask to be restored after
 * the handler runs.  Always returns -ERESTARTNOHAND.
 */
static long _sigpause_common(old_sigset_t set)
{
	/* SIGKILL and SIGSTOP can never be masked. */
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Sleep until a signal wakes us. */
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	/* Have the signal-delivery path restore saved_sigmask for us. */
	set_restore_sigmask();

	return -ERESTARTNOHAND;
}
258 | |||
/* sigpause(2): suspend with the given signal mask installed. */
asmlinkage long sys_sigpause(unsigned int set)
{
	return _sigpause_common(set);
}
263 | |||
/* sigsuspend(2): identical semantics to sigpause() on this arch. */
asmlinkage long sys_sigsuspend(old_sigset_t set)
{
	return _sigpause_common(set);
}
268 | |||
/* Reload the FPU state saved in a user signal frame into the thread's
 * FPU save area.  Returns 0 on success, non-zero on a faulting user
 * access.  Clears TSTATE_PEF in the caller's regs so the FPU contents
 * are re-validated/reloaded on the task's next FPU use.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	/* Only copy back the halves (%f0-%f31 / %f32-%f63) that the
	 * saved FPRS says were live.
	 */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
		       		      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
		       		      (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
290 | |||
/* rt_sigreturn(2): restore the process state saved in the rt signal
 * frame on the user stack and resume execution where the signal
 * interrupted it.  Any inconsistency or faulting access forces SIGSEGV.
 */
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* The frame sits just above the (biased) frame pointer. */
	synchronize_user_stack ();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	/* Instruction addresses must be 4-byte aligned. */
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	/* fpu_save is non-NULL only if FPU state was saved at delivery. */
	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);

	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* Prevent syscall restart. */
	pt_regs_clear_syscall(regs);

	/* Reinstall the pre-signal blocked mask (minus the unblockables). */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
352 | |||
353 | /* Checks if the fp is valid */ | ||
354 | static int invalid_frame_pointer(void __user *fp, int fplen) | ||
355 | { | ||
356 | if (((unsigned long) fp) & 7) | ||
357 | return 1; | ||
358 | return 0; | ||
359 | } | ||
360 | |||
/* Copy the thread's saved FPU state into the user signal frame.
 * Only the register halves flagged live in fpsaved[0] are written.
 * Returns 0 on success, non-zero on a faulting user access.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);

	return err;
}
381 | |||
/* Pick the user stack address at which to build a signal frame of
 * @framesize bytes, honoring SA_ONSTACK sigaltstack switching.
 * Returns a deliberately bogus address (-1) on alternate-stack
 * overflow so the subsequent frame write faults with SIGSEGV.
 */
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
	/* sparc64 %sp is biased; un-bias it to get the real address. */
	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
		return (void __user *) -1L;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	/* Always align the stack frame. This handles two cases. First,
	 * sigaltstack need not be mindful of platform specific stack
	 * alignment. Second, if we took this signal because the stack
	 * is not aligned properly, we'd like to take the signal cleanly
	 * and report that.
	 */
	sp &= ~7UL;

	return (void __user *)(sp - framesize);
}
409 | |||
/* Build an rt signal frame on the user stack and redirect user
 * execution to the signal handler.
 *
 * @ka:     the action being delivered (handler, flags, restorer)
 * @regs:   interrupted user register state; rewritten in place
 * @signo:  signal number
 * @oldset: signal mask to record in the frame for sigreturn
 * @info:   full siginfo, or NULL to store a minimal SI_NOINFO record
 *
 * Kills the task with SIGILL on an unusable stack, or forces SIGSEGV
 * if writing the frame faults.
 */
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The trailing fpu_state member is omitted when the FPU is clean. */
	sigframe_size = sizeof(struct rt_signal_frame);
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame __user *)
		get_sigframe(ka, regs, sigframe_size);

	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	/* Buffered register windows would be lost across delivery. */
	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	/* Duplicate the caller's register window at the base of the new
	 * frame so the handler gets a valid save area to flush into.
	 */
	err |= copy_in_user((u64 __user *)sf,
			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 5. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 4. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
491 | |||
/* Deliver one signal: build its frame, then update the blocked mask
 * with the handler's sa_mask (and the signal itself unless SA_NOMASK).
 */
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	/* Only pass siginfo through if the handler asked for it. */
	setup_rt_frame(ka, regs, signr, oldset,
		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NOMASK))
		sigaddset(&current->blocked,signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
505 | |||
/* Decide how an interrupted system call resumes when a handler is
 * about to run, based on the error code left in %i0.  Either convert
 * the result to EINTR (with the carry bits set to flag an error), or
 * rewind the PC/NPC by one instruction to re-issue the trap with the
 * original %i0 argument restored.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		/* Fail the syscall with EINTR; carry bits signal error. */
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		/* Restartable only if the handler was installed SA_RESTART. */
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		/* Back up to the trap instruction so it re-executes. */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
526 | |||
527 | /* Note that 'init' is a special process: it doesn't get signals it doesn't | ||
528 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
529 | * mistake. | ||
530 | */ | ||
/* Main signal dispatch, called on the way back to userspace.
 *
 * @regs:    the interrupted user register state
 * @orig_i0: the syscall's original first argument, needed to replay it
 *
 * Delivers one pending signal (delegating to do_signal32() for compat
 * tasks), handling syscall restart both when a handler runs and when
 * no handler is invoked.
 */
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
	struct k_sigaction ka;
	int restart_syscall;
	sigset_t *oldset;
	siginfo_t info;
	int signr;

	/* We were in a syscall and it failed (carry set) iff a restart
	 * may be needed.
	 */
	if (pt_regs_is_syscall(regs) &&
	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
		restart_syscall = 1;
	} else
		restart_syscall = 0;

	/* Use the mask saved by sigsuspend()/sigpause() if one is pending. */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		extern void do_signal32(sigset_t *, struct pt_regs *,
					int restart_syscall,
					unsigned long orig_i0);
		do_signal32(oldset, regs, restart_syscall, orig_i0);
		return;
	}
#endif

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	/* If the debugger messes with the program counter, it clears
	 * the software "in syscall" bit, directing us to not perform
	 * a syscall restart.
	 */
	if (restart_syscall && !pt_regs_is_syscall(regs))
		restart_syscall = 0;

	if (signr > 0) {
		if (restart_syscall)
			syscall_restart(orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset, regs);

		/* A signal was successfully delivered; the saved
		 * sigmask will have been stored in the signal frame,
		 * and will be restored by sigreturn, so we can simply
		 * clear the TS_RESTORE_SIGMASK flag.
		 */
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;

		tracehook_signal_handler(signr, &info, &ka, regs, 0);
		return;
	}
	/* No handler ran: replay restartable syscalls ourselves. */
	if (restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	/* ERESTART_RESTARTBLOCK replays via the restart_syscall syscall. */
	if (restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}

	/* If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
608 | |||
/* Entry from the return-to-userspace path: handle pending signals
 * and tracehook resume notifications flagged in thread_info_flags.
 */
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs, orig_i0);
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c new file mode 100644 index 000000000000..b5225c81556c --- /dev/null +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -0,0 +1,1412 @@ | |||
1 | /* smp.c: Sparc64 SMP support. | ||
2 | * | ||
3 | * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/pagemap.h> | ||
11 | #include <linux/threads.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/seq_file.h> | ||
20 | #include <linux/cache.h> | ||
21 | #include <linux/jiffies.h> | ||
22 | #include <linux/profile.h> | ||
23 | #include <linux/lmb.h> | ||
24 | #include <linux/cpu.h> | ||
25 | |||
26 | #include <asm/head.h> | ||
27 | #include <asm/ptrace.h> | ||
28 | #include <asm/atomic.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | #include <asm/mmu_context.h> | ||
31 | #include <asm/cpudata.h> | ||
32 | #include <asm/hvtramp.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/timer.h> | ||
35 | |||
36 | #include <asm/irq.h> | ||
37 | #include <asm/irq_regs.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/oplib.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/starfire.h> | ||
43 | #include <asm/tlb.h> | ||
44 | #include <asm/sections.h> | ||
45 | #include <asm/prom.h> | ||
46 | #include <asm/mdesc.h> | ||
47 | #include <asm/ldc.h> | ||
48 | #include <asm/hypervisor.h> | ||
49 | |||
/* Non-zero when the machine description reports multi-core CPUs. */
int sparc64_multi_core __read_mostly;

/* Global CPU topology maps, exported for drivers and the scheduler. */
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

/* CPUs released by the boot CPU to enter the online set (see smp_callin). */
static cpumask_t smp_commenced_mask;
64 | |||
/* Emit a one-line online/offline state report per CPU into /proc output. */
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}
73 | |||
/* Emit each online CPU's clock tick rate into /proc/cpuinfo output. */
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
83 | |||
extern void setup_sparc64_timer(void);

/* Set by a secondary CPU in smp_callin() to tell the boot CPU it arrived. */
static volatile unsigned long callin_flag = 0;
87 | |||
/* First C code executed by a freshly started secondary CPU: set up its
 * per-cpu state, signal the boot CPU via callin_flag, wait to be
 * released through smp_commenced_mask, then mark itself online.
 */
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	/* sun4v CPUs must register their kernel TSB with the hypervisor. */
	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	/* Tell the boot CPU we made it into the kernel. */
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Spin until the boot CPU releases us. */
	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	ipi_call_lock();
	cpu_set(cpuid, cpu_online_map);
	ipi_call_unlock();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}
132 | |||
/* Called if a CPU's idle loop ever returns — that should be impossible. */
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
138 | |||
139 | /* This tick register synchronization scheme is taken entirely from | ||
140 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. | ||
141 | * | ||
142 | * The only change I've made is to rework it so that the master | ||
143 | * initiates the synchonization instead of the slave. -DaveM | ||
144 | */ | ||
145 | |||
146 | #define MASTER 0 | ||
147 | #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long)) | ||
148 | |||
149 | #define NUM_ROUNDS 64 /* magic value */ | ||
150 | #define NUM_ITERS 5 /* likewise */ | ||
151 | |||
152 | static DEFINE_SPINLOCK(itc_sync_lock); | ||
153 | static unsigned long go[SLAVE + 1]; | ||
154 | |||
155 | #define DEBUG_TICK_SYNC 0 | ||
156 | |||
/* One measurement round of the master/slave tick handshake (run on the
 * slave).  Performs NUM_ITERS round trips through the go[] mailbox,
 * keeps the iteration with the smallest round-trip time, and returns
 * the estimated offset of this CPU's tick from the master's (midpoint
 * of the best t0/t1 window minus the master's timestamp).  *rt and
 * *master report the best round-trip time and master timestamp delta.
 */
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		/* Ping the master, then wait for its timestamp reply. */
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		/* Keep the tightest (lowest-latency) sample. */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
186 | |||
/* Slave side of tick synchronization: iteratively measure the offset
 * from the master's tick (see get_delta) and adjust the local tick
 * register until the measured delta reaches zero, applying a latency
 * correction accumulated over the rounds.
 */
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/* Announce readiness and wait for the master to start the loop. */
	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					/* Fold in a running latency estimate
					 * to damp oscillation.
					 */
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
243 | |||
244 | static void smp_start_sync_tick_client(int cpu); | ||
245 | |||
/* Master side of %tick synchronization for one slave CPU.
 *
 * Kicks the slave into smp_synchronize_tick_client() via an xcall,
 * handshakes through the go[] mailbox, then publishes a fresh tick
 * sample to go[SLAVE] each time the slave requests one by setting
 * go[MASTER], for NUM_ROUNDS*NUM_ITERS iterations.  itc_sync_lock
 * serializes against any other concurrent tick synchronization.
 */
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();	/* order the ack before the tick publish */
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
275 | |||
276 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) | ||
277 | /* XXX Put this in some common place. XXX */ | ||
278 | static unsigned long kimage_addr_to_ra(void *p) | ||
279 | { | ||
280 | unsigned long val = (unsigned long) p; | ||
281 | |||
282 | return kern_base + (val - KERNBASE); | ||
283 | } | ||
284 | |||
/* Start CPU through the sun4v Logical Domains hypervisor interface.
 *
 * Builds an hvtramp_descr describing the cpu number, its MMU fault
 * status area, the initial thread register value, and the locked
 * kernel-image TTE mappings, then asks the hypervisor to launch the
 * cpu at the hv_cpu_startup trampoline.  On failure only a message is
 * printed; the caller detects the dead cpu via the callin timeout.
 * The descriptor is stashed in tb->hdesc and freed later by
 * smp_boot_one_cpu().
 */
static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	/* struct hvtramp_descr already contains one hvtramp_mapping,
	 * hence the "- 1" on the trailing array allocation.
	 */
	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	/* The kernel image is covered by consecutive 4MB (0x400000)
	 * locked mappings starting at KERNBASE.
	 */
	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
336 | #endif | ||
337 | |||
338 | extern unsigned long sparc64_cpu_startup; | ||
339 | |||
340 | /* The OBP cpu startup callback truncates the 3rd arg cookie to | ||
341 | * 32-bits (I think) so to be safe we have it read the pointer | ||
342 | * contained here so we work on >4GB machines. -DaveM | ||
343 | */ | ||
344 | static struct thread_info *cpu_new_thread = NULL; | ||
345 | |||
346 | static int __cpuinit smp_boot_one_cpu(unsigned int cpu) | ||
347 | { | ||
348 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
349 | unsigned long entry = | ||
350 | (unsigned long)(&sparc64_cpu_startup); | ||
351 | unsigned long cookie = | ||
352 | (unsigned long)(&cpu_new_thread); | ||
353 | struct task_struct *p; | ||
354 | int timeout, ret; | ||
355 | |||
356 | p = fork_idle(cpu); | ||
357 | if (IS_ERR(p)) | ||
358 | return PTR_ERR(p); | ||
359 | callin_flag = 0; | ||
360 | cpu_new_thread = task_thread_info(p); | ||
361 | |||
362 | if (tlb_type == hypervisor) { | ||
363 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) | ||
364 | if (ldom_domaining_enabled) | ||
365 | ldom_startcpu_cpuid(cpu, | ||
366 | (unsigned long) cpu_new_thread); | ||
367 | else | ||
368 | #endif | ||
369 | prom_startcpu_cpuid(cpu, entry, cookie); | ||
370 | } else { | ||
371 | struct device_node *dp = of_find_node_by_cpuid(cpu); | ||
372 | |||
373 | prom_startcpu(dp->node, entry, cookie); | ||
374 | } | ||
375 | |||
376 | for (timeout = 0; timeout < 50000; timeout++) { | ||
377 | if (callin_flag) | ||
378 | break; | ||
379 | udelay(100); | ||
380 | } | ||
381 | |||
382 | if (callin_flag) { | ||
383 | ret = 0; | ||
384 | } else { | ||
385 | printk("Processor %d is stuck.\n", cpu); | ||
386 | ret = -ENODEV; | ||
387 | } | ||
388 | cpu_new_thread = NULL; | ||
389 | |||
390 | if (tb->hdesc) { | ||
391 | kfree(tb->hdesc); | ||
392 | tb->hdesc = NULL; | ||
393 | } | ||
394 | |||
395 | return ret; | ||
396 | } | ||
397 | |||
/* Dispatch a single cross-call mondo (data0/data1/data2) to CPU on
 * Spitfire-class hardware via the UDB interrupt dispatch registers,
 * then poll ASI_INTR_DISPATCH_STAT until the send completes, is
 * NACK'd (in which case we delay and retry), or wedges after 100000
 * polls.  Interrupts (PSTATE_IE) are disabled across the dispatch
 * and the caller's pstate is restored before returning.
 */
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	/* Interrupt dispatch target: UPA id in bits 14+, 0x70 selects
	 * the dispatch register set.
	 */
	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			/* Dispatch complete: restore pstate and done. */
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		/* NACK'd: back off briefly and resend. */
		udelay(2);
		goto again;
	}
}
463 | |||
464 | static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) | ||
465 | { | ||
466 | u64 *mondo, data0, data1, data2; | ||
467 | u16 *cpu_list; | ||
468 | u64 pstate; | ||
469 | int i; | ||
470 | |||
471 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | ||
472 | cpu_list = __va(tb->cpu_list_pa); | ||
473 | mondo = __va(tb->cpu_mondo_block_pa); | ||
474 | data0 = mondo[0]; | ||
475 | data1 = mondo[1]; | ||
476 | data2 = mondo[2]; | ||
477 | for (i = 0; i < cnt; i++) | ||
478 | spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]); | ||
479 | } | ||
480 | |||
481 | /* Cheetah now allows to send the whole 64-bytes of data in the interrupt | ||
482 | * packet, but we have no use for that. However we do take advantage of | ||
483 | * the new pipelining feature (ie. dispatch to multiple cpus simultaneously). | ||
484 | */ | ||
/* Deliver the mondo in TB's block to up to 32 cpus at once using
 * Cheetah's pipelined interrupt dispatch.
 *
 * Each dispatch occupies one busy/nack slot pair in
 * ASI_INTR_DISPATCH_STAT (on JBUS parts the slot is fixed by the
 * target's ITID instead).  We poll until all busy and nack bits
 * clear; NACK'd targets are retried after a randomized-ish delay,
 * and lists longer than 32 cpus are processed in chunks via the
 * need_more/retry path.
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	/* Disable interrupts while the dispatch is in flight. */
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		/* Fire one dispatch per live list entry; entries already
		 * set to 0xffff (delivered on a previous pass) are skipped.
		 */
		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					/* This 32-cpu chunk completed; mark
					 * it done and go send the next one.
					 */
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
627 | |||
628 | /* Multi-cpu list version. */ | ||
/* Deliver the mondo to every cpu in TB's list with sun4v
 * cpu_mondo_send hypervisor calls.
 *
 * The hypervisor marks successfully-delivered list entries 0xffff, so
 * on partial delivery (HV_EWOULDBLOCK) we simply reissue the same
 * list.  Cpus found in error state (HV_ECPUERROR) are masked out and
 * reported once at the end.  A timeout is declared only after 10000
 * consecutive retries with no forward progress; any other hypervisor
 * status is treated as a fatal mondo error and the list is dumped.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					/* Remember one offender (+1 so zero
					 * still means "none seen").
					 */
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
740 | |||
741 | static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); | ||
742 | |||
/* Common front end for all cross calls: fill in this cpu's mondo
 * block and target cpu list, then hand off to the cpu-type-specific
 * delivery routine (spitfire/cheetah/hypervisor) chosen at boot in
 * smp_setup_processor_id().  The calling cpu and offline cpus are
 * filtered out of MASK; if nothing remains, no delivery is attempted.
 */
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();	/* mondo words must be visible before delivery starts */

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask_nr(i, *mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
787 | |||
788 | /* Send cross call to all processors mentioned in MASK_P | ||
789 | * except self. Really, there are only two cases currently, | ||
790 | * "&cpu_online_map" and "&mm->cpu_vm_mask". | ||
791 | */ | ||
792 | static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) | ||
793 | { | ||
794 | u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff)); | ||
795 | |||
796 | xcall_deliver(data0, data1, data2, mask); | ||
797 | } | ||
798 | |||
799 | /* Send cross call to all processors except self. */ | ||
/* Send cross call to all processors except self.  Convenience wrapper
 * around smp_cross_call_masked() with the online-cpu map.
 */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
}
804 | |||
805 | extern unsigned long xcall_sync_tick; | ||
806 | |||
/* Kick CPU into the xcall_sync_tick handler, which eventually runs
 * smp_synchronize_tick_client() on that cpu.
 */
static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      &cpumask_of_cpu(cpu));
}
812 | |||
813 | extern unsigned long xcall_call_function; | ||
814 | |||
/* Generic smp_call_function() backend: IPI every cpu in MASK into the
 * xcall_call_function trap handler.
 */
void arch_send_call_function_ipi(cpumask_t mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
}
819 | |||
820 | extern unsigned long xcall_call_function_single; | ||
821 | |||
/* Generic smp_call_function_single() backend: IPI one cpu into the
 * xcall_call_function_single trap handler.
 */
void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      &cpumask_of_cpu(cpu));
}
827 | |||
/* Softint handler for the call-function IPI: ack the softint and run
 * the generic smp_call_function dispatch.
 */
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}
833 | |||
/* Softint handler for the single-target call-function IPI. */
void smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}
839 | |||
840 | static void tsb_sync(void *info) | ||
841 | { | ||
842 | struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; | ||
843 | struct mm_struct *mm = info; | ||
844 | |||
845 | /* It is not valid to test "currrent->active_mm == mm" here. | ||
846 | * | ||
847 | * The value of "current" is not changed atomically with | ||
848 | * switch_mm(). But that's OK, we just need to check the | ||
849 | * current cpu's trap block PGD physical address. | ||
850 | */ | ||
851 | if (tp->pgd_paddr == __pa(mm->pgd)) | ||
852 | tsb_context_switch(mm); | ||
853 | } | ||
854 | |||
/* Run tsb_sync() on every cpu in MM's cpu_vm_mask, waiting for
 * completion, so all cpus using MM pick up its new TSB configuration.
 */
void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
}
859 | |||
860 | extern unsigned long xcall_flush_tlb_mm; | ||
861 | extern unsigned long xcall_flush_tlb_pending; | ||
862 | extern unsigned long xcall_flush_tlb_kernel_range; | ||
863 | extern unsigned long xcall_fetch_glob_regs; | ||
864 | extern unsigned long xcall_receive_signal; | ||
865 | extern unsigned long xcall_new_mmu_context_version; | ||
866 | #ifdef CONFIG_KGDB | ||
867 | extern unsigned long xcall_kgdb_capture; | ||
868 | #endif | ||
869 | |||
870 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
871 | extern unsigned long xcall_flush_dcache_page_cheetah; | ||
872 | #endif | ||
873 | extern unsigned long xcall_flush_dcache_page_spitfire; | ||
874 | |||
875 | #ifdef CONFIG_DEBUG_DCFLUSH | ||
876 | extern atomic_t dcpage_flushes; | ||
877 | extern atomic_t dcpage_flushes_xcall; | ||
878 | #endif | ||
879 | |||
/* Flush PAGE from this cpu's D-cache (and I-cache on spitfire when
 * the page has a mapping).  Which flush primitive is used depends on
 * whether D-cache aliasing is possible on this configuration.
 */
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
892 | |||
/* Flush PAGE from the D-cache of CPU.  If CPU is ourselves, flush
 * locally; otherwise send the appropriate flush xcall for the
 * processor type.  sun4v (hypervisor) machines have coherent
 * D-caches, so nothing needs to be done there.
 */
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			/* Bit 32 tells the spitfire handler to also
			 * flush the I-cache for mapped pages.
			 */
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		/* data0 stays 0 (no xcall needed) for cheetah without
		 * D-cache aliasing.
		 */
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, &cpumask_of_cpu(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}
932 | |||
/* Flush PAGE from the D-cache of every online cpu (xcall to the
 * others, local flush for ourselves).  Same per-cpu-type dispatch as
 * smp_flush_dcache_page_impl(); sun4v D-caches are coherent so this
 * is a no-op there.
 */
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	int this_cpu;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		/* Bit 32 asks the handler for an I-cache flush too. */
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, &cpu_online_map);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	put_cpu();
}
969 | |||
/* Softint handler for the new-MMU-context-version xcall: if this
 * cpu's active mm is using a stale context, allocate a fresh one,
 * then reload the secondary context register and flush the old
 * context's TLB entries.  Kernel threads (init_mm) need nothing.
 */
void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}
995 | |||
/* Tell every other cpu that the global MMU context version changed,
 * so each revalidates its active mm's context (see the client above).
 */
void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}
1000 | |||
1001 | #ifdef CONFIG_KGDB | ||
/* KGDB hook: cross call all other cpus into the kgdb capture handler
 * so the debugger can inspect them.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
1006 | #endif | ||
1007 | |||
/* Ask every other cpu to dump its current register state (used by
 * the sysrq global-register dump).
 */
void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}
1012 | |||
1013 | /* We know that the window frames of the user have been flushed | ||
1014 | * to the stack before we get here because all callers of us | ||
1015 | * are flush_tlb_*() routines, and these run after flush_cache_*() | ||
1016 | * which performs the flushw. | ||
1017 | * | ||
1018 | * The SMP TLB coherency scheme we use works as follows: | ||
1019 | * | ||
1020 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address | ||
1021 | * space has (potentially) executed on, this is the heuristic | ||
1022 | * we use to avoid doing cross calls. | ||
1023 | * | ||
1024 | * Also, for flushing from kswapd and also for clones, we | ||
1025 | * use cpu_vm_mask as the list of cpus to make run the TLB. | ||
1026 | * | ||
1027 | * 2) TLB context numbers are shared globally across all processors | ||
1028 | * in the system, this allows us to play several games to avoid | ||
1029 | * cross calls. | ||
1030 | * | ||
1031 | * One invariant is that when a cpu switches to a process, and | ||
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
1033 | * current cpu's bit set, that tlb context is flushed locally. | ||
1034 | * | ||
1035 | * If the address space is non-shared (ie. mm->count == 1) we avoid | ||
1036 | * cross calls when we want to flush the currently running process's | ||
1037 | * tlb state. This is done by clearing all cpu bits except the current | ||
1038 | * processor's in current->active_mm->cpu_vm_mask and performing the | ||
1039 | * flush locally only. This will force any subsequent cpus which run | ||
1040 | * this task to flush the context from the local tlb if the process | ||
1041 | * migrates to another cpu (again). | ||
1042 | * | ||
1043 | * 3) For shared address spaces (threads) and swapping we bite the | ||
1044 | * bullet for most cases and perform the cross call (but only to | ||
1045 | * the cpus listed in cpu_vm_mask). | ||
1046 | * | ||
1047 | * The performance gain from "optimizing" away the cross call for threads is | ||
1048 | * questionable (in theory the big win for threads is the massive sharing of | ||
1049 | * address space state across processors). | ||
1050 | */ | ||
1051 | |||
1052 | /* This currently is only used by the hugetlb arch pre-fault | ||
1053 | * hook on UltraSPARC-III+ and later when changing the pagesize | ||
1054 | * bits of the context register for an address space. | ||
1055 | */ | ||
1056 | void smp_flush_tlb_mm(struct mm_struct *mm) | ||
1057 | { | ||
1058 | u32 ctx = CTX_HWBITS(mm->context); | ||
1059 | int cpu = get_cpu(); | ||
1060 | |||
1061 | if (atomic_read(&mm->mm_users) == 1) { | ||
1062 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | ||
1063 | goto local_flush_and_out; | ||
1064 | } | ||
1065 | |||
1066 | smp_cross_call_masked(&xcall_flush_tlb_mm, | ||
1067 | ctx, 0, 0, | ||
1068 | &mm->cpu_vm_mask); | ||
1069 | |||
1070 | local_flush_and_out: | ||
1071 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | ||
1072 | |||
1073 | put_cpu(); | ||
1074 | } | ||
1075 | |||
/* Flush NR pending virtual addresses (VADDRS) in MM's context from
 * the TLBs of all cpus in MM's cpu_vm_mask, then locally.  As in
 * smp_flush_tlb_mm(), a single-user mm running here can shrink its
 * cpu_vm_mask instead of cross calling.
 */
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      &mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
1092 | |||
1093 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
1094 | { | ||
1095 | start &= PAGE_MASK; | ||
1096 | end = PAGE_ALIGN(end); | ||
1097 | if (start != end) { | ||
1098 | smp_cross_call(&xcall_flush_tlb_kernel_range, | ||
1099 | 0, start, end); | ||
1100 | |||
1101 | __flush_tlb_kernel_range(start, end); | ||
1102 | } | ||
1103 | } | ||
1104 | |||
1105 | /* CPU capture. */ | ||
1106 | /* #define CAPTURE_DEBUG */ | ||
1107 | extern unsigned long xcall_capture; | ||
1108 | |||
1109 | static atomic_t smp_capture_depth = ATOMIC_INIT(0); | ||
1110 | static atomic_t smp_capture_registry = ATOMIC_INIT(0); | ||
1111 | static unsigned long penguins_are_doing_time; | ||
1112 | |||
/* Corral every other online cpu into the capture "jailcell" (see
 * smp_penguin_jailcell()) and wait until they have all checked in.
 * Nested captures are allowed; only the outermost caller (depth 1)
 * actually sends the xcall.  Balanced by smp_release().
 */
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		/* The registry counts ourselves plus each captured cpu. */
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
1134 | |||
/* Undo one level of smp_capture().  When the outermost capture is
 * released, clear penguins_are_doing_time so the jailed cpus spin
 * free, and drop our own registry reference.
 */
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
1148 | |||
1149 | /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE | ||
1150 | * set, so they can service tlb flush xcalls... | ||
1151 | */ | ||
1152 | extern void prom_world(int); | ||
1153 | |||
/* Softint handler run on a captured cpu: flush register windows,
 * switch to the PROM's view of the world, check in via
 * smp_capture_registry, and spin until smp_release() on the capturing
 * cpu clears penguins_are_doing_time.
 */
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
1171 | |||
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	/* Changing the profiling-timer multiplier is not supported here. */
	return -EINVAL;
}
1177 | |||
/* Intentionally empty on sparc64: no extra preparation is done here. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
1181 | |||
/* Intentionally empty: the boot CPU needs no extra preparation here. */
void __devinit smp_prepare_boot_cpu(void)
{
}
1185 | |||
1186 | void __init smp_setup_processor_id(void) | ||
1187 | { | ||
1188 | if (tlb_type == spitfire) | ||
1189 | xcall_deliver_impl = spitfire_xcall_deliver; | ||
1190 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
1191 | xcall_deliver_impl = cheetah_xcall_deliver; | ||
1192 | else | ||
1193 | xcall_deliver_impl = hypervisor_xcall_deliver; | ||
1194 | } | ||
1195 | |||
/* Build the topology masks for all present CPUs:
 *  - cpu_core_map[i]: all CPUs sharing i's core_id; just i itself
 *    when core_id is 0 (unknown).
 *  - per-cpu cpu_sibling_map: all CPUs sharing i's proc_id; just i
 *    itself when proc_id is -1 (unknown).
 */
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			/* Unknown core id: this CPU is its own group. */
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			/* Unknown proc id: this CPU is its own sibling. */
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}
1232 | |||
/* Bring 'cpu' online: boot it, admit it into smp_commenced_mask, wait
 * for it to mark itself online, then (on non-hypervisor chips, where
 * %tick is writable) synchronize its tick with ours.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		/* NOTE(review): the loop above only exits once the CPU
		 * is in cpu_online_map, so this -ENODEV branch looks
		 * unreachable -- confirm whether it guards some race
		 * before removing it.
		 */
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
1253 | |||
#ifdef CONFIG_HOTPLUG_CPU
/* Last code run by a CPU going offline: tear down its sun4v mondo
 * queues (if on the hypervisor), leave smp_commenced_mask, mask
 * interrupts at both the IRQ and %pstate level, then spin until the
 * system stops it.  Never returns.
 */
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		/* Unconfigure (size 0) all four per-cpu mondo queues. */
		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	/* __cpu_die() watches for this bit to drop. */
	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	/* wrpr XORs its operands into %pstate: flip PSTATE_IE to mask
	 * interrupts at the processor level as well.
	 */
	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
1289 | |||
/* Detach the calling CPU in preparation for offlining: drop it from
 * the topology maps, reset its cpuinfo topology ids, migrate IRQs
 * away, drain pending interrupts, and remove it from cpu_online_map.
 * Always succeeds (returns 0).
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	/* Remove this CPU from every core/sibling map it appears in. */
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	/* Mark the ids unknown (see smp_fill_in_sib_core_maps). */
	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	/* Briefly let interrupts in so anything already pending gets
	 * serviced before we go offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	ipi_call_lock();
	cpu_clear(cpu, cpu_online_map);
	ipi_call_unlock();

	return 0;
}
1324 | |||
/* Wait (up to 100 * 100ms) for a dying CPU to drop out of
 * smp_commenced_mask; on LDOMs configurations additionally ask the
 * hypervisor to stop it and remove it from cpu_present_map.
 */
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		/* Retry the stop hypercall; the CPU may still be on
		 * its way into the stopped state.
		 */
		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif
1357 | |||
/* Intentionally empty: nothing to finalize after all CPUs are up. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
1361 | |||
/* Poke 'cpu' with the xcall_receive_signal cross-call (acknowledged by
 * smp_receive_signal_client below).
 */
void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      &cpumask_of_cpu(cpu));
}
1367 | |||
/* Softint handler for the reschedule cross-call: just ACK the softint. */
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}
1372 | |||
/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.  (See smp_capture().)
 */
void smp_send_stop(void)
{
}
1379 | |||
/* Base and per-cpu stride shift used by the per-cpu offset macros. */
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

/* Allocate and populate the per-cpu data areas: round the per-cpu size
 * up to a power-of-two number of pages (tracked as a shift so offset
 * computation is cheap), carve NR_CPUS copies out of boot memory, copy
 * the initial per-cpu image into each, and point the boot cpu at its
 * own copy.  Halts via the PROM if boot memory cannot be allocated.
 */
void __init real_setup_per_cpu_areas(void)
{
	unsigned long paddr, goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
	if (!paddr) {
		prom_printf("Cannot allocate per-cpu memory.\n");
		prom_halt();
	}

	ptr = __va(paddr);
	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c new file mode 100644 index 000000000000..c450825b3fe5 --- /dev/null +++ b/arch/sparc/kernel/sparc_ksyms_64.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support. | ||
2 | * | ||
3 | * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | */ | ||
7 | |||
8 | /* Tell string.h we don't want memcpy etc. as cpp defines */ | ||
9 | #define EXPORT_SYMTAB_STROPS | ||
10 | #define PROMLIB_INTERNAL | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/in6.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/fs_struct.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/socket.h> | ||
23 | #include <linux/syscalls.h> | ||
24 | #include <linux/percpu.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/rwsem.h> | ||
27 | #include <net/compat.h> | ||
28 | |||
29 | #include <asm/oplib.h> | ||
30 | #include <asm/system.h> | ||
31 | #include <asm/auxio.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/idprom.h> | ||
36 | #include <asm/elf.h> | ||
37 | #include <asm/head.h> | ||
38 | #include <asm/smp.h> | ||
39 | #include <asm/ptrace.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/checksum.h> | ||
42 | #include <asm/fpumacro.h> | ||
43 | #include <asm/pgalloc.h> | ||
44 | #include <asm/cacheflush.h> | ||
45 | #ifdef CONFIG_SBUS | ||
46 | #include <asm/dma.h> | ||
47 | #endif | ||
48 | #include <asm/ns87303.h> | ||
49 | #include <asm/timer.h> | ||
50 | #include <asm/cpudata.h> | ||
51 | #include <asm/ftrace.h> | ||
52 | #include <asm/hypervisor.h> | ||
53 | |||
/* Local mirror of the userspace pollfd layout (fd/events/revents);
 * presumably kept for compat syscall plumbing -- TODO confirm it is
 * still referenced, it is not exported below.
 */
struct poll {
	int fd;
	short events;
	short revents;
};
59 | |||
/* Prototypes for symbols implemented in assembler or elsewhere without
 * a header of their own; declared here so they can be exported below.
 */
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
extern int __memcmp(const void *, const void *, __kernel_size_t);
extern __kernel_size_t strlen(const char *);
extern void sys_sigsuspend(void);
extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
extern long sparc32_open(const char __user * filename, int flags, int mode);
extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot);

extern int __ashrdi3(int, int);

extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);

/* VIS- and Niagara-optimized XOR routines (exported for RAID code). */
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
		unsigned long *);
extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
		unsigned long *, unsigned long *);
extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
		unsigned long *, unsigned long *, unsigned long *);

extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
		unsigned long *);
extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
		unsigned long *, unsigned long *);
extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
		unsigned long *, unsigned long *, unsigned long *);
93 | |||
/* Per-CPU information table */
EXPORT_PER_CPU_SYMBOL(__cpu_data);

/* used by various drivers */
#ifdef CONFIG_SMP
/* Out of line rw-locking implementation. */
EXPORT_SYMBOL(__read_lock);
EXPORT_SYMBOL(__read_unlock);
EXPORT_SYMBOL(__write_lock);
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
#endif /* CONFIG_SMP */

#ifdef CONFIG_MCOUNT
EXPORT_SYMBOL(_mcount);
#endif

EXPORT_SYMBOL(sparc64_get_clock_tick);

/* RW semaphores */
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);

/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic_add_ret);
EXPORT_SYMBOL(atomic_sub);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);

/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(test_and_change_bit);
EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);

EXPORT_SYMBOL(__flushw_user);

/* Chip identification and cache maintenance. */
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(get_fb_unmapped_area);
EXPORT_SYMBOL(flush_icache_range);

EXPORT_SYMBOL(flush_dcache_page);
#ifdef DCACHE_ALIASING_POSSIBLE
EXPORT_SYMBOL(__flush_dcache_range);
#endif

/* sun4v Niagara performance hypervisor calls. */
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);

#ifdef CONFIG_SUN_AUXIO
EXPORT_SYMBOL(auxio_set_led);
EXPORT_SYMBOL(auxio_set_lte);
#endif
#ifdef CONFIG_SBUS
EXPORT_SYMBOL(sbus_set_sbus64);
#endif
/* String I/O helpers. */
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(pci_dma_supported);
#endif

/* I/O device mmaping on Sparc64. */
EXPORT_SYMBOL(io_remap_pfn_range);

EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(put_fs_struct);

/* math-emu wants this */
EXPORT_SYMBOL(die_if_kernel);

/* Kernel thread creation. */
EXPORT_SYMBOL(kernel_thread);

/* prom symbols */
EXPORT_SYMBOL(idprom);
EXPORT_SYMBOL(prom_root_node);
EXPORT_SYMBOL(prom_getchild);
EXPORT_SYMBOL(prom_getsibling);
EXPORT_SYMBOL(prom_searchsiblings);
EXPORT_SYMBOL(prom_firstprop);
EXPORT_SYMBOL(prom_nextprop);
EXPORT_SYMBOL(prom_getproplen);
EXPORT_SYMBOL(prom_getproperty);
EXPORT_SYMBOL(prom_node_has_property);
EXPORT_SYMBOL(prom_setprop);
EXPORT_SYMBOL(saved_command_line);
EXPORT_SYMBOL(prom_finddevice);
EXPORT_SYMBOL(prom_feval);
EXPORT_SYMBOL(prom_getbool);
EXPORT_SYMBOL(prom_getstring);
EXPORT_SYMBOL(prom_getint);
EXPORT_SYMBOL(prom_getintdefault);
EXPORT_SYMBOL(__prom_getchild);
EXPORT_SYMBOL(__prom_getsibling);
215 | |||
/* sparc library symbols */
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(__strlen_user);
EXPORT_SYMBOL(__strnlen_user);

/* Special internal versions of library functions. */
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(copy_user_page);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(__memcmp);
EXPORT_SYMBOL(__memset);

/* Checksumming helpers. */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(__csum_partial_copy_to_user);
EXPORT_SYMBOL(ip_fast_csum);

/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(___copy_to_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(copy_to_user_fixup);
EXPORT_SYMBOL(copy_from_user_fixup);
EXPORT_SYMBOL(copy_in_user_fixup);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__clear_user);

/* Various address conversion macros use this. */
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* No version information on this, heavily used in inline asm,
 * and will always be 'void __ret_efault(void)'.
 */
EXPORT_SYMBOL(__ret_efault);

/* No version information on these, as gcc produces such symbols. */
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strncmp);

void VISenter(void);
/* RAID code needs this */
EXPORT_SYMBOL(VISenter);

/* for input/keybdev */
EXPORT_SYMBOL(sun_do_break);
EXPORT_SYMBOL(stop_a_enabled);

#ifdef CONFIG_DEBUG_BUGVERBOSE
EXPORT_SYMBOL(do_BUG);
#endif

/* for ns8703 */
EXPORT_SYMBOL(ns87303_lock);

EXPORT_SYMBOL(tick_ops);

/* XOR routines declared above, also for the RAID code. */
EXPORT_SYMBOL(xor_vis_2);
EXPORT_SYMBOL(xor_vis_3);
EXPORT_SYMBOL(xor_vis_4);
EXPORT_SYMBOL(xor_vis_5);

EXPORT_SYMBOL(xor_niagara_2);
EXPORT_SYMBOL(xor_niagara_3);
EXPORT_SYMBOL(xor_niagara_4);
EXPORT_SYMBOL(xor_niagara_5);

EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S new file mode 100644 index 000000000000..c357e40ffd01 --- /dev/null +++ b/arch/sparc/kernel/spiterrs.S | |||
@@ -0,0 +1,245 @@ | |||
/* We need to carefully read the error status, ACK the errors,
 * prevent recursive traps, and pass the information on to C
 * code for logging.
 *
 * We pass the AFAR in as-is, and we encode the status
 * information as described in asm-sparc64/sfafsr.h
 */
	.type	__spitfire_access_error,#function
__spitfire_access_error:
	/* Disable ESTATE error reporting so that we do not take
	 * recursive traps and RED state the processor.
	 */
	stxa	%g0, [%g0] ASI_ESTATE_ERROR_EN
	membar	#Sync

	mov	UDBE_UE, %g1
	ldxa	[%g0] ASI_AFSR, %g4	! Get AFSR

	/* __spitfire_cee_trap branches here with AFSR in %g4 and
	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the ESTATE
	 * Error Enable register.
	 */
__spitfire_cee_trap_continue:
	ldxa	[%g0] ASI_AFAR, %g5	! Get AFAR

	/* Encode the trap type into the status word. */
	rdpr	%tt, %g3
	and	%g3, 0x1ff, %g3		! Paranoia
	sllx	%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
	or	%g4, %g3, %g4
	/* Record whether we trapped at trap level > 1. */
	rdpr	%tl, %g3
	cmp	%g3, 1
	mov	1, %g3
	bleu	%xcc, 1f
	 sllx	%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

	or	%g4, %g3, %g4

	/* Read in the UDB error register state, clearing the sticky
	 * error bits as-needed.  We only clear them if the UE bit is
	 * set.  Likewise, __spitfire_cee_trap below will only do so
	 * if the CE bit is set.
	 *
	 * NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
1:	ldxa	[%g0] ASI_UDBH_ERROR_R, %g3
	and	%g3, 0x3ff, %g7		! Paranoia
	sllx	%g7, SFSTAT_UDBH_SHIFT, %g7
	or	%g4, %g7, %g4
	andcc	%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn	%xcc, 1f
	 nop
	stxa	%g3, [%g0] ASI_UDB_ERROR_W
	membar	#Sync

	/* Same dance for the low UDB; 0x18 selects the UDBL register. */
1:	mov	0x18, %g3
	ldxa	[%g3] ASI_UDBL_ERROR_R, %g3
	and	%g3, 0x3ff, %g7		! Paranoia
	sllx	%g7, SFSTAT_UDBL_SHIFT, %g7
	or	%g4, %g7, %g4
	andcc	%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn	%xcc, 1f
	 nop
	mov	0x18, %g7
	stxa	%g3, [%g7] ASI_UDB_ERROR_W
	membar	#Sync

1:	/* Ok, now that we've latched the error state, clear the
	 * sticky bits in the AFSR.
	 */
	stxa	%g4, [%g0] ASI_AFSR
	membar	#Sync

	/* Raise PIL and enter the kernel: via etraptl1 when we trapped
	 * at trap level > 1, via etrap_irq otherwise.
	 */
	rdpr	%tl, %g2
	cmp	%g2, 1
	rdpr	%pil, %g2
	bleu,pt	%xcc, 1f
	 wrpr	%g0, PIL_NORMAL_MAX, %pil

	ba,pt	%xcc, etraptl1
	 rd	%pc, %g7

	ba,pt	%xcc, 2f
	 nop

1:	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

2:
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Hand off to C: pt_regs in %o0, %l4/%l5 in %o1/%o2
	 * (NOTE(review): presumably the AFSR/AFAR saved across etrap --
	 * confirm against the etrap code).
	 */
	mov	%l4, %o1
	mov	%l5, %o2
	call	spitfire_access_error
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
	.size	__spitfire_access_error,.-__spitfire_access_error
106 | |||
/* This is the trap handler entry point for ECC correctable
 * errors.  They are corrected, but we listen for the trap so
 * that the event can be logged.
 *
 * Disrupting errors are either:
 * 1) single-bit ECC errors during UDB reads to system
 *    memory
 * 2) data parity errors during write-back events
 *
 * As far as I can make out from the manual, the CEE trap is
 * only for correctable errors during memory read accesses by
 * the front-end of the processor.
 *
 * The code below is only for trap level 1 CEE events, as it
 * is the only situation where we can safely record and log.
 * For trap level >1 we just clear the CE bit in the AFSR and
 * return.
 *
 * This is just like __spitfire_access_error above, but it
 * specifically handles correctable errors.  If an
 * uncorrectable error is indicated in the AFSR we will branch
 * directly above to __spitfire_access_error to handle it
 * instead.  Uncorrectable therefore takes priority over
 * correctable, and the error logging C code will notice this
 * case by inspecting the trap type.
 */
	.type	__spitfire_cee_trap,#function
__spitfire_cee_trap:
	ldxa	[%g0] ASI_AFSR, %g4	! Get AFSR
	mov	1, %g3
	sllx	%g3, SFAFSR_UE_SHIFT, %g3
	andcc	%g4, %g3, %g0		! Check for UE
	bne,pn	%xcc, __spitfire_access_error
	 nop

	/* Ok, in this case we only have a correctable error.
	 * Indicate we only wish to capture that state in register
	 * %g1, and we only disable CE error reporting unlike UE
	 * handling which disables all errors.
	 */
	ldxa	[%g0] ASI_ESTATE_ERROR_EN, %g3
	andn	%g3, ESTATE_ERR_CE, %g3
	stxa	%g3, [%g0] ASI_ESTATE_ERROR_EN
	membar	#Sync

	/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
	ba,pt	%xcc, __spitfire_cee_trap_continue
	 mov	UDBE_CE, %g1
	.size	__spitfire_cee_trap,.-__spitfire_cee_trap
156 | |||
	.type	__spitfire_data_access_exception_tl1,#function
__spitfire_data_access_exception_tl1:
	/* Flip PSTATE AG/MG so we work with the MMU globals, then
	 * latch SFSR/SFAR and clear the sticky fault state.
	 */
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	/* Faults taken inside the window spill/fill handlers (trap
	 * types 0x80-0xff) need the special winfix_dax fixup path.
	 */
	rdpr	%tt, %g3
	cmp	%g3, 0x80		! first win spill/fill trap
	blu,pn	%xcc, 1f
	 cmp	%g3, 0xff		! last win spill/fill trap
	bgu,pn	%xcc, 1f
	 nop
	ba,pt	%xcc, winfix_dax
	 rdpr	%tpc, %g3
	/* Otherwise enter the kernel and call the C handler. */
1:	sethi	%hi(109f), %g7
	ba,pt	%xcc, etraptl1
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	spitfire_data_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
	.size	__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
185 | |||
	.type	__spitfire_data_access_exception,#function
__spitfire_data_access_exception:
	/* TL==1 variant: latch SFSR/SFAR, clear the sticky fault
	 * state, then enter the kernel and call the C handler.
	 */
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	spitfire_data_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
	.size	__spitfire_data_access_exception,.-__spitfire_data_access_exception
206 | |||
	.type	__spitfire_insn_access_exception_tl1,#function
__spitfire_insn_access_exception_tl1:
	/* Instruction-access fault at TL > 1: latch the IMMU SFSR
	 * (using %tpc as the fault address since the IMMU has no
	 * SFAR), clear the sticky state, and call the C handler.
	 */
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	ldxa	[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr	%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etraptl1
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	spitfire_insn_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
	.size	__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
226 | |||
	.type	__spitfire_insn_access_exception,#function
__spitfire_insn_access_exception:
	/* TL==1 instruction-access fault: same as the tl1 variant
	 * above but enters via etrap.
	 */
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	ldxa	[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr	%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	spitfire_insn_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
	.size	__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c new file mode 100644 index 000000000000..8cdbe5946b43 --- /dev/null +++ b/arch/sparc/kernel/sstate.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* sstate.c: System soft state support. | ||
2 | * | ||
3 | * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/notifier.h> | ||
8 | #include <linux/reboot.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/hypervisor.h> | ||
12 | #include <asm/spitfire.h> | ||
13 | #include <asm/oplib.h> | ||
14 | #include <asm/head.h> | ||
15 | #include <asm/io.h> | ||
16 | |||
17 | static int hv_supports_soft_state; | ||
18 | |||
19 | static unsigned long kimage_addr_to_ra(const char *p) | ||
20 | { | ||
21 | unsigned long val = (unsigned long) p; | ||
22 | |||
23 | return kern_base + (val - KERNBASE); | ||
24 | } | ||
25 | |||
26 | static void do_set_sstate(unsigned long state, const char *msg) | ||
27 | { | ||
28 | unsigned long err; | ||
29 | |||
30 | if (!hv_supports_soft_state) | ||
31 | return; | ||
32 | |||
33 | err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg)); | ||
34 | if (err) { | ||
35 | printk(KERN_WARNING "SSTATE: Failed to set soft-state to " | ||
36 | "state[%lx] msg[%s], err=%lu\n", | ||
37 | state, msg, err); | ||
38 | } | ||
39 | } | ||
40 | |||
/* Soft-state message strings, one per lifecycle transition.  The
 * 32-byte size/alignment is presumably required by the hypervisor
 * soft-state ABI -- TODO confirm against the sun4v spec.
 */
static const char booting_msg[32] __attribute__((aligned(32))) =
	"Linux booting";
static const char running_msg[32] __attribute__((aligned(32))) =
	"Linux running";
static const char halting_msg[32] __attribute__((aligned(32))) =
	"Linux halting";
static const char poweroff_msg[32] __attribute__((aligned(32))) =
	"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
	"Linux rebooting";
static const char panicing_msg[32] __attribute__((aligned(32))) =
	"Linux panicing";
53 | |||
54 | static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) | ||
55 | { | ||
56 | const char *msg; | ||
57 | |||
58 | switch (type) { | ||
59 | case SYS_DOWN: | ||
60 | default: | ||
61 | msg = rebooting_msg; | ||
62 | break; | ||
63 | |||
64 | case SYS_HALT: | ||
65 | msg = halting_msg; | ||
66 | break; | ||
67 | |||
68 | case SYS_POWER_OFF: | ||
69 | msg = poweroff_msg; | ||
70 | break; | ||
71 | } | ||
72 | |||
73 | do_set_sstate(HV_SOFT_STATE_TRANSITION, msg); | ||
74 | |||
75 | return NOTIFY_OK; | ||
76 | } | ||
77 | |||
/* Registered with the reboot notifier chain in sstate_init(). */
static struct notifier_block sstate_reboot_notifier = {
	.notifier_call = sstate_reboot_call,
};
81 | |||
/* Panic-notifier callback: report the panic transition to the
 * hypervisor.
 */
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);

	return NOTIFY_DONE;
}

/* Highest priority so the state is reported before other panic hooks. */
static struct notifier_block sstate_panic_block = {
	.notifier_call	=	sstate_panic_event,
	.priority	=	INT_MAX,
};
93 | |||
/* Enable hypervisor soft-state reporting: negotiate API group version
 * 1.x, tell the firmware this guest supports soft state, report
 * "booting", and hook the reboot/panic notifiers so later transitions
 * are reported too.  Returns 0 (harmlessly disabled) when not running
 * on the hypervisor or when the API group is not offered.
 */
static int __init sstate_init(void)
{
	unsigned long major, minor;

	if (tlb_type != hypervisor)
		return 0;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
		return 0;

	hv_supports_soft_state = 1;

	prom_sun4v_guest_soft_state();

	do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);

	atomic_notifier_chain_register(&panic_notifier_list,
				       &sstate_panic_block);
	register_reboot_notifier(&sstate_reboot_notifier);

	return 0;
}

core_initcall(sstate_init);
120 | |||
/* Late initcall: boot is essentially complete, so report the normal
 * "running" soft state to the hypervisor.
 */
static int __init sstate_running(void)
{
	do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
	return 0;
}

late_initcall(sstate_running);
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c new file mode 100644 index 000000000000..acb12f673757 --- /dev/null +++ b/arch/sparc/kernel/stacktrace.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <linux/sched.h> | ||
2 | #include <linux/stacktrace.h> | ||
3 | #include <linux/thread_info.h> | ||
4 | #include <linux/module.h> | ||
5 | #include <asm/ptrace.h> | ||
6 | #include <asm/stacktrace.h> | ||
7 | |||
8 | #include "kstack.h" | ||
9 | |||
/* Walk the kernel stack of the task owning @tp, recording return PCs
 * into @trace until the stack ends or trace->max_entries is reached.
 *
 * @tp:         thread_info of the task whose stack is walked
 * @trace:      destination; honors trace->skip, fills trace->entries
 * @skip_sched: when true, PCs inside scheduler functions are omitted
 *              (used for blocked tasks so the trace shows what the
 *              task was doing, not how it went to sleep)
 */
static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;

	if (tp == current_thread_info()) {
		/* Walking our own stack: flush register windows to memory
		 * first, then start from the live frame pointer.
		 */
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		/* Sleeping task: use its saved kernel stack pointer.  */
		ksp = tp->ksp;
	}

	/* sparc64 stack pointers are biased; undo it to get a real address.  */
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Stop at the user/kernel boundary.  */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc))
			trace->entries[trace->nr_entries++] = pc;
	} while (trace->nr_entries < trace->max_entries);
}
51 | |||
/* Capture the currently running task's kernel stack trace.
 * Scheduler frames are kept (skip_sched = false).
 */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
57 | |||
58 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
59 | { | ||
60 | struct thread_info *tp = task_thread_info(tsk); | ||
61 | |||
62 | __save_stack_trace(tp, trace, true); | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c new file mode 100644 index 000000000000..060d0f3a6151 --- /dev/null +++ b/arch/sparc/kernel/starfire.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * starfire.c: Starfire/E10000 support. | ||
3 | * | ||
4 | * Copyright (C) 1998 David S. Miller (davem@redhat.com) | ||
5 | * Copyright (C) 2000 Anton Blanchard (anton@samba.org) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/slab.h> | ||
10 | |||
11 | #include <asm/page.h> | ||
12 | #include <asm/oplib.h> | ||
13 | #include <asm/smp.h> | ||
14 | #include <asm/upa.h> | ||
15 | #include <asm/starfire.h> | ||
16 | |||
17 | /* | ||
18 | * A few places around the kernel check this to see if | ||
19 | * they need to call us to do things in a Starfire specific | ||
20 | * way. | ||
21 | */ | ||
/* Non-zero when the machine is a Starfire/E10000; various subsystems
 * test this flag to take Starfire-specific code paths.
 */
int this_is_starfire = 0;

/* Detect a Starfire by probing for its "/ssp-serial" OBP node; both 0
 * and -1 are "not found" returns from prom_finddevice().
 */
void check_if_starfire(void)
{
	int node = prom_finddevice("/ssp-serial");

	if (node != 0 && node != -1)
		this_is_starfire = 1;
}
30 | |||
/* Read the hardware CPU id from a fixed UPA register address.
 * NOTE(review): 0x1fff40000d0 appears to be the Starfire per-CPU id
 * register in UPA config space -- confirm against Starfire hardware docs.
 */
int starfire_hard_smp_processor_id(void)
{
	return upa_readl(0x1fff40000d0UL);
}
35 | |||
36 | /* | ||
37 | * Each Starfire board has 32 registers which perform translation | ||
38 | * and delivery of traditional interrupt packets into the extended | ||
39 | * Starfire hardware format. Essentially UPAID's now have 2 more | ||
40 | * bits than in all previous Sun5 systems. | ||
41 | */ | ||
/* Per-board interrupt translation state, one node per hooked-up board.
 * imap_slots[] remembers which imap each of the board's 32 translation
 * registers currently serves (0 = free); tregs[] holds the physical
 * addresses of those registers.
 */
struct starfire_irqinfo {
	unsigned long imap_slots[32];	/* imap owning each slot, 0 if free */
	unsigned long tregs[32];	/* phys addrs of translation regs */
	struct starfire_irqinfo *next;	/* singly-linked list link */
	int upaid, hwmid;		/* UPA id and derived hw module id */
};

/* Head of the list of all boards registered via starfire_hookup().  */
static struct starfire_irqinfo *sflist = NULL;
50 | |||
51 | /* Beam me up Scott(McNeil)y... */ | ||
52 | void starfire_hookup(int upaid) | ||
53 | { | ||
54 | struct starfire_irqinfo *p; | ||
55 | unsigned long treg_base, hwmid, i; | ||
56 | |||
57 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
58 | if (!p) { | ||
59 | prom_printf("starfire_hookup: No memory, this is insane.\n"); | ||
60 | prom_halt(); | ||
61 | } | ||
62 | treg_base = 0x100fc000000UL; | ||
63 | hwmid = ((upaid & 0x3c) << 1) | | ||
64 | ((upaid & 0x40) >> 4) | | ||
65 | (upaid & 0x3); | ||
66 | p->hwmid = hwmid; | ||
67 | treg_base += (hwmid << 33UL); | ||
68 | treg_base += 0x200UL; | ||
69 | for (i = 0; i < 32; i++) { | ||
70 | p->imap_slots[i] = 0UL; | ||
71 | p->tregs[i] = treg_base + (i * 0x10UL); | ||
72 | /* Lets play it safe and not overwrite existing mappings */ | ||
73 | if (upa_readl(p->tregs[i]) != 0) | ||
74 | p->imap_slots[i] = 0xdeadbeaf; | ||
75 | } | ||
76 | p->upaid = upaid; | ||
77 | p->next = sflist; | ||
78 | sflist = p; | ||
79 | } | ||
80 | |||
81 | unsigned int starfire_translate(unsigned long imap, | ||
82 | unsigned int upaid) | ||
83 | { | ||
84 | struct starfire_irqinfo *p; | ||
85 | unsigned int bus_hwmid; | ||
86 | unsigned int i; | ||
87 | |||
88 | bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f; | ||
89 | for (p = sflist; p != NULL; p = p->next) | ||
90 | if (p->hwmid == bus_hwmid) | ||
91 | break; | ||
92 | if (p == NULL) { | ||
93 | prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n", | ||
94 | ((unsigned long)imap)); | ||
95 | prom_halt(); | ||
96 | } | ||
97 | for (i = 0; i < 32; i++) { | ||
98 | if (p->imap_slots[i] == imap || | ||
99 | p->imap_slots[i] == 0UL) | ||
100 | break; | ||
101 | } | ||
102 | if (i == 32) { | ||
103 | printk("starfire_translate: Are you kidding me?\n"); | ||
104 | panic("Lucy in the sky...."); | ||
105 | } | ||
106 | p->imap_slots[i] = imap; | ||
107 | |||
108 | /* map to real upaid */ | ||
109 | upaid = (((upaid & 0x3c) << 1) | | ||
110 | ((upaid & 0x40) >> 4) | | ||
111 | (upaid & 0x3)); | ||
112 | |||
113 | upa_writel(upaid, p->tregs[i]); | ||
114 | |||
115 | return i; | ||
116 | } | ||
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S new file mode 100644 index 000000000000..559bc5e9c199 --- /dev/null +++ b/arch/sparc/kernel/sun4v_ivec.S | |||
@@ -0,0 +1,341 @@ | |||
1 | /* sun4v_ivec.S: Sun4v interrupt vector handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <asm/cpudata.h> | ||
7 | #include <asm/intr_queue.h> | ||
8 | #include <asm/pil.h> | ||
9 | |||
10 | .text | ||
11 | .align 32 | ||
12 | |||
	/* CPU mondo (cross-call) queue handler.  Pops one 64-byte entry,
	 * extracts the sun4u-compatible cross-call arguments, advances the
	 * head pointer, and jumps to the handler PC.
	 */
sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Entries are 64 bytes; skip the remainder of this one.  */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer; the qmask wraps the offset.  */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
62 | |||
	/* Device mondo queue handler: pop one entry, resolve it to the
	 * per-interrupt bucket, link the bucket onto the per-cpu irq work
	 * list, and raise PIL_DEVICE_IRQ softint for later processing.
	 */
sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Current we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr */
	brlz,pt %g3, 1f
	 xnor	%g3, %g0, %g4

	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket onto the irq work list (phys-addr linked).  */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
120 | |||
	/* Resumable error queue handler: copy the 64-byte error report
	 * into a kernel buffer, advance the head, then trap into C
	 * (sun4v_resum_error) to log it.
	 */
sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, unrolled as
	 * eight 8-byte physical load/store pairs.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
231 | |||
	/* Non-resumable error queue handler: identical structure to
	 * sun4v_res_mondo above, but drains the non-resumable queue and
	 * logs via sun4v_nonresum_error / sun4v_nonresum_overflow.
	 */
sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, unrolled as
	 * eight 8-byte physical load/store pairs.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S new file mode 100644 index 000000000000..e1fbf8c75787 --- /dev/null +++ b/arch/sparc/kernel/sun4v_tlb_miss.S | |||
@@ -0,0 +1,428 @@ | |||
1 | /* sun4v_tlb_miss.S: Sun4v TLB miss handlers. | ||
2 | * | ||
3 | * Copyright (C) 2006 <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | .text | ||
7 | .align 32 | ||
8 | |||
9 | /* Load ITLB fault information into VADDR and CTX, using BASE. */ | ||
	/* Load ITLB fault information into VADDR and CTX, using BASE.  */
#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
	ldx	[BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
	ldx	[BASE + HV_FAULT_I_CTX_OFFSET], CTX;

	/* Load DTLB fault information into VADDR and CTX, using BASE.  */
#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
	ldx	[BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
	ldx	[BASE + HV_FAULT_D_CTX_OFFSET], CTX;

	/* DEST = (VADDR >> 22), i.e. the TSB tag-target for VADDR.
	 *
	 * Branch to ZERO_CTX_LABEL if context is zero (kernel mappings
	 * are handled by the kvmap paths, not the user TSB).
	 */
#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
	srlx	VADDR, 22, DEST; \
	brz,pn	CTX, ZERO_CTX_LABEL; \
	 nop;

	/* Create TSB pointer.  This is something like:
	 *
	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
	 * tsb_base = tsb_reg & ~0x7UL;
	 * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
	 * tsb_ptr = tsb_base + (tsb_index * 16);
	 *
	 * The low 3 bits of the TSB register encode the TSB size;
	 * entries are 16 bytes (tag + pte).
	 */
#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
	and	TSB_PTR, 0x7, TMP1; \
	mov	512, TMP2; \
	andn	TSB_PTR, 0x7, TSB_PTR; \
	sllx	TMP2, TMP1, TMP2; \
	srlx	VADDR, HASH_SHIFT, TMP1; \
	sub	TMP2, 1, TMP2; \
	and	TMP1, TMP2, TMP1; \
	sllx	TMP1, 4, TMP1; \
	add	TSB_PTR, TMP1, TSB_PTR;
45 | |||
	/* I-TLB miss fast path: probe the user TSB; on a tag hit with an
	 * executable PTE, load the translation via hypervisor trap.
	 */
sun4v_itlb_miss:
	/* Load MMU Miss base into %g2.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g2

	/* Load UTSB reg into %g1.  */
	mov	SCRATCHPAD_UTSBREG1, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g1

	LOAD_ITLB_INFO(%g2, %g4, %g5)
	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)

	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
	cmp	%g2, %g6
	bne,a,pn %xcc, tsb_miss_page_table_walk
	 mov	FAULT_CODE_ITLB, %g3
	/* Tag matched; refuse non-executable pages.  */
	andcc	%g3, _PAGE_EXEC_4V, %g0
	be,a,pn	%xcc, tsb_do_fault
	 mov	FAULT_CODE_ITLB, %g3

	/* We have a valid entry, make hypervisor call to load
	 * I-TLB and return from trap.
	 *
	 * %g3:	PTE
	 * %g4:	vaddr
	 */
sun4v_itlb_load:
	ldxa	[%g0] ASI_SCRATCHPAD, %g6
	mov	%o0, %g1		! save %o0
	mov	%o1, %g2		! save %o1
	mov	%o2, %g5		! save %o2
	mov	%o3, %g7		! save %o3
	mov	%g4, %o0		! vaddr
	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1	! ctx
	mov	%g3, %o2		! PTE
	mov	HV_MMU_IMMU, %o3	! flags
	ta	HV_MMU_MAP_ADDR_TRAP
	brnz,pn	%o0, sun4v_itlb_error
	 mov	%g2, %o1		! restore %o1
	mov	%g1, %o0		! restore %o0
	mov	%g5, %o2		! restore %o2
	mov	%g7, %o3		! restore %o3

	retry
91 | |||
	/* D-TLB miss fast path: probe the user TSB; on a tag hit, load
	 * the translation via hypervisor trap.  No permission check here;
	 * protection faults arrive via sun4v_dtlb_prot.
	 */
sun4v_dtlb_miss:
	/* Load MMU Miss base into %g2.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g2

	/* Load UTSB reg into %g1.  */
	mov	SCRATCHPAD_UTSBREG1, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g1

	LOAD_DTLB_INFO(%g2, %g4, %g5)
	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)

	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
	cmp	%g2, %g6
	bne,a,pn %xcc, tsb_miss_page_table_walk
	 mov	FAULT_CODE_DTLB, %g3

	/* We have a valid entry, make hypervisor call to load
	 * D-TLB and return from trap.
	 *
	 * %g3:	PTE
	 * %g4:	vaddr
	 */
sun4v_dtlb_load:
	ldxa	[%g0] ASI_SCRATCHPAD, %g6
	mov	%o0, %g1		! save %o0
	mov	%o1, %g2		! save %o1
	mov	%o2, %g5		! save %o2
	mov	%o3, %g7		! save %o3
	mov	%g4, %o0		! vaddr
	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1	! ctx
	mov	%g3, %o2		! PTE
	mov	HV_MMU_DMMU, %o3	! flags
	ta	HV_MMU_MAP_ADDR_TRAP
	brnz,pn	%o0, sun4v_dtlb_error
	 mov	%g2, %o1		! restore %o1
	mov	%g1, %o0		! restore %o0
	mov	%g5, %o2		! restore %o2
	mov	%g7, %o3		! restore %o3

	retry
134 | |||
	/* D-TLB protection fault (write to a clean/read-only mapping).
	 * At TL>1 this must be a spill/fill fault, so route through the
	 * window fixup trampoline; otherwise take the common fault path.
	 */
sun4v_dtlb_prot:
	SET_GL(1)

	/* Load MMU Miss base into %g5.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g5

	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgu,pn	%xcc, winfix_trampoline
	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
	ba,pt	%xcc, sparc64_realfault_common
	 nop
148 | |||
	/* Called from trap table:
	 * %g4:	vaddr
	 * %g5:	context
	 * %g6:	TAG TARGET
	 */
sun4v_itsb_miss:
	mov	SCRATCHPAD_UTSBREG1, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g1
	brz,pn	%g5, kvmap_itlb_4v
	 mov	FAULT_CODE_ITLB, %g3
	ba,a,pt	%xcc, sun4v_tsb_miss_common

	/* Called from trap table:
	 * %g4:	vaddr
	 * %g5:	context
	 * %g6:	TAG TARGET
	 */
sun4v_dtsb_miss:
	mov	SCRATCHPAD_UTSBREG1, %g1
	ldxa	[%g1] ASI_SCRATCHPAD, %g1
	brz,pn	%g5, kvmap_dtlb_4v
	 mov	FAULT_CODE_DTLB, %g3

	/* fallthrough */

	/* Common slow-path setup: compute base and (optionally) huge-page
	 * TSB pointers, then hand off to the software page table walker.
	 */
sun4v_tsb_miss_common:
	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)

	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2

#ifdef CONFIG_HUGETLB_PAGE
	mov	SCRATCHPAD_UTSBREG2, %g5
	ldxa	[%g5] ASI_SCRATCHPAD, %g5
	/* -1 means no huge-page TSB configured for this process.  */
	cmp	%g5, -1
	be,pt	%xcc, 80f
	 nop
	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)

	/* That clobbered %g2, reload it.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2

80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
#endif

	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
196 | |||
	/* Hypervisor refused an I-TLB map request: stash the vaddr, ctx,
	 * PTE and error code in globals, then trap into C to report it.
	 */
sun4v_itlb_error:
	sethi	%hi(sun4v_err_itlb_vaddr), %g1
	stx	%g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
	sethi	%hi(sun4v_err_itlb_ctx), %g1
	ldxa	[%g0] ASI_SCRATCHPAD, %g6
	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1
	stx	%o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
	sethi	%hi(sun4v_err_itlb_pte), %g1
	stx	%g3, [%g1 + %lo(sun4v_err_itlb_pte)]
	sethi	%hi(sun4v_err_itlb_error), %g1
	stx	%o0, [%g1 + %lo(sun4v_err_itlb_error)]

	/* Use etraptl1 when we faulted at TL>1, etrap otherwise.  */
	rdpr	%tl, %g4
	cmp	%g4, 1
	ble,pt	%icc, 1f
	 sethi	%hi(2f), %g7
	ba,pt	%xcc, etraptl1
	 or	%g7, %lo(2f), %g7

1:	ba,pt	%xcc, etrap
2:	 or	%g7, %lo(2b), %g7
	mov	%l4, %o1
	call	sun4v_itlb_error_report
	 add	%sp, PTREGS_OFF, %o0

	/* NOTREACHED */
223 | |||
	/* Hypervisor refused a D-TLB map request: stash the vaddr, ctx,
	 * PTE and error code in globals, then trap into C to report it.
	 */
sun4v_dtlb_error:
	sethi	%hi(sun4v_err_dtlb_vaddr), %g1
	stx	%g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
	sethi	%hi(sun4v_err_dtlb_ctx), %g1
	ldxa	[%g0] ASI_SCRATCHPAD, %g6
	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1
	stx	%o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
	sethi	%hi(sun4v_err_dtlb_pte), %g1
	stx	%g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
	sethi	%hi(sun4v_err_dtlb_error), %g1
	stx	%o0, [%g1 + %lo(sun4v_err_dtlb_error)]

	/* Use etraptl1 when we faulted at TL>1, etrap otherwise.  */
	rdpr	%tl, %g4
	cmp	%g4, 1
	ble,pt	%icc, 1f
	 sethi	%hi(2f), %g7
	ba,pt	%xcc, etraptl1
	 or	%g7, %lo(2f), %g7

1:	ba,pt	%xcc, etrap
2:	 or	%g7, %lo(2b), %g7
	mov	%l4, %o1
	call	sun4v_dtlb_error_report
	 add	%sp, PTREGS_OFF, %o0

	/* NOTREACHED */
250 | |||
	/* Instruction Access Exception, tl0.
	 * Pack fault type into bits 16+ of the ctx value (%g5) and
	 * trap into sun4v_insn_access_exception with pt_regs.
	 */
sun4v_iacc:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	sun4v_insn_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
266 | |||
	/* Instruction Access Exception, tl1 (same as tl0 variant but
	 * enters via etraptl1 and the _tl1 C reporter).
	 */
sun4v_iacc_tl1:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etraptl1
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	sun4v_insn_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
282 | |||
	/* Data Access Exception, tl0.
	 * Pack fault type into bits 16+ of the ctx value (%g5) and
	 * trap into sun4v_data_access_exception with pt_regs.
	 */
sun4v_dacc:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	sun4v_data_access_exception
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
298 | |||
	/* Data Access Exception, tl1 (same as tl0 variant but enters via
	 * etraptl1 and the _tl1 C reporter).
	 */
sun4v_dacc_tl1:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etraptl1
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	sun4v_data_access_exception_tl1
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
314 | |||
	/* Memory Address Unaligned.
	 * At TL>1 this happened during a register window spill/fill, so
	 * divert through winfix_mna; at TL1 trap into sun4v_do_mna.
	 */
sun4v_mna:
	/* Window fixup? */
	rdpr	%tl, %g2
	cmp	%g2, 1
	ble,pt	%icc, 1f
	 nop

	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
	mov	HV_FAULT_TYPE_UNALIGNED, %g3
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g4
	sllx	%g3, 16, %g3
	or	%g4, %g3, %g4
	ba,pt	%xcc, winfix_mna
	 rdpr	%tpc, %g3
	/* not reached */

1:	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	mov	HV_FAULT_TYPE_UNALIGNED, %g3
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5

	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	sun4v_do_mna
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
348 | |||
	/* Privileged Action (user code executed a privileged operation):
	 * no hypervisor fault info needed, just trap into do_privact.
	 */
sun4v_privact:
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	call	do_privact
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
356 | |||
	/* Unaligned ldd float, tl0: gather fault info and let the C
	 * emulator (handle_lddfmna) perform the access.
	 */
sun4v_lddfmna:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	handle_lddfmna
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
372 | |||
	/* Unaligned std float, tl0: gather fault info and let the C
	 * emulator (handle_stdfmna) perform the access.
	 */
sun4v_stdfmna:
	ldxa	[%g0] ASI_SCRATCHPAD, %g2
	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
	sllx	%g3, 16, %g3
	or	%g5, %g3, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	handle_stdfmna
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
388 | |||
#define BRANCH_ALWAYS	0x10680000
#define NOP		0x01000000
	/* Overwrite the first instruction at OLD with a "ba,a" to NEW and
	 * the second with a nop, then flush the I-cache line.  The sll/srl
	 * pair truncates the word displacement to the branch's 19-bit
	 * signed immediate field.
	 */
#define SUN4V_DO_PATCH(OLD, NEW)	\
	sethi	%hi(NEW), %g1; \
	or	%g1, %lo(NEW), %g1; \
	sethi	%hi(OLD), %g2; \
	or	%g2, %lo(OLD), %g2; \
	sub	%g1, %g2, %g1; \
	sethi	%hi(BRANCH_ALWAYS), %g3; \
	sll	%g1, 11, %g1; \
	srl	%g1, 11 + 2, %g1; \
	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
	or	%g3, %g1, %g3; \
	stw	%g3, [%g2]; \
	sethi	%hi(NOP), %g3; \
	or	%g3, %lo(NOP), %g3; \
	stw	%g3, [%g2 + 0x4]; \
	flush	%g2;

	/* Redirect the sun4u trap table entries to the sun4v handlers
	 * above; called once during early boot on hypervisor systems.
	 */
	.globl	sun4v_patch_tlb_handlers
	.type	sun4v_patch_tlb_handlers,#function
sun4v_patch_tlb_handlers:
	SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
	SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
	SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
	SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
	SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
	SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
	SUN4V_DO_PATCH(tl0_iax, sun4v_iacc)
	SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1)
	SUN4V_DO_PATCH(tl0_dax, sun4v_dacc)
	SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1)
	SUN4V_DO_PATCH(tl0_mna, sun4v_mna)
	SUN4V_DO_PATCH(tl1_mna, sun4v_mna)
	SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna)
	SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna)
	SUN4V_DO_PATCH(tl0_privact, sun4v_privact)
	retl
	 nop
	.size	sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S new file mode 100644 index 000000000000..f061c4dda9ef --- /dev/null +++ b/arch/sparc/kernel/sys32.S | |||
@@ -0,0 +1,367 @@ | |||
1 | /* | ||
2 | * sys32.S: I-cache tricks for 32-bit compatibility layer simple | ||
3 | * conversions. | ||
4 | * | ||
5 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
6 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
7 | */ | ||
8 | |||
9 | #include <asm/errno.h> | ||
10 | |||
/* NOTE: call as jump breaks return stack, we have to avoid that */

	.text

/* SIGNn(STUB, SYSCALL, REG1..REGn): emit a 32-byte-aligned stub named
 * STUB that sign-extends the listed 32-bit argument registers
 * ("sra REG, 0, REG" copies bit 31 into the upper half) and then
 * tail-jumps to the 64-bit SYSCALL implementation.  The jmpl links
 * into %g0, i.e. no return address is saved, so SYSCALL's return goes
 * straight back to the stub's caller (per the NOTE above, we must not
 * "call" here or the hardware return-address stack would be skewed).
 * Note the final sra of each stub sits in the jmpl delay slot and thus
 * still executes before SYSCALL is entered.
 */
#define SIGN1(STUB,SYSCALL,REG1) \
	.align	32; \
	.globl	STUB; \
STUB:	sethi	%hi(SYSCALL), %g1; \
	jmpl	%g1 + %lo(SYSCALL), %g0; \
	sra	REG1, 0, REG1

#define SIGN2(STUB,SYSCALL,REG1,REG2) \
	.align	32; \
	.globl	STUB; \
STUB:	sethi	%hi(SYSCALL), %g1; \
	sra	REG1, 0, REG1; \
	jmpl	%g1 + %lo(SYSCALL), %g0; \
	sra	REG2, 0, REG2

#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
	.align	32; \
	.globl	STUB; \
STUB:	sra	REG1, 0, REG1; \
	sethi	%hi(SYSCALL), %g1; \
	sra	REG2, 0, REG2; \
	jmpl	%g1 + %lo(SYSCALL), %g0; \
	sra	REG3, 0, REG3

#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
	.align	32; \
	.globl	STUB; \
STUB:	sra	REG1, 0, REG1; \
	sethi	%hi(SYSCALL), %g1; \
	sra	REG2, 0, REG2; \
	sra	REG3, 0, REG3; \
	jmpl	%g1 + %lo(SYSCALL), %g0; \
	sra	REG4, 0, REG4
48 | |||
/* One sign-extension stub per compat syscall whose 32-bit arguments
 * are signed in the native 64-bit ABI.  The registers listed are the
 * argument registers that need "sra REG, 0, REG" before dispatching to
 * the named 64-bit (or compat_) implementation.
 */
SIGN1(sys32_exit, sparc_exit, %o0)
SIGN1(sys32_exit_group, sys_exit_group, %o0)
SIGN1(sys32_wait4, compat_sys_wait4, %o2)
SIGN1(sys32_creat, sys_creat, %o1)
SIGN1(sys32_mknod, sys_mknod, %o1)
SIGN1(sys32_perfctr, sys_perfctr, %o0)
SIGN1(sys32_umount, sys_umount, %o1)
SIGN1(sys32_signal, sys_signal, %o0)
SIGN1(sys32_access, sys_access, %o1)
SIGN1(sys32_msync, sys_msync, %o2)
SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
SIGN1(sys32_sethostname, sys_sethostname, %o1)
SIGN1(sys32_swapon, sys_swapon, %o1)
SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
SIGN1(sys32_setxattr, sys_setxattr, %o4)
SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
SIGN1(sys32_readahead, compat_sys_readahead, %o0)
SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
SIGN1(sys32_mlockall, sys_mlockall, %o0)
SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
SIGN1(sys32_select, compat_sys_select, %o0)
SIGN1(sys32_mkdir, sys_mkdir, %o1)
SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
SIGN1(sys32_prctl, sys_prctl, %o0)
SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
SIGN1(sys32_getgroups, sys_getgroups, %o0)
SIGN1(sys32_getpgid, sys_getpgid, %o0)
SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
SIGN1(sys32_getsid, sys_getsid, %o0)
SIGN2(sys32_kill, sys_kill, %o0, %o1)
SIGN1(sys32_nice, sys_nice, %o0)
SIGN1(sys32_lseek, sys_lseek, %o1)
SIGN2(sys32_open, sparc32_open, %o1, %o2)
SIGN1(sys32_readlink, sys_readlink, %o2)
SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
SIGN1(sys32_setgroups, sys_setgroups, %o0)
SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
SIGN1(sys32_umask, sys_umask, %o0)
SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
SIGN1(sys32_sendto, sys_sendto, %o0)
SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
SIGN2(sys32_connect, sys_connect, %o0, %o2)
SIGN2(sys32_bind, sys_bind, %o0, %o2)
SIGN2(sys32_listen, sys_listen, %o0, %o1)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
SIGN1(sys32_getpeername, sys_getpeername, %o0)
SIGN1(sys32_getsockname, sys_getsockname, %o0)
SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
SIGN2(sys32_splice, sys_splice, %o0, %o1)
SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
SIGN2(sys32_tee, sys_tee, %o0, %o1)
SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
141 | |||
	/* 32-bit mmap2(): the sixth argument (%o5) is the file offset
	 * expressed in 4096-byte units; shift it left 12 bits (in the
	 * jmpl delay slot) to form a byte offset, then tail-jump to the
	 * 64-bit sys_mmap without linking a return address.
	 */
	.globl	sys32_mmap2
sys32_mmap2:
	sethi	%hi(sys_mmap), %g1
	jmpl	%g1 + %lo(sys_mmap), %g0
	 sllx	%o5, 12, %o5
147 | |||
	/* 32-bit socketcall(2) demultiplexer.
	 * Valid call numbers are 1..18; anything else returns -EINVAL.
	 * Each jump-table entry below is exactly 32 bytes, so the entry
	 * address is __socketcall_table_begin + (call - 1) * 32 — hence
	 * the "sub 1" then "sllx 5".  Note the cmp/sub instructions sit
	 * in the branch delay slots and execute unconditionally.
	 */
	.align	32
	.globl	sys32_socketcall
sys32_socketcall:	/* %o0=call, %o1=args */
	cmp	%o0, 1
	bl,pn	%xcc, do_einval
	 cmp	%o0, 18
	bg,pn	%xcc, do_einval
	 sub	%o0, 1, %o0
	sllx	%o0, 5, %o0
	sethi	%hi(__socketcall_table_begin), %g2
	or	%g2, %lo(__socketcall_table_begin), %g2
	jmpl	%g2 + %o0, %g0
	 nop
do_einval:
	retl
	 mov	-EINVAL, %o0
164 | |||
	/* Jump table for sys32_socketcall above.  Each entry loads the
	 * 32-bit arguments out of the userland args array (%o1, accessed
	 * through %asi) — ldswa sign-extends signed arguments, lduwa
	 * zero-extends pointers/unsigned values — and tail-jumps to the
	 * 64-bit or compat implementation, with the last load in the
	 * jmpl delay slot.  Entries are padded with nops to exactly 32
	 * bytes to match the <<5 index computation.  The numeric "N:"
	 * labels mark the faultable user loads; each is registered in
	 * the __ex_table section at the bottom of this file so a fault
	 * is fixed up via __retl_efault instead of oopsing.
	 */
	.align	32
__socketcall_table_begin:

	/* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
1:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_socket), %g1
2:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_socket), %g0
3:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_bind), %g1
5:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_bind), %g0
6:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_connect), %g1
8:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_connect), %g0
9:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_listen: /* sys_listen(int, int) */
10:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_listen), %g1
	jmpl	%g1 + %lo(sys_listen), %g0
11:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
	nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_accept), %g1
13:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_accept), %g0
14:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_getsockname), %g1
16:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_getsockname), %g0
17:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_getpeername), %g1
19:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_getpeername), %g0
20:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_socketpair), %g1
22:	ldswa	[%o1 + 0x8] %asi, %o2
23:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_socketpair), %g0
24:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
	nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_send), %g1
26:	lduwa	[%o1 + 0x8] %asi, %o2
27:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_send), %g0
28:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_recv), %g1
30:	lduwa	[%o1 + 0x8] %asi, %o2
31:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_recv), %g0
32:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_sendto), %g1
34:	lduwa	[%o1 + 0x8] %asi, %o2
35:	lduwa	[%o1 + 0xc] %asi, %o3
36:	lduwa	[%o1 + 0x10] %asi, %o4
37:	ldswa	[%o1 + 0x14] %asi, %o5
	jmpl	%g1 + %lo(sys_sendto), %g0
38:	 lduwa	[%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_recvfrom), %g1
40:	lduwa	[%o1 + 0x8] %asi, %o2
41:	lduwa	[%o1 + 0xc] %asi, %o3
42:	lduwa	[%o1 + 0x10] %asi, %o4
43:	lduwa	[%o1 + 0x14] %asi, %o5
	jmpl	%g1 + %lo(sys_recvfrom), %g0
44:	 lduwa	[%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_shutdown), %g1
	jmpl	%g1 + %lo(sys_shutdown), %g0
46:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
	nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
47:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_setsockopt), %g1
48:	ldswa	[%o1 + 0x8] %asi, %o2
49:	lduwa	[%o1 + 0xc] %asi, %o3
50:	ldswa	[%o1 + 0x10] %asi, %o4
	jmpl	%g1 + %lo(compat_sys_setsockopt), %g0
51:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
52:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_getsockopt), %g1
53:	ldswa	[%o1 + 0x8] %asi, %o2
54:	lduwa	[%o1 + 0xc] %asi, %o3
55:	lduwa	[%o1 + 0x10] %asi, %o4
	jmpl	%g1 + %lo(compat_sys_getsockopt), %g0
56:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_sendmsg), %g1
58:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(compat_sys_sendmsg), %g0
59:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_recvmsg), %g1
61:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(compat_sys_recvmsg), %g0
62:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
	nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_accept4), %g1
64:	lduwa	[%o1 + 0x8] %asi, %o2
65:	ldswa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_accept4), %g0
66:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop
	nop
331 | |||
	/* Exception table for the socketcall jump table above: each pair
	 * maps one of the faultable user-space loads (labels 1 through
	 * 66) to the __retl_efault fixup (defined elsewhere; by its name
	 * and usage it makes the stub return -EFAULT to the caller).
	 */
	.section	__ex_table,"a"
	.align	4
	.word	1b, __retl_efault, 2b, __retl_efault
	.word	3b, __retl_efault, 4b, __retl_efault
	.word	5b, __retl_efault, 6b, __retl_efault
	.word	7b, __retl_efault, 8b, __retl_efault
	.word	9b, __retl_efault, 10b, __retl_efault
	.word	11b, __retl_efault, 12b, __retl_efault
	.word	13b, __retl_efault, 14b, __retl_efault
	.word	15b, __retl_efault, 16b, __retl_efault
	.word	17b, __retl_efault, 18b, __retl_efault
	.word	19b, __retl_efault, 20b, __retl_efault
	.word	21b, __retl_efault, 22b, __retl_efault
	.word	23b, __retl_efault, 24b, __retl_efault
	.word	25b, __retl_efault, 26b, __retl_efault
	.word	27b, __retl_efault, 28b, __retl_efault
	.word	29b, __retl_efault, 30b, __retl_efault
	.word	31b, __retl_efault, 32b, __retl_efault
	.word	33b, __retl_efault, 34b, __retl_efault
	.word	35b, __retl_efault, 36b, __retl_efault
	.word	37b, __retl_efault, 38b, __retl_efault
	.word	39b, __retl_efault, 40b, __retl_efault
	.word	41b, __retl_efault, 42b, __retl_efault
	.word	43b, __retl_efault, 44b, __retl_efault
	.word	45b, __retl_efault, 46b, __retl_efault
	.word	47b, __retl_efault, 48b, __retl_efault
	.word	49b, __retl_efault, 50b, __retl_efault
	.word	51b, __retl_efault, 52b, __retl_efault
	.word	53b, __retl_efault, 54b, __retl_efault
	.word	55b, __retl_efault, 56b, __retl_efault
	.word	57b, __retl_efault, 58b, __retl_efault
	.word	59b, __retl_efault, 60b, __retl_efault
	.word	61b, __retl_efault, 62b, __retl_efault
	.word	63b, __retl_efault, 64b, __retl_efault
	.word	65b, __retl_efault, 66b, __retl_efault
	.previous
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c new file mode 100644 index 000000000000..e800503879e4 --- /dev/null +++ b/arch/sparc/kernel/sys_sparc32.c | |||
@@ -0,0 +1,682 @@ | |||
1 | /* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls. | ||
2 | * | ||
3 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
4 | * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net) | ||
5 | * | ||
6 | * These routines maintain argument size conversion between 32bit and 64bit | ||
7 | * environment. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/capability.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/file.h> | ||
16 | #include <linux/signal.h> | ||
17 | #include <linux/resource.h> | ||
18 | #include <linux/times.h> | ||
19 | #include <linux/utsname.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/sem.h> | ||
23 | #include <linux/msg.h> | ||
24 | #include <linux/shm.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/uio.h> | ||
27 | #include <linux/nfs_fs.h> | ||
28 | #include <linux/quota.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/sunrpc/svc.h> | ||
31 | #include <linux/nfsd/nfsd.h> | ||
32 | #include <linux/nfsd/cache.h> | ||
33 | #include <linux/nfsd/xdr.h> | ||
34 | #include <linux/nfsd/syscall.h> | ||
35 | #include <linux/poll.h> | ||
36 | #include <linux/personality.h> | ||
37 | #include <linux/stat.h> | ||
38 | #include <linux/filter.h> | ||
39 | #include <linux/highmem.h> | ||
40 | #include <linux/highuid.h> | ||
41 | #include <linux/mman.h> | ||
42 | #include <linux/ipv6.h> | ||
43 | #include <linux/in.h> | ||
44 | #include <linux/icmpv6.h> | ||
45 | #include <linux/syscalls.h> | ||
46 | #include <linux/sysctl.h> | ||
47 | #include <linux/binfmts.h> | ||
48 | #include <linux/dnotify.h> | ||
49 | #include <linux/security.h> | ||
50 | #include <linux/compat.h> | ||
51 | #include <linux/vfs.h> | ||
52 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
53 | #include <linux/ptrace.h> | ||
54 | |||
55 | #include <asm/types.h> | ||
56 | #include <asm/uaccess.h> | ||
57 | #include <asm/fpumacro.h> | ||
58 | #include <asm/mmu_context.h> | ||
59 | #include <asm/compat_signal.h> | ||
60 | |||
#ifdef CONFIG_SYSVIPC
/*
 * 32-bit SysV IPC multiplexer.
 *
 * "call" selects the operation; its high 16 bits carry an API version
 * kept for backward compatibility.  Arguments that are signed in the
 * native ABI (keys, semaphore/message/shm ids, counts, message types)
 * are explicitly sign-extended from their 32-bit values before being
 * handed to the 64-bit or compat implementations; user pointers arrive
 * as compat_uptr_t/u32 and are widened with compat_ptr().
 *
 * Returns the result of the dispatched operation, or -ENOSYS for an
 * unknown multiplexer code.
 *
 * Cleanups vs. previous version: dropped the stray semicolon after the
 * switch's closing brace and the unreachable trailing return (every
 * case, including default, already returns).
 */
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid */
			return compat_sys_semtimedop((int)first,
						     compat_ptr(ptr), second,
						     compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign extend semid, semnum */
		return compat_sys_semctl((int)first, (int)second, third,
					 compat_ptr(ptr));

	case MSGSND:
		/* sign extend msqid */
		return compat_sys_msgsnd((int)first, (int)second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		/* sign extend msqid, msgtyp */
		return compat_sys_msgrcv((int)first, second, (int)fifth,
					 third, version, compat_ptr(ptr));
	case MSGGET:
		/* sign extend key */
		return sys_msgget((int)first, second);
	case MSGCTL:
		/* sign extend msqid */
		return compat_sys_msgctl((int)first, second, compat_ptr(ptr));

	case SHMAT:
		/* sign extend shmid */
		return compat_sys_shmat((int)first, second, third, version,
					compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		/* sign extend key_t */
		return sys_shmget((int)first, second, third);
	case SHMCTL:
		/* sign extend shmid */
		return compat_sys_shmctl((int)first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	}
}
#endif
125 | |||
126 | asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) | ||
127 | { | ||
128 | if ((int)high < 0) | ||
129 | return -EINVAL; | ||
130 | else | ||
131 | return sys_truncate(path, (high << 32) | low); | ||
132 | } | ||
133 | |||
134 | asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low) | ||
135 | { | ||
136 | if ((int)high < 0) | ||
137 | return -EINVAL; | ||
138 | else | ||
139 | return sys_ftruncate(fd, (high << 32) | low); | ||
140 | } | ||
141 | |||
/*
 * Copy a kernel struct kstat into a userland struct compat_stat64.
 * The individual put_user() results are OR-ed together; returns 0 on
 * success, non-zero if any store faulted.  Padding fields are
 * explicitly zeroed so no kernel stack data leaks to userland.
 */
static int cp_compat_stat64(struct kstat *stat,
			    struct compat_stat64 __user *statbuf)
{
	int err;

	err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(stat->ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	err |= put_user(stat->uid, &statbuf->st_uid);
	err |= put_user(stat->gid, &statbuf->st_gid);
	err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev);
	/* NOTE(review): the pads are zeroed through casted pointers at
	 * byte offsets ([0] and [4]) — this assumes __pad3/__pad4 are
	 * char arrays of sufficient size; confirm against the
	 * compat_stat64 definition. */
	err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]);
	err |= put_user(stat->size, &statbuf->st_size);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]);
	err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[4]);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
	err |= put_user(0, &statbuf->__unused4);
	err |= put_user(0, &statbuf->__unused5);

	return err;
}
171 | |||
172 | asmlinkage long compat_sys_stat64(char __user * filename, | ||
173 | struct compat_stat64 __user *statbuf) | ||
174 | { | ||
175 | struct kstat stat; | ||
176 | int error = vfs_stat(filename, &stat); | ||
177 | |||
178 | if (!error) | ||
179 | error = cp_compat_stat64(&stat, statbuf); | ||
180 | return error; | ||
181 | } | ||
182 | |||
183 | asmlinkage long compat_sys_lstat64(char __user * filename, | ||
184 | struct compat_stat64 __user *statbuf) | ||
185 | { | ||
186 | struct kstat stat; | ||
187 | int error = vfs_lstat(filename, &stat); | ||
188 | |||
189 | if (!error) | ||
190 | error = cp_compat_stat64(&stat, statbuf); | ||
191 | return error; | ||
192 | } | ||
193 | |||
194 | asmlinkage long compat_sys_fstat64(unsigned int fd, | ||
195 | struct compat_stat64 __user * statbuf) | ||
196 | { | ||
197 | struct kstat stat; | ||
198 | int error = vfs_fstat(fd, &stat); | ||
199 | |||
200 | if (!error) | ||
201 | error = cp_compat_stat64(&stat, statbuf); | ||
202 | return error; | ||
203 | } | ||
204 | |||
205 | asmlinkage long compat_sys_fstatat64(unsigned int dfd, char __user *filename, | ||
206 | struct compat_stat64 __user * statbuf, int flag) | ||
207 | { | ||
208 | struct kstat stat; | ||
209 | int error = -EINVAL; | ||
210 | |||
211 | if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) | ||
212 | goto out; | ||
213 | |||
214 | if (flag & AT_SYMLINK_NOFOLLOW) | ||
215 | error = vfs_lstat_fd(dfd, filename, &stat); | ||
216 | else | ||
217 | error = vfs_stat_fd(dfd, filename, &stat); | ||
218 | |||
219 | if (!error) | ||
220 | error = cp_compat_stat64(&stat, statbuf); | ||
221 | |||
222 | out: | ||
223 | return error; | ||
224 | } | ||
225 | |||
/*
 * 32-bit sysfs(2): a direct pass-through.  The u32 arguments widen
 * implicitly (zero-extended) into the native sys_sysfs() parameters.
 */
asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
{
	return sys_sysfs(option, arg1, arg2);
}
230 | |||
/*
 * 32-bit sched_rr_get_interval(): run the native syscall against a
 * kernel-stack timespec under set_fs(KERNEL_DS) (so the kernel pointer
 * passes the __user access checks), then convert the result to the
 * compat timespec layout for userland.
 *
 * NOTE(review): "t" is copied out even when the syscall returned an
 * error, i.e. possibly uninitialized stack bytes reach userland in
 * that case — confirm whether callers rely on *interval only when
 * ret == 0.
 */
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
244 | |||
/*
 * 32-bit rt_sigprocmask(): translate between the compat sigset layout
 * (an array of 32-bit words) and the native 64-bit sigset_t, calling
 * the native sys_rt_sigprocmask() under set_fs(KERNEL_DS) so the
 * kernel-stack copies can be passed where __user pointers are expected.
 * The single local "s" is deliberately used for both the input and the
 * output set.
 */
asmlinkage long compat_sys_rt_sigprocmask(int how,
					  compat_sigset_t __user *set,
					  compat_sigset_t __user *oset,
					  compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		/* Fold pairs of 32-bit words into 64-bit sigset words.
		 * Each case intentionally falls through to the next so
		 * all words up to _NSIG_WORDS get converted. */
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __user *) &s : NULL,
				 oset ? (sigset_t __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		/* Split the 64-bit sigset words back into 32-bit halves
		 * (again with intentional fallthrough). */
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
284 | |||
/*
 * 32-bit rt_sigpending(): fetch the pending set via the native syscall
 * into a kernel-stack sigset_t (under set_fs(KERNEL_DS)), then split
 * the 64-bit words into the compat 32-bit-word layout and copy out.
 *
 * NOTE(review): sigsetsize is forwarded to the native syscall, but the
 * copy_to_user() always writes a full compat_sigset_t — confirm
 * callers always pass sizeof(compat_sigset_t).
 */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				    compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		/* 64-bit -> 32-bit word split; intentional fallthrough. */
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
308 | |||
/*
 * 32-bit rt_sigqueueinfo(): convert the compat siginfo into the native
 * layout on the kernel stack, then invoke the native syscall under
 * set_fs(KERNEL_DS) so the kernel-stack pointer passes the __user
 * access checks.
 */
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
					   struct compat_siginfo __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
324 | |||
/*
 * Old-style 32-bit sigaction().
 *
 * NOTE(review): the caller evidently passes the signal number negated
 * (the WARN fires if sig >= 0) and it is un-negated here before use —
 * confirm the convention against the syscall entry/stub code.
 *
 * Converts the 32-bit old_sigaction32 into a native k_sigaction,
 * performs do_sigaction(), and copies the previous action back out in
 * the 32-bit layout.  Only the first sigset word is exchanged, which
 * is all old_sigaction32's sa_mask holds.
 */
asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
				     struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	WARN_ON_ONCE(sig >= 0);
	sig = -sig;

	if (act) {
		compat_old_sigset_t mask;
		u32 u_handler, u_restorer;

		/* get_user/__get_user return 0 or -EFAULT, so OR-ing
		 * the results preserves any fault indication. */
		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(u_handler);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(mask, &act->sa_mask);
		if (ret)
			return ret;
		/* No rt-style trampoline for the old interface. */
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
361 | |||
/*
 * 32-bit rt_sigaction().  Beyond the usual act/oact conversion this
 * takes an extra "restorer" argument which is stored as ka_restorer on
 * the new action (a sparc-specific trampoline slot).  The sa_mask is
 * converted between the compat 32-bit-word layout and the native
 * 64-bit sigset words with the usual intentional switch fallthrough.
 */
asmlinkage long compat_sys_rt_sigaction(int sig,
					struct sigaction32 __user *act,
					struct sigaction32 __user *oact,
					void __user *restorer,
					compat_size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		u32 u_handler, u_restorer;

		new_ka.ka_restorer = restorer;
		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(u_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
		/* Fold 32-bit word pairs into 64-bit sigset words;
		 * fallthrough is intentional. */
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Split the previous action's 64-bit sigset words back
		 * into 32-bit halves; fallthrough is intentional. */
		switch (_NSIG_WORDS) {
		case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
		case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
		case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
		case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		if (ret)
			ret = -EFAULT;
	}

	return ret;
}
415 | |||
/*
 * sparc32_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 *
 * Arguments are pulled straight out of the 32-bit task's pt_regs:
 * %i0 = filename, %i1 = argv, %i2 = envp (all compat pointers).  If
 * %g1 is zero the syscall was made through the indirect-call
 * convention and the real arguments start one register later (base=1).
 * On successful exec the FPU state is reset (FPRS, %fsr shadow, saved
 * FP flag, TSTATE_PEF) so the new image starts with a clean FPU.
 */
asmlinkage long sparc32_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if ((u32)regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = compat_do_execve(filename,
				 compat_ptr(regs->u_regs[base + UREG_I1]),
				 compat_ptr(regs->u_regs[base + UREG_I2]), regs);

	putname(filename);

	if (!error) {
		/* Wipe FPU state for the freshly exec'd image. */
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
	}
out:
	return error;
}
451 | |||
452 | #ifdef CONFIG_MODULES | ||
453 | |||
454 | asmlinkage long sys32_init_module(void __user *umod, u32 len, | ||
455 | const char __user *uargs) | ||
456 | { | ||
457 | return sys_init_module(umod, len, uargs); | ||
458 | } | ||
459 | |||
460 | asmlinkage long sys32_delete_module(const char __user *name_user, | ||
461 | unsigned int flags) | ||
462 | { | ||
463 | return sys_delete_module(name_user, flags); | ||
464 | } | ||
465 | |||
466 | #else /* CONFIG_MODULES */ | ||
467 | |||
468 | asmlinkage long sys32_init_module(const char __user *name_user, | ||
469 | struct module __user *mod_user) | ||
470 | { | ||
471 | return -ENOSYS; | ||
472 | } | ||
473 | |||
474 | asmlinkage long sys32_delete_module(const char __user *name_user) | ||
475 | { | ||
476 | return -ENOSYS; | ||
477 | } | ||
478 | |||
479 | #endif /* CONFIG_MODULES */ | ||
480 | |||
481 | asmlinkage compat_ssize_t sys32_pread64(unsigned int fd, | ||
482 | char __user *ubuf, | ||
483 | compat_size_t count, | ||
484 | unsigned long poshi, | ||
485 | unsigned long poslo) | ||
486 | { | ||
487 | return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo); | ||
488 | } | ||
489 | |||
490 | asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd, | ||
491 | char __user *ubuf, | ||
492 | compat_size_t count, | ||
493 | unsigned long poshi, | ||
494 | unsigned long poslo) | ||
495 | { | ||
496 | return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo); | ||
497 | } | ||
498 | |||
499 | asmlinkage long compat_sys_readahead(int fd, | ||
500 | unsigned long offhi, | ||
501 | unsigned long offlo, | ||
502 | compat_size_t count) | ||
503 | { | ||
504 | return sys_readahead(fd, (offhi << 32) | offlo, count); | ||
505 | } | ||
506 | |||
507 | long compat_sys_fadvise64(int fd, | ||
508 | unsigned long offhi, | ||
509 | unsigned long offlo, | ||
510 | compat_size_t len, int advice) | ||
511 | { | ||
512 | return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice); | ||
513 | } | ||
514 | |||
515 | long compat_sys_fadvise64_64(int fd, | ||
516 | unsigned long offhi, unsigned long offlo, | ||
517 | unsigned long lenhi, unsigned long lenlo, | ||
518 | int advice) | ||
519 | { | ||
520 | return sys_fadvise64_64(fd, | ||
521 | (offhi << 32) | offlo, | ||
522 | (lenhi << 32) | lenlo, | ||
523 | advice); | ||
524 | } | ||
525 | |||
/*
 * Compat sendfile: the user's offset is a 32-bit compat_off_t, but the
 * native syscall wants a kernel off_t.  Copy it into a kernel-stack
 * off_t, widen the address limit so sys_sendfile's uaccess hits the
 * kernel copy, then write the updated offset back to user space.
 */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
				    compat_off_t __user *offset,
				    compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	/* KERNEL_DS lets sys_sendfile's {get,put}_user operate on &of. */
	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __user *) &of : NULL,
			   count);
	set_fs(old_fs);

	/* Propagate the advanced offset back to the 32-bit caller. */
	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
548 | |||
/*
 * Compat sendfile64: same kernel-stack bounce as compat_sys_sendfile
 * above, but for the 64-bit loff_t offset variant.
 */
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
				      compat_loff_t __user *offset,
				      compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	/* KERNEL_DS so sys_sendfile64's uaccess may touch &lof. */
	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	/* Write the advanced offset back to the compat caller. */
	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
571 | |||
/* This is just a version for 32-bit applications which does
 * not force O_LARGEFILE on.
 */

asmlinkage long sparc32_open(const char __user *filename,
			     int flags, int mode)
{
	return do_sys_open(AT_FDCWD, filename, flags, mode);
}
581 | |||
582 | extern unsigned long do_mremap(unsigned long addr, | ||
583 | unsigned long old_len, unsigned long new_len, | ||
584 | unsigned long flags, unsigned long new_addr); | ||
585 | |||
586 | asmlinkage unsigned long sys32_mremap(unsigned long addr, | ||
587 | unsigned long old_len, unsigned long new_len, | ||
588 | unsigned long flags, u32 __new_addr) | ||
589 | { | ||
590 | unsigned long ret = -EINVAL; | ||
591 | unsigned long new_addr = __new_addr; | ||
592 | |||
593 | if (unlikely(sparc_mmap_check(addr, old_len))) | ||
594 | goto out; | ||
595 | if (unlikely(sparc_mmap_check(new_addr, new_len))) | ||
596 | goto out; | ||
597 | down_write(¤t->mm->mmap_sem); | ||
598 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); | ||
599 | up_write(¤t->mm->mmap_sem); | ||
600 | out: | ||
601 | return ret; | ||
602 | } | ||
603 | |||
/* 32-bit layout of struct __sysctl_args: every pointer member is
 * carried as a u32 user address.
 */
struct __sysctl_args32 {
	u32 name;	/* user pointer to the name vector (int[]) */
	int nlen;	/* number of entries in the name vector */
	u32 oldval;	/* user pointer to old-value buffer, or 0 */
	u32 oldlenp;	/* user pointer to old-value length (u32), or 0 */
	u32 newval;	/* user pointer to new-value buffer, or 0 */
	u32 newlen;	/* length of the new value */
	u32 __unused[4];	/* padding; borrowed as scratch by sys32_sysctl */
};
613 | |||
/*
 * Compat sysctl(2): translate the 32-bit argument struct, then bounce
 * the old-length value through a native size_t slot carved out of the
 * user struct's own padding so do_sysctl's uaccess works unchanged.
 */
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
{
#ifndef CONFIG_SYSCTL_SYSCALL
	return -ENOSYS;
#else
	struct __sysctl_args32 tmp;
	int error;
	size_t oldlen, __user *oldlenp = NULL;
	/* 8-byte-aligned scratch address inside args->__unused[]. */
	unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;

	if (copy_from_user(&tmp, args, sizeof(tmp)))
		return -EFAULT;

	if (tmp.oldval && tmp.oldlenp) {
		/* Duh, this is ugly and might not work if sysctl_args
		   is in read-only memory, but do_sysctl does indirectly
		   a lot of uaccess in both directions and we'd have to
		   basically copy the whole sysctl.c here, and
		   glibc's __sysctl uses rw memory for the structure
		   anyway.  */
		if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
		    put_user(oldlen, (size_t __user *)addr))
			return -EFAULT;
		oldlenp = (size_t __user *)addr;
	}

	lock_kernel();
	error = do_sysctl((int __user *)(unsigned long) tmp.name,
			  tmp.nlen,
			  (void __user *)(unsigned long) tmp.oldval,
			  oldlenp,
			  (void __user *)(unsigned long) tmp.newval,
			  tmp.newlen);
	unlock_kernel();
	if (oldlenp) {
		if (!error) {
			/* Narrow the native size_t result back into the
			 * caller's 32-bit oldlenp.
			 */
			if (get_user(oldlen, (size_t __user *)addr) ||
			    put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
				error = -EFAULT;
		}
		/* Restore the padding bytes we used as scratch space. */
		if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
			error = -EFAULT;
	}
	return error;
#endif
}
660 | |||
661 | long sys32_lookup_dcookie(unsigned long cookie_high, | ||
662 | unsigned long cookie_low, | ||
663 | char __user *buf, size_t len) | ||
664 | { | ||
665 | return sys_lookup_dcookie((cookie_high << 32) | cookie_low, | ||
666 | buf, len); | ||
667 | } | ||
668 | |||
669 | long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags) | ||
670 | { | ||
671 | return sys_sync_file_range(fd, | ||
672 | (off_high << 32) | off_low, | ||
673 | (nb_high << 32) | nb_low, | ||
674 | flags); | ||
675 | } | ||
676 | |||
677 | asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, | ||
678 | u32 lenhi, u32 lenlo) | ||
679 | { | ||
680 | return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, | ||
681 | ((loff_t)lenhi << 32) | lenlo); | ||
682 | } | ||
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c new file mode 100644 index 000000000000..39749e32dc7e --- /dev/null +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -0,0 +1,914 @@ | |||
1 | /* linux/arch/sparc64/kernel/sys_sparc.c | ||
2 | * | ||
3 | * This file contains various random system calls that | ||
4 | * have a non-standard calling sequence on the Linux/sparc | ||
5 | * platform. | ||
6 | */ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/file.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/sem.h> | ||
15 | #include <linux/msg.h> | ||
16 | #include <linux/shm.h> | ||
17 | #include <linux/stat.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/utsname.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/syscalls.h> | ||
23 | #include <linux/ipc.h> | ||
24 | #include <linux/personality.h> | ||
25 | #include <linux/random.h> | ||
26 | |||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/utrap.h> | ||
29 | #include <asm/perfctr.h> | ||
30 | #include <asm/unistd.h> | ||
31 | |||
32 | #include "entry.h" | ||
33 | #include "systbls.h" | ||
34 | |||
35 | /* #define DEBUG_UNIMP_SYSCALL */ | ||
36 | |||
/* getpagesize(2): the base page size is a compile-time constant here. */
asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}
41 | |||
42 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) | ||
43 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) | ||
44 | |||
45 | /* Does addr --> addr+len fall within 4GB of the VA-space hole or | ||
46 | * overflow past the end of the 64-bit address space? | ||
47 | */ | ||
48 | static inline int invalid_64bit_range(unsigned long addr, unsigned long len) | ||
49 | { | ||
50 | unsigned long va_exclude_start, va_exclude_end; | ||
51 | |||
52 | va_exclude_start = VA_EXCLUDE_START; | ||
53 | va_exclude_end = VA_EXCLUDE_END; | ||
54 | |||
55 | if (unlikely(len >= va_exclude_start)) | ||
56 | return 1; | ||
57 | |||
58 | if (unlikely((addr + len) < addr)) | ||
59 | return 1; | ||
60 | |||
61 | if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || | ||
62 | ((addr + len) >= va_exclude_start && | ||
63 | (addr + len) < va_exclude_end))) | ||
64 | return 1; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /* Does start,end straddle the VA-space hole? */ | ||
70 | static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) | ||
71 | { | ||
72 | unsigned long va_exclude_start, va_exclude_end; | ||
73 | |||
74 | va_exclude_start = VA_EXCLUDE_START; | ||
75 | va_exclude_end = VA_EXCLUDE_END; | ||
76 | |||
77 | if (likely(start < va_exclude_start && end < va_exclude_start)) | ||
78 | return 0; | ||
79 | |||
80 | if (likely(start >= va_exclude_end && end >= va_exclude_end)) | ||
81 | return 0; | ||
82 | |||
83 | return 1; | ||
84 | } | ||
85 | |||
86 | /* These functions differ from the default implementations in | ||
87 | * mm/mmap.c in two ways: | ||
88 | * | ||
89 | * 1) For file backed MAP_SHARED mmap()'s we D-cache color align, | ||
90 | * for fixed such mappings we just validate what the user gave us. | ||
91 | * 2) For 64-bit tasks we avoid mapping anything within 4GB of | ||
92 | * the spitfire/niagara VA-hole. | ||
93 | */ | ||
94 | |||
95 | static inline unsigned long COLOUR_ALIGN(unsigned long addr, | ||
96 | unsigned long pgoff) | ||
97 | { | ||
98 | unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); | ||
99 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
100 | |||
101 | return base + off; | ||
102 | } | ||
103 | |||
104 | static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, | ||
105 | unsigned long pgoff) | ||
106 | { | ||
107 | unsigned long base = addr & ~(SHMLBA-1); | ||
108 | unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); | ||
109 | |||
110 | if (base + off <= addr) | ||
111 | return base + off; | ||
112 | return base - off; | ||
113 | } | ||
114 | |||
/*
 * Bottom-up unmapped-area search.  Colour-aligns file-backed/shared
 * mappings for D-cache aliasing and skips the 4GB-guarded VA hole for
 * 64-bit tasks (see the comment block above).
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	/* Colour-align anything that may alias in the D-cache. */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* Try the caller's hint first. */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* Start from the cached hint when the cached hole is too small. */
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Linear VMA walk looking for the first gap large enough. */
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		/* Leap over the VA hole rather than allocating into it. */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			/* Ran off the top: retry once from the bottom. */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole seen so far for the cache. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
199 | |||
/*
 * Top-down unmapped-area search for 32-bit tasks: allocate downward
 * from mmap_base, falling back to the bottom-up allocator on failure.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	/* Colour-align anything that may alias in the D-cache. */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	/* Walk downward, trying just below each VMA we collide with. */
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
314 | |||
/* Try to align mapping such that we align it as much as possible.
 * Over-allocates by (align_goal - PAGE_SIZE) so the result can be
 * rounded up to the goal, stepping down through smaller goals
 * (4MB -> 512K -> 64K -> page) until an allocation succeeds.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	/* Pick the largest alignment goal the mapping size justifies. */
	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			/* Success (not an errno): round up to the goal. */
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		/* Failed at this goal; step down and retry. */
		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
357 | |||
/* Essentially the same as PowerPC...
 * Chooses between the legacy bottom-up layout and the top-down layout
 * (32-bit tasks only), applying an ASLR offset when PF_RANDOMIZE is set.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			random_factor &= ((1 * 1024 * 1024) - 1);
		else
			random_factor = ((random_factor << PAGE_SHIFT) &
					 0xffffffffUL);
	}

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;
		unsigned long gap;

		/* Reserve a stack gap: clamp to [128MB, 5/6 of task size]. */
		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
399 | |||
/* brk(2) wrapper: reject requests that escape the 32-bit address
 * space or would grow the heap across the 64-bit VA hole; on a bad
 * request just report the current break, as sys_brk would.
 */
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
	/* People could try to be nasty and use ta 0x6d in 32bit programs */
	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
		return current->mm->brk;

	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
		return current->mm->brk;

	return sys_brk(brk);
}
411 | |||
412 | /* | ||
413 | * sys_pipe() is the normal C calling standard for creating | ||
414 | * a pipe. It's not the way unix traditionally does this, though. | ||
415 | */ | ||
416 | asmlinkage long sparc_pipe(struct pt_regs *regs) | ||
417 | { | ||
418 | int fd[2]; | ||
419 | int error; | ||
420 | |||
421 | error = do_pipe_flags(fd, 0); | ||
422 | if (error) | ||
423 | goto out; | ||
424 | regs->u_regs[UREG_I1] = fd[1]; | ||
425 | error = fd[0]; | ||
426 | out: | ||
427 | return error; | ||
428 | } | ||
429 | |||
430 | /* | ||
431 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
432 | * | ||
433 | * This is really horribly ugly. | ||
434 | */ | ||
435 | |||
436 | asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, | ||
437 | unsigned long third, void __user *ptr, long fifth) | ||
438 | { | ||
439 | long err; | ||
440 | |||
441 | /* No need for backward compatibility. We can start fresh... */ | ||
442 | if (call <= SEMCTL) { | ||
443 | switch (call) { | ||
444 | case SEMOP: | ||
445 | err = sys_semtimedop(first, ptr, | ||
446 | (unsigned)second, NULL); | ||
447 | goto out; | ||
448 | case SEMTIMEDOP: | ||
449 | err = sys_semtimedop(first, ptr, (unsigned)second, | ||
450 | (const struct timespec __user *) | ||
451 | (unsigned long) fifth); | ||
452 | goto out; | ||
453 | case SEMGET: | ||
454 | err = sys_semget(first, (int)second, (int)third); | ||
455 | goto out; | ||
456 | case SEMCTL: { | ||
457 | err = sys_semctl(first, second, | ||
458 | (int)third | IPC_64, | ||
459 | (union semun) ptr); | ||
460 | goto out; | ||
461 | } | ||
462 | default: | ||
463 | err = -ENOSYS; | ||
464 | goto out; | ||
465 | }; | ||
466 | } | ||
467 | if (call <= MSGCTL) { | ||
468 | switch (call) { | ||
469 | case MSGSND: | ||
470 | err = sys_msgsnd(first, ptr, (size_t)second, | ||
471 | (int)third); | ||
472 | goto out; | ||
473 | case MSGRCV: | ||
474 | err = sys_msgrcv(first, ptr, (size_t)second, fifth, | ||
475 | (int)third); | ||
476 | goto out; | ||
477 | case MSGGET: | ||
478 | err = sys_msgget((key_t)first, (int)second); | ||
479 | goto out; | ||
480 | case MSGCTL: | ||
481 | err = sys_msgctl(first, (int)second | IPC_64, ptr); | ||
482 | goto out; | ||
483 | default: | ||
484 | err = -ENOSYS; | ||
485 | goto out; | ||
486 | }; | ||
487 | } | ||
488 | if (call <= SHMCTL) { | ||
489 | switch (call) { | ||
490 | case SHMAT: { | ||
491 | ulong raddr; | ||
492 | err = do_shmat(first, ptr, (int)second, &raddr); | ||
493 | if (!err) { | ||
494 | if (put_user(raddr, | ||
495 | (ulong __user *) third)) | ||
496 | err = -EFAULT; | ||
497 | } | ||
498 | goto out; | ||
499 | } | ||
500 | case SHMDT: | ||
501 | err = sys_shmdt(ptr); | ||
502 | goto out; | ||
503 | case SHMGET: | ||
504 | err = sys_shmget(first, (size_t)second, (int)third); | ||
505 | goto out; | ||
506 | case SHMCTL: | ||
507 | err = sys_shmctl(first, (int)second | IPC_64, ptr); | ||
508 | goto out; | ||
509 | default: | ||
510 | err = -ENOSYS; | ||
511 | goto out; | ||
512 | }; | ||
513 | } else { | ||
514 | err = -ENOSYS; | ||
515 | } | ||
516 | out: | ||
517 | return err; | ||
518 | } | ||
519 | |||
520 | asmlinkage long sparc64_newuname(struct new_utsname __user *name) | ||
521 | { | ||
522 | int ret = sys_newuname(name); | ||
523 | |||
524 | if (current->personality == PER_LINUX32 && !ret) { | ||
525 | ret = (copy_to_user(name->machine, "sparc\0\0", 8) | ||
526 | ? -EFAULT : 0); | ||
527 | } | ||
528 | return ret; | ||
529 | } | ||
530 | |||
531 | asmlinkage long sparc64_personality(unsigned long personality) | ||
532 | { | ||
533 | int ret; | ||
534 | |||
535 | if (current->personality == PER_LINUX32 && | ||
536 | personality == PER_LINUX) | ||
537 | personality = PER_LINUX32; | ||
538 | ret = sys_personality(personality); | ||
539 | if (ret == PER_LINUX32) | ||
540 | ret = PER_LINUX; | ||
541 | |||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | int sparc_mmap_check(unsigned long addr, unsigned long len) | ||
546 | { | ||
547 | if (test_thread_flag(TIF_32BIT)) { | ||
548 | if (len >= STACK_TOP32) | ||
549 | return -EINVAL; | ||
550 | |||
551 | if (addr > STACK_TOP32 - len) | ||
552 | return -EINVAL; | ||
553 | } else { | ||
554 | if (len >= VA_EXCLUDE_START) | ||
555 | return -EINVAL; | ||
556 | |||
557 | if (invalid_64bit_range(addr, len)) | ||
558 | return -EINVAL; | ||
559 | } | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
/* Linux version of mmap: resolves the fd (unless MAP_ANONYMOUS) and
 * performs the mapping under mmap_sem.  The offset here is in bytes.
 */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	struct file * file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	/* These flags are meaningless coming from userspace. */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	len = PAGE_ALIGN(len);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap(file, addr, len, prot, flags, off);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}
589 | |||
590 | asmlinkage long sys64_munmap(unsigned long addr, size_t len) | ||
591 | { | ||
592 | long ret; | ||
593 | |||
594 | if (invalid_64bit_range(addr, len)) | ||
595 | return -EINVAL; | ||
596 | |||
597 | down_write(¤t->mm->mmap_sem); | ||
598 | ret = do_munmap(current->mm, addr, len); | ||
599 | up_write(¤t->mm->mmap_sem); | ||
600 | return ret; | ||
601 | } | ||
602 | |||
603 | extern unsigned long do_mremap(unsigned long addr, | ||
604 | unsigned long old_len, unsigned long new_len, | ||
605 | unsigned long flags, unsigned long new_addr); | ||
606 | |||
/* 64-bit mremap wrapper: 32-bit tasks must use the compat entry point
 * (sys32_mremap); both the old and the new range are validated against
 * the VA-hole rules before the move is done under mmap_sem.
 */
asmlinkage unsigned long sys64_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;
	if (unlikely(new_len >= VA_EXCLUDE_START))
		goto out;
	if (unlikely(sparc_mmap_check(addr, old_len)))
		goto out;
	if (unlikely(sparc_mmap_check(new_addr, new_len)))
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
628 | |||
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	/* NOTE(review): this counter is shared and unsynchronized; a
	 * race only affects how many warnings get printed, so it is
	 * deliberately tolerated.
	 */
	static int count;

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	/* %g1 carries the syscall number on sparc. */
	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}
645 | |||
/* #define DEBUG_SPARC_BREAKPOINT */

/* Trap handler for the breakpoint trap: truncates PC/nPC for 32-bit
 * tasks and delivers SIGTRAP/TRAP_BRKPT to the current process.
 */
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}
669 | |||
670 | extern void check_pending(int signum); | ||
671 | |||
672 | asmlinkage long sys_getdomainname(char __user *name, int len) | ||
673 | { | ||
674 | int nlen, err; | ||
675 | |||
676 | if (len < 0) | ||
677 | return -EINVAL; | ||
678 | |||
679 | down_read(&uts_sem); | ||
680 | |||
681 | nlen = strlen(utsname()->domainname) + 1; | ||
682 | err = -EINVAL; | ||
683 | if (nlen > len) | ||
684 | goto out; | ||
685 | |||
686 | err = -EFAULT; | ||
687 | if (!copy_to_user(name, utsname()->domainname, nlen)) | ||
688 | err = 0; | ||
689 | |||
690 | out: | ||
691 | up_read(&uts_sem); | ||
692 | return err; | ||
693 | } | ||
694 | |||
/* Install or query a per-thread user trap handler.
 *
 * @type:  utrap slot, UT_INSTRUCTION_EXCEPTION..UT_TRAP_INSTRUCTION_31
 * @new_p: new precise handler, or UTH_NOCHANGE to only read back state
 * @new_d: new deferred handler (deferred traps are not supported here:
 *         old_d always reads back NULL)
 * @old_p/@old_d: optional user locations receiving the previous handlers
 *
 * The utraps table is allocated lazily.  Slot 0 holds a reference
 * count (presumably so the table can be shared with children — TODO
 * confirm against the process-creation code); when the count is > 1
 * and the handler actually changes, the table is copied before the
 * write so sharers are unaffected.
 *
 * Returns 0, -EINVAL for a bad @type, -EFAULT on a bad user pointer,
 * or -ENOMEM if table allocation fails.
 */
asmlinkage long sys_utrap_install(utrap_entry_t type,
				  utrap_handler_t new_p,
				  utrap_handler_t new_d,
				  utrap_handler_t __user *old_p,
				  utrap_handler_t __user *old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	/* Query-only path: report current handlers without modifying. */
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				/* No table yet: every slot reads as NULL. */
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		/* First install: allocate a zeroed table; slot 0 becomes
		 * the reference count, slots 1..31 the handlers.
		 */
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		/* Copy-on-write: if the table is shared (refcount > 1) and
		 * this install would change the slot, duplicate it first.
		 */
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				/* Allocation failed: keep the shared table. */
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;	/* drop our reference on the old table */
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
755 | |||
/* Select the SPARC V9 memory model (0=TSO, 1=PSO, 2=RMO — values >= 3
 * are reserved, hence -EINVAL) for the current task by rewriting the
 * TSTATE.MM field in the saved trap state.
 */
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	/* TSTATE_MM masks the memory-model field; the shift by 14 places
	 * the new model value into that field.
	 */
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
764 | |||
765 | asmlinkage long sys_rt_sigaction(int sig, | ||
766 | const struct sigaction __user *act, | ||
767 | struct sigaction __user *oact, | ||
768 | void __user *restorer, | ||
769 | size_t sigsetsize) | ||
770 | { | ||
771 | struct k_sigaction new_ka, old_ka; | ||
772 | int ret; | ||
773 | |||
774 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
775 | if (sigsetsize != sizeof(sigset_t)) | ||
776 | return -EINVAL; | ||
777 | |||
778 | if (act) { | ||
779 | new_ka.ka_restorer = restorer; | ||
780 | if (copy_from_user(&new_ka.sa, act, sizeof(*act))) | ||
781 | return -EFAULT; | ||
782 | } | ||
783 | |||
784 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
785 | |||
786 | if (!ret && oact) { | ||
787 | if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) | ||
788 | return -EFAULT; | ||
789 | } | ||
790 | |||
791 | return ret; | ||
792 | } | ||
793 | |||
/* Invoked by rtrap code to update performance counters in
 * user space.  Folds the current %pic value into the per-thread
 * 64-bit software counters (low 32 bits -> counter 0, high 32 bits
 * -> counter 1), mirrors the totals to the user buffers, and resets
 * %pic.  __put_user() failures are ignored — this runs on the
 * trap-return path where there is no caller to report them to.
 */
asmlinkage void update_perfctrs(void)
{
	unsigned long pic, tmp;

	read_pic(pic);
	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
	__put_user(tmp, current_thread_info()->user_cntd0);
	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
	__put_user(tmp, current_thread_info()->user_cntd1);
	reset_pic();
}
808 | |||
809 | asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2) | ||
810 | { | ||
811 | int err = 0; | ||
812 | |||
813 | switch(opcode) { | ||
814 | case PERFCTR_ON: | ||
815 | current_thread_info()->pcr_reg = arg2; | ||
816 | current_thread_info()->user_cntd0 = (u64 __user *) arg0; | ||
817 | current_thread_info()->user_cntd1 = (u64 __user *) arg1; | ||
818 | current_thread_info()->kernel_cntd0 = | ||
819 | current_thread_info()->kernel_cntd1 = 0; | ||
820 | write_pcr(arg2); | ||
821 | reset_pic(); | ||
822 | set_thread_flag(TIF_PERFCTR); | ||
823 | break; | ||
824 | |||
825 | case PERFCTR_OFF: | ||
826 | err = -EINVAL; | ||
827 | if (test_thread_flag(TIF_PERFCTR)) { | ||
828 | current_thread_info()->user_cntd0 = | ||
829 | current_thread_info()->user_cntd1 = NULL; | ||
830 | current_thread_info()->pcr_reg = 0; | ||
831 | write_pcr(0); | ||
832 | clear_thread_flag(TIF_PERFCTR); | ||
833 | err = 0; | ||
834 | } | ||
835 | break; | ||
836 | |||
837 | case PERFCTR_READ: { | ||
838 | unsigned long pic, tmp; | ||
839 | |||
840 | if (!test_thread_flag(TIF_PERFCTR)) { | ||
841 | err = -EINVAL; | ||
842 | break; | ||
843 | } | ||
844 | read_pic(pic); | ||
845 | tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic); | ||
846 | err |= __put_user(tmp, current_thread_info()->user_cntd0); | ||
847 | tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32)); | ||
848 | err |= __put_user(tmp, current_thread_info()->user_cntd1); | ||
849 | reset_pic(); | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | case PERFCTR_CLRPIC: | ||
854 | if (!test_thread_flag(TIF_PERFCTR)) { | ||
855 | err = -EINVAL; | ||
856 | break; | ||
857 | } | ||
858 | current_thread_info()->kernel_cntd0 = | ||
859 | current_thread_info()->kernel_cntd1 = 0; | ||
860 | reset_pic(); | ||
861 | break; | ||
862 | |||
863 | case PERFCTR_SETPCR: { | ||
864 | u64 __user *user_pcr = (u64 __user *)arg0; | ||
865 | |||
866 | if (!test_thread_flag(TIF_PERFCTR)) { | ||
867 | err = -EINVAL; | ||
868 | break; | ||
869 | } | ||
870 | err |= __get_user(current_thread_info()->pcr_reg, user_pcr); | ||
871 | write_pcr(current_thread_info()->pcr_reg); | ||
872 | current_thread_info()->kernel_cntd0 = | ||
873 | current_thread_info()->kernel_cntd1 = 0; | ||
874 | reset_pic(); | ||
875 | break; | ||
876 | } | ||
877 | |||
878 | case PERFCTR_GETPCR: { | ||
879 | u64 __user *user_pcr = (u64 __user *)arg0; | ||
880 | |||
881 | if (!test_thread_flag(TIF_PERFCTR)) { | ||
882 | err = -EINVAL; | ||
883 | break; | ||
884 | } | ||
885 | err |= __put_user(current_thread_info()->pcr_reg, user_pcr); | ||
886 | break; | ||
887 | } | ||
888 | |||
889 | default: | ||
890 | err = -EINVAL; | ||
891 | break; | ||
892 | }; | ||
893 | return err; | ||
894 | } | ||
895 | |||
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	/* Pin the syscall number and arguments to the registers the trap
	 * handler expects: %g1 = __NR_execve, %o0..%o2 = args.
	 */
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	/* "t 0x6d" traps into the 64-bit syscall path.  On return the
	 * carry flag distinguishes error from success (see ret_sys_call),
	 * so start with -%o0 (negated errno) and conditionally overwrite
	 * with %o0 when carry is clear (success).
	 */
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S new file mode 100644 index 000000000000..7a6786a71363 --- /dev/null +++ b/arch/sparc/kernel/syscalls.S | |||
@@ -0,0 +1,279 @@ | |||
/* SunOS's execv() call only specifies the argv argument, the
 * environment settings are the same as the calling processes.
 */
sys_execve:
	/* Tail-call sparc_execve(pt_regs *). */
	sethi	%hi(sparc_execve), %g1
	ba,pt	%xcc, execve_merge
	 or	%g1, %lo(sparc_execve), %g1	/* delay slot */

#ifdef CONFIG_COMPAT
sunos_execv:
	/* SunOS execv(): zero the saved envp argument (%i2 slot), then
	 * fall through into the 32-bit execve path.
	 */
	stx	%g0, [%sp + PTREGS_OFF + PT_V9_I2]
sys32_execve:
	sethi	%hi(sparc32_execve), %g1
	or	%g1, %lo(sparc32_execve), %g1
#endif

execve_merge:
	flushw				/* spill all register windows first */
	jmpl	%g1, %g0
	 add	%sp, PTREGS_OFF, %o0	/* delay slot: arg0 = pt_regs */
21 | |||
	.align	32
	/* Small stubs: branch to the C implementation, loading the extra
	 * argument (usually a pt_regs pointer) in the delay slot.
	 */
sys_pipe:
	ba,pt	%xcc, sparc_pipe
	 add	%sp, PTREGS_OFF, %o0
sys_nis_syscall:
	ba,pt	%xcc, c_sys_nis_syscall
	 add	%sp, PTREGS_OFF, %o0
sys_memory_ordering:
	ba,pt	%xcc, sparc_memory_ordering
	 add	%sp, PTREGS_OFF, %o1	/* pt_regs is the second argument */
sys_sigaltstack:
	ba,pt	%xcc, do_sigaltstack
	 add	%i6, STACK_BIAS, %o2	/* arg2 = user stack pointer */
#ifdef CONFIG_COMPAT
sys32_sigstack:
	ba,pt	%xcc, do_sys32_sigstack
	 mov	%i6, %o2		/* compat: no stack bias */
sys32_sigaltstack:
	ba,pt	%xcc, do_sys32_sigaltstack
	 mov	%i6, %o2
#endif
	.align	32
	/* The sigreturn family calls its C handler with a hand-crafted
	 * return address pointing at label "1:" below, so the
	 * syscall-trace-leave check runs on the way back to userspace.
	 */
#ifdef CONFIG_COMPAT
sys32_sigreturn:
	add	%sp, PTREGS_OFF, %o0
	call	do_sigreturn32
	 add	%o7, 1f-.-4, %o7
	nop
#endif
sys_rt_sigreturn:
	add	%sp, PTREGS_OFF, %o0
	call	do_rt_sigreturn
	 add	%o7, 1f-.-4, %o7
	nop
#ifdef CONFIG_COMPAT
sys32_rt_sigreturn:
	add	%sp, PTREGS_OFF, %o0
	call	do_rt_sigreturn32
	 add	%o7, 1f-.-4, %o7
	nop
#endif
	.align	32
1:	ldx	[%g6 + TI_FLAGS], %l5
	/* If any tracing/seccomp/audit flag is set, run the trace-leave
	 * hook before heading to rtrap; otherwise go straight there.
	 */
	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
	be,pt	%icc, rtrap
	 nop
	call	syscall_trace_leave
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop
72 | |||
/* This is how fork() was meant to be done, 8 instruction entry.
 *
 * I questioned the following code briefly, let me clear things
 * up so you must not reason on it like I did.
 *
 * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
 * need it here because the only piece of window state we copy to
 * the child is the CWP register.  Even if the parent sleeps,
 * we are safe because we stuck it into pt_regs of the parent
 * so it will not change.
 *
 * XXX This raises the question, whether we can do the same on
 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
 * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
 * XXX fork_kwim in UREG_G1 (global registers are considered
 * XXX volatile across a system call in the sparc ABI I think
 * XXX if it isn't we can use regs->y instead, anyone who depends
 * XXX upon the Y register being preserved across a fork deserves
 * XXX to lose).
 *
 * In fact we should take advantage of that fact for other things
 * during system calls...
 */
	.align	32
sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
	/* 0x4000 | 0x0100 — presumably CLONE_VFORK | CLONE_VM; confirm
	 * against the clone flag definitions.
	 */
	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
	ba,pt	%xcc, sys_clone
sys_fork:
	/* Note: this clr is both the first insn of sys_fork AND the delay
	 * slot of the vfork branch above — both paths want %o1 = 0.
	 */
	 clr	%o1
	mov	SIGCHLD, %o0
sys_clone:
	flushw
	movrz	%o1, %fp, %o1	/* child stack defaults to parent's %fp */
	mov	0, %o3
	ba,pt	%xcc, sparc_do_fork
	 add	%sp, PTREGS_OFF, %o2	/* delay slot: pt_regs argument */
110 | |||
	.globl	ret_from_syscall
ret_from_syscall:
	/* Clear current_thread_info()->new_child, and
	 * check performance counter stuff too.
	 */
	stb	%g0, [%g6 + TI_NEW_CHILD]
	ldx	[%g6 + TI_FLAGS], %l0
	call	schedule_tail
	 mov	%g7, %o0
	andcc	%l0, _TIF_PERFCTR, %g0
	be,pt	%icc, 1f
	 nop
	/* Child inherited TIF_PERFCTR: restore %pcr from thread info. */
	ldx	[%g6 + TI_PCR], %o7
	wr	%g0, %o7, %pcr

	/* Blackbird errata workaround.  See commentary in
	 * smp.c:smp_percpu_timer_interrupt() for more
	 * information.
	 */
	ba,pt	%xcc, 99f
	 nop

	.align	64
99:	wr	%g0, %g0, %pic
	rd	%pic, %g0

1:	ba,pt	%xcc, ret_sys_call
	 ldx	[%sp + PTREGS_OFF + PT_V9_I0], %o0	/* syscall return value */
139 | |||
	.globl	sparc_exit
	.type	sparc_exit,#function
sparc_exit:
	/* The task is exiting and will never return to userspace, so its
	 * saved user windows need not be preserved: fold %otherwin into
	 * %cansave, zero %otherwin and TI_WSAVED, then exit.  Interrupts
	 * are disabled around the window-register update.
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IE, %pstate		/* toggle IE off */
	rdpr	%otherwin, %g1
	rdpr	%cansave, %g3
	add	%g3, %g1, %g3
	wrpr	%g3, 0x0, %cansave
	wrpr	%g0, 0x0, %otherwin
	wrpr	%g2, 0x0, %pstate		/* restore original pstate */
	ba,pt	%xcc, sys_exit
	 stb	%g0, [%g6 + TI_WSAVED]		/* delay slot */
	.size	sparc_exit,.-sparc_exit

linux_sparc_ni_syscall:
	/* Out-of-range syscall number: dispatch sys_ni_syscall instead. */
	sethi	%hi(sys_ni_syscall), %l7
	ba,pt	%xcc, 4f
	 or	%l7, %lo(sys_ni_syscall), %l7

linux_syscall_trace32:
	/* Syscall-entry trace hook for 32-bit syscalls.  A non-zero
	 * return aborts the call with -ENOSYS; otherwise reload the
	 * (zero-extended) arguments, since the tracer may have changed
	 * the saved registers.
	 */
	call	syscall_trace_enter
	 add	%sp, PTREGS_OFF, %o0
	brnz,pn	%o0, 3f
	 mov	-ENOSYS, %o0
	srl	%i0, 0, %o0
	srl	%i4, 0, %o4
	srl	%i1, 0, %o1
	srl	%i2, 0, %o2
	ba,pt	%xcc, 2f
	 srl	%i3, 0, %o3

linux_syscall_trace:
	/* Same as above for 64-bit syscalls (no zero extension). */
	call	syscall_trace_enter
	 add	%sp, PTREGS_OFF, %o0
	brnz,pn	%o0, 3f
	 mov	-ENOSYS, %o0
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	b,pt	%xcc, 2f
	 mov	%i4, %o4
183 | |||
184 | |||
/* Linux 32-bit system calls enter here...
 * %g1 = syscall number, %i0..%i5 = user arguments (zero-extended to
 * 64 bits on their way into %o0..%o5), %l7 = syscall table base.
 */
	.align	32
	.globl	linux_sparc_syscall32
linux_sparc_syscall32:
	/* Direct access to user regs, much faster. */
	cmp	%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn	%xcc, linux_sparc_ni_syscall		! CTI
	 srl	%i0, 0, %o0				! IEU0
	sll	%g1, 2, %l4				! IEU0	Group
	srl	%i4, 0, %o4				! IEU1
	lduw	[%l7 + %l4], %l7			! Load
	srl	%i1, 0, %o1				! IEU0	Group
	ldx	[%g6 + TI_FLAGS], %l0			! Load

	srl	%i5, 0, %o5				! IEU1
	srl	%i2, 0, %o2				! IEU0	Group
	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
	bne,pn	%icc, linux_syscall_trace32		! CTI
	 mov	%i0, %l5				! IEU1
	call	%l7					! CTI	Group brk forced
	 srl	%i3, 0, %o3				! IEU0
	ba,a,pt	%xcc, 3f

/* Linux native system calls enter here...
 * Same dispatch as above, but the arguments are passed through
 * unmodified (full 64-bit values).
 */
	.align	32
	.globl	linux_sparc_syscall
linux_sparc_syscall:
	/* Direct access to user regs, much faster. */
	cmp	%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn	%xcc, linux_sparc_ni_syscall		! CTI
	 mov	%i0, %o0				! IEU0
	sll	%g1, 2, %l4				! IEU0	Group
	mov	%i1, %o1				! IEU1
	lduw	[%l7 + %l4], %l7			! Load
4:	mov	%i2, %o2				! IEU0	Group
	ldx	[%g6 + TI_FLAGS], %l0			! Load

	mov	%i3, %o3				! IEU1
	mov	%i4, %o4				! IEU0	Group
	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
	bne,pn	%icc, linux_syscall_trace		! CTI	Group
	 mov	%i0, %l5				! IEU0
2:	call	%l7					! CTI	Group brk forced
	 mov	%i5, %o5				! IEU0
	nop
230 | |||
3:	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]	/* store return value */
ret_sys_call:
	/* Common syscall return: decide success vs error from the return
	 * value, set or clear the carry bits in TSTATE accordingly (this
	 * is the userspace error convention — see kernel_execve), advance
	 * PC/nPC, and run the trace-leave hook when tracing is active.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
	sra	%o0, 0, %o0
	mov	%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
	sllx	%g2, 32, %g2

	/* Check if force_successful_syscall_return()
	 * was invoked.
	 */
	ldub	[%g6 + TI_SYS_NOERROR], %l2
	brnz,a,pn %l2, 80f
	 stb	%g0, [%g6 + TI_SYS_NOERROR]	/* annulled unless taken */

	/* Values in [-ERESTART_RESTARTBLOCK, -1] (as unsigned) are errors. */
	cmp	%o0, -ERESTART_RESTARTBLOCK
	bgeu,pn	%xcc, 1f
	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
80:
	/* System call success, clear Carry condition code. */
	andn	%g3, %g2, %g3
	stx	%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn	%icc, linux_syscall_trace2
	 add	%l1, 0x4, %l2	! npc = npc+4
	stx	%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt	%xcc, rtrap
	 stx	%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]

1:
	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
	sub	%g0, %o0, %o0	/* negate: userspace sees positive errno */
	or	%g3, %g2, %g3
	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
	stx	%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn	%icc, linux_syscall_trace2
	 add	%l1, 0x4, %l2	! npc = npc+4
	stx	%l1, [%sp + PTREGS_OFF + PT_V9_TPC]

	b,pt	%xcc, rtrap
	 stx	%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
linux_syscall_trace2:
	/* Trace-leave hook, then the same PC/nPC advance as above. */
	call	syscall_trace_leave
	 add	%sp, PTREGS_OFF, %o0
	stx	%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt	%xcc, rtrap
	 stx	%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c new file mode 100644 index 000000000000..84e5ce146713 --- /dev/null +++ b/arch/sparc/kernel/sysfs.c | |||
@@ -0,0 +1,313 @@ | |||
1 | /* sysfs.c: Toplogy sysfs support code for sparc64. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/sysdev.h> | ||
6 | #include <linux/cpu.h> | ||
7 | #include <linux/smp.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/hypervisor.h> | ||
12 | #include <asm/spitfire.h> | ||
13 | |||
/* Per-cpu buffer the hypervisor writes MMU statistics into; cache-line
 * aligned (64 bytes) since its physical address is handed to the
 * hypervisor via sun4v_mmustat_conf().
 */
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

/* Generate a read-only sysfs show routine + attribute for one unsigned
 * long field of the per-cpu hv_mmu_statistics buffer.
 */
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)

/* I-MMU and D-MMU TSB hit/tick counters for each page size, for both
 * context 0 and non-zero contexts.
 */
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);

/* All counters collected under /sys/.../cpuN/mmu_stats/. */
static struct attribute *mmu_stat_attrs[] = {
	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};

static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};
98 | |||
99 | /* XXX convert to rusty's on_one_cpu */ | ||
100 | static unsigned long run_on_cpu(unsigned long cpu, | ||
101 | unsigned long (*func)(unsigned long), | ||
102 | unsigned long arg) | ||
103 | { | ||
104 | cpumask_t old_affinity = current->cpus_allowed; | ||
105 | unsigned long ret; | ||
106 | |||
107 | /* should return -EINVAL to userspace */ | ||
108 | if (set_cpus_allowed(current, cpumask_of_cpu(cpu))) | ||
109 | return 0; | ||
110 | |||
111 | ret = func(arg); | ||
112 | |||
113 | set_cpus_allowed(current, old_affinity); | ||
114 | |||
115 | return ret; | ||
116 | } | ||
117 | |||
/* Report whether MMU statistics collection is active on the current
 * cpu: the hypervisor returns a non-zero buffer real address when
 * enabled.  Runs on the target cpu via run_on_cpu().
 */
static unsigned long read_mmustat_enable(unsigned long junk)
{
	unsigned long stats_ra = 0;

	sun4v_mmustat_info(&stats_ra);

	return stats_ra ? 1 : 0;
}
126 | |||
127 | static unsigned long write_mmustat_enable(unsigned long val) | ||
128 | { | ||
129 | unsigned long ra, orig_ra; | ||
130 | |||
131 | if (val) | ||
132 | ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); | ||
133 | else | ||
134 | ra = 0UL; | ||
135 | |||
136 | return sun4v_mmustat_conf(ra, &orig_ra); | ||
137 | } | ||
138 | |||
/* sysfs read of mmustat_enable: query the enable state on the target
 * cpu itself (hypervisor state is per-cpu) and print it as hex.
 */
static ssize_t show_mmustat_enable(struct sys_device *s,
				struct sysdev_attribute *attr, char *buf)
{
	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
	return sprintf(buf, "%lx\n", val);
}
145 | |||
146 | static ssize_t store_mmustat_enable(struct sys_device *s, | ||
147 | struct sysdev_attribute *attr, const char *buf, | ||
148 | size_t count) | ||
149 | { | ||
150 | unsigned long val, err; | ||
151 | int ret = sscanf(buf, "%ld", &val); | ||
152 | |||
153 | if (ret != 1) | ||
154 | return -EINVAL; | ||
155 | |||
156 | err = run_on_cpu(s->id, write_mmustat_enable, val); | ||
157 | if (err) | ||
158 | return -EIO; | ||
159 | |||
160 | return count; | ||
161 | } | ||
162 | |||
163 | static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); | ||
164 | |||
/* Set at boot by check_mmu_stats() when the hypervisor supports the
 * MMU statistics API.
 */
static int mmu_stats_supported;

/* Create the mmustat_enable file and the mmu_stats attribute group for
 * one cpu's sysfs node; no-op when unsupported.
 */
static int register_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return 0;
	sysdev_create_file(s, &attr_mmustat_enable);
	return sysfs_create_group(&s->kobj, &mmu_stat_group);
}

#ifdef CONFIG_HOTPLUG_CPU
/* Tear down what register_mmu_stats() created (cpu going offline). */
static void unregister_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	sysdev_remove_file(s, &attr_mmustat_enable);
}
#endif
184 | |||
/* Generate a show routine printing an unsigned long cpuinfo_sparc
 * field for the cpu identified by dev->id.
 */
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

/* Same, for unsigned int fields. */
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}

SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);

/* Read-only per-cpu attributes exposed for every online cpu. */
static struct sysdev_attribute cpu_core_attrs[] = {
	_SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
	_SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	_SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	_SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
	_SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
};
218 | |||
/* Per-cpu struct cpu backing the sysfs cpu devices. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/* Populate a newly-online cpu's sysfs node with the core attributes
 * and (when supported) the MMU statistics files.
 */
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_create_file(s, &cpu_core_attrs[i]);

	register_mmu_stats(s);
}
232 | |||
#ifdef CONFIG_HOTPLUG_CPU
/* Undo register_cpu_online() when a cpu goes offline: remove the MMU
 * statistics files first, then the core attributes.
 */
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_remove_file(s, &cpu_core_attrs[i]);
}
#endif
245 | |||
/* CPU hotplug callback: create the sysfs files when a cpu comes
 * online, and (with CONFIG_HOTPLUG_CPU) remove them when it dies.
 */
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call = sysfs_cpu_notify,
};
269 | |||
270 | static void __init check_mmu_stats(void) | ||
271 | { | ||
272 | unsigned long dummy1, err; | ||
273 | |||
274 | if (tlb_type != hypervisor) | ||
275 | return; | ||
276 | |||
277 | err = sun4v_mmustat_info(&dummy1); | ||
278 | if (!err) | ||
279 | mmu_stats_supported = 1; | ||
280 | } | ||
281 | |||
/* Register every possible NUMA node with the sysfs topology layer;
 * compiles away to nothing on !CONFIG_NUMA.
 */
static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
#endif
}

/* Subsystem init: register nodes, probe for MMU statistics support,
 * hook cpu hotplug, then register every possible cpu (populating the
 * per-cpu attribute files for those already online).
 */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();

	check_mmu_stats();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}

subsys_initcall(topology_init);
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h new file mode 100644 index 000000000000..bc9f5dac4069 --- /dev/null +++ b/arch/sparc/kernel/systbls.h | |||
@@ -0,0 +1,51 @@ | |||
/* Prototypes for the sparc64 system-call entry points implemented in C
 * and referenced from the assembly syscall tables.
 */
#ifndef _SYSTBLS_H
#define _SYSTBLS_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/utsname.h>
#include <asm/utrap.h>
#include <asm/signal.h>

extern asmlinkage unsigned long sys_getpagesize(void);
extern asmlinkage unsigned long sparc_brk(unsigned long brk);
extern asmlinkage long sparc_pipe(struct pt_regs *regs);
extern asmlinkage long sys_ipc(unsigned int call, int first,
			       unsigned long second,
			       unsigned long third,
			       void __user *ptr, long fifth);
extern asmlinkage long sparc64_newuname(struct new_utsname __user *name);
extern asmlinkage long sparc64_personality(unsigned long personality);
extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
					 unsigned long prot, unsigned long flags,
					 unsigned long fd, unsigned long off);
extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
					     unsigned long old_len,
					     unsigned long new_len,
					     unsigned long flags,
					     unsigned long new_addr);
extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
extern asmlinkage long sys_getdomainname(char __user *name, int len);
extern asmlinkage long sys_utrap_install(utrap_entry_t type,
					 utrap_handler_t new_p,
					 utrap_handler_t new_d,
					 utrap_handler_t __user *old_p,
					 utrap_handler_t __user *old_d);
extern asmlinkage long sparc_memory_ordering(unsigned long model,
					     struct pt_regs *regs);
extern asmlinkage long sys_rt_sigaction(int sig,
					const struct sigaction __user *act,
					struct sigaction __user *oact,
					void __user *restorer,
					size_t sigsetsize);
extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0,
				   unsigned long arg1, unsigned long arg2);

/* Signal-frame helpers called from the assembly trap paths. */
extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
extern asmlinkage long sys_sigpause(unsigned int set);
extern asmlinkage long sys_sigsuspend(old_sigset_t set);
extern void do_rt_sigreturn(struct pt_regs *regs);

#endif /* _SYSTBLS_H */
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S new file mode 100644 index 000000000000..9fc78cf354bd --- /dev/null +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -0,0 +1,159 @@ | |||
1 | /* systbls.S: System call entry point tables for OS compatibility. | ||
2 | * The native Linux system call table lives here also. | ||
3 | * | ||
4 | * Copyright (C) 1995, 1996, 2007 David S. Miller (davem@davemloft.net) | ||
5 | * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | * | ||
7 | * Based upon preliminary work which is: | ||
8 | * | ||
9 | * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu) | ||
10 | */ | ||
11 | |||
12 | |||
13 | .text | ||
14 | .align 4 | ||
15 | |||
16 | #ifdef CONFIG_COMPAT | ||
17 | /* First, the 32-bit Linux native syscall table. */ | ||
18 | |||
19 | .globl sys_call_table32 | ||
20 | sys_call_table32: | ||
21 | /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write | ||
22 | /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link | ||
23 | /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod | ||
24 | /*15*/ .word sys_chmod, sys_lchown16, sparc_brk, sys32_perfctr, sys32_lseek | ||
25 | /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 | ||
26 | /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause | ||
27 | /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice | ||
28 | .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile | ||
29 | /*40*/ .word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid | ||
30 | .word sys32_umount, sys_setgid16, sys_getgid16, sys32_signal, sys_geteuid16 | ||
31 | /*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl | ||
32 | .word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve | ||
33 | /*60*/ .word sys32_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize | ||
34 | .word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid | ||
35 | /*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect | ||
36 | .word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys_getgroups16 | ||
37 | /*80*/ .word sys_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64 | ||
38 | .word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid | ||
39 | /*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid | ||
40 | .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall | ||
41 | /*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending | ||
42 | .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid | ||
43 | /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall | ||
44 | .word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd | ||
45 | /*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod | ||
46 | .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate | ||
47 | /*130*/ .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall | ||
48 | .word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 | ||
49 | /*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit | ||
50 | .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write | ||
51 | /*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 | ||
52 | .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount | ||
53 | /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall | ||
54 | .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr | ||
55 | /*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents | ||
56 | .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr | ||
57 | /*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall | ||
58 | .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname | ||
59 | /*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl | ||
60 | .word sys32_epoll_wait, sys32_ioprio_set, sys_getppid, sys32_sigaction, sys_sgetmask | ||
61 | /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir | ||
62 | .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 | ||
63 | /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, compat_sys_sysinfo | ||
64 | .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex | ||
65 | /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid | ||
66 | .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 | ||
67 | /*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64 | ||
68 | .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall | ||
69 | /*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler | ||
70 | .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep | ||
71 | /*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl | ||
72 | .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep | ||
73 | /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun | ||
74 | .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy | ||
75 | /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink | ||
76 | .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid | ||
77 | /*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat | ||
78 | .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 | ||
79 | /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat | ||
80 | .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare | ||
81 | /*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy | ||
82 | .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait | ||
83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate | ||
84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 | ||
85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4 | ||
86 | |||
87 | #endif /* CONFIG_COMPAT */ | ||
88 | |||
89 | /* Now the 64-bit native Linux syscall table. */ | ||
90 | |||
91 | .align 4 | ||
92 | .globl sys_call_table64, sys_call_table | ||
93 | sys_call_table64: | ||
94 | sys_call_table: | ||
95 | /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write | ||
96 | /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link | ||
97 | /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod | ||
98 | /*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek | ||
99 | /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid | ||
100 | /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall | ||
101 | /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice | ||
102 | .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64 | ||
103 | /*40*/ .word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall | ||
104 | .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid | ||
105 | /*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl | ||
106 | .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve | ||
107 | /*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize | ||
108 | .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall | ||
109 | /*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect | ||
110 | .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups | ||
111 | /*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall | ||
112 | .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall | ||
113 | /*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall | ||
114 | .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept | ||
115 | /*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending | ||
116 | .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid | ||
117 | /*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg | ||
118 | .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd | ||
119 | /*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod | ||
120 | .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate | ||
121 | /*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown | ||
122 | .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64 | ||
123 | /*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit | ||
124 | .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write | ||
125 | /*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 | ||
126 | .word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount | ||
127 | /*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install | ||
128 | .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr | ||
129 | /*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents | ||
130 | .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr | ||
131 | /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall | ||
132 | .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname | ||
133 | /*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl | ||
134 | .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask | ||
135 | /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall | ||
136 | .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 | ||
137 | /*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo | ||
138 | .word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex | ||
139 | /*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid | ||
140 | .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid | ||
141 | /*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64 | ||
142 | .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall | ||
143 | /*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler | ||
144 | .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep | ||
145 | /*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl | ||
146 | .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep | ||
147 | /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun | ||
148 | .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy | ||
149 | /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink | ||
150 | .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid | ||
151 | /*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat | ||
152 | .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64 | ||
153 | /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat | ||
154 | .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare | ||
155 | /*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy | ||
156 | .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait | ||
157 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | ||
158 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | ||
159 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4 | ||
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c new file mode 100644 index 000000000000..141da3759091 --- /dev/null +++ b/arch/sparc/kernel/time_64.c | |||
@@ -0,0 +1,862 @@ | |||
1 | /* time.c: UltraSparc timer and TOD clock support. | ||
2 | * | ||
3 | * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) | ||
5 | * | ||
6 | * Based largely on code which is: | ||
7 | * | ||
8 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) | ||
9 | */ | ||
10 | |||
11 | #include <linux/errno.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/smp_lock.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/param.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/time.h> | ||
21 | #include <linux/timex.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/ioport.h> | ||
24 | #include <linux/mc146818rtc.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/profile.h> | ||
27 | #include <linux/bcd.h> | ||
28 | #include <linux/jiffies.h> | ||
29 | #include <linux/cpufreq.h> | ||
30 | #include <linux/percpu.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/rtc.h> | ||
33 | #include <linux/rtc/m48t59.h> | ||
34 | #include <linux/kernel_stat.h> | ||
35 | #include <linux/clockchips.h> | ||
36 | #include <linux/clocksource.h> | ||
37 | #include <linux/of_device.h> | ||
38 | #include <linux/platform_device.h> | ||
39 | |||
40 | #include <asm/oplib.h> | ||
41 | #include <asm/timer.h> | ||
42 | #include <asm/irq.h> | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/prom.h> | ||
45 | #include <asm/starfire.h> | ||
46 | #include <asm/smp.h> | ||
47 | #include <asm/sections.h> | ||
48 | #include <asm/cpudata.h> | ||
49 | #include <asm/uaccess.h> | ||
50 | #include <asm/irq_regs.h> | ||
51 | |||
52 | #include "entry.h" | ||
53 | |||
54 | DEFINE_SPINLOCK(rtc_lock); | ||
55 | |||
56 | #define TICK_PRIV_BIT (1UL << 63) | ||
57 | #define TICKCMP_IRQ_BIT (1UL << 63) | ||
58 | |||
59 | #ifdef CONFIG_SMP | ||
60 | unsigned long profile_pc(struct pt_regs *regs) | ||
61 | { | ||
62 | unsigned long pc = instruction_pointer(regs); | ||
63 | |||
64 | if (in_lock_functions(pc)) | ||
65 | return regs->u_regs[UREG_RETPC]; | ||
66 | return pc; | ||
67 | } | ||
68 | EXPORT_SYMBOL(profile_pc); | ||
69 | #endif | ||
70 | |||
71 | static void tick_disable_protection(void) | ||
72 | { | ||
73 | /* Set things up so user can access tick register for profiling | ||
74 | * purposes. Also workaround BB_ERRATA_1 by doing a dummy | ||
75 | * read back of %tick after writing it. | ||
76 | */ | ||
77 | __asm__ __volatile__( | ||
78 | " ba,pt %%xcc, 1f\n" | ||
79 | " nop\n" | ||
80 | " .align 64\n" | ||
81 | "1: rd %%tick, %%g2\n" | ||
82 | " add %%g2, 6, %%g2\n" | ||
83 | " andn %%g2, %0, %%g2\n" | ||
84 | " wrpr %%g2, 0, %%tick\n" | ||
85 | " rdpr %%tick, %%g0" | ||
86 | : /* no outputs */ | ||
87 | : "r" (TICK_PRIV_BIT) | ||
88 | : "g2"); | ||
89 | } | ||
90 | |||
91 | static void tick_disable_irq(void) | ||
92 | { | ||
93 | __asm__ __volatile__( | ||
94 | " ba,pt %%xcc, 1f\n" | ||
95 | " nop\n" | ||
96 | " .align 64\n" | ||
97 | "1: wr %0, 0x0, %%tick_cmpr\n" | ||
98 | " rd %%tick_cmpr, %%g0" | ||
99 | : /* no outputs */ | ||
100 | : "r" (TICKCMP_IRQ_BIT)); | ||
101 | } | ||
102 | |||
103 | static void tick_init_tick(void) | ||
104 | { | ||
105 | tick_disable_protection(); | ||
106 | tick_disable_irq(); | ||
107 | } | ||
108 | |||
109 | static unsigned long tick_get_tick(void) | ||
110 | { | ||
111 | unsigned long ret; | ||
112 | |||
113 | __asm__ __volatile__("rd %%tick, %0\n\t" | ||
114 | "mov %0, %0" | ||
115 | : "=r" (ret)); | ||
116 | |||
117 | return ret & ~TICK_PRIV_BIT; | ||
118 | } | ||
119 | |||
120 | static int tick_add_compare(unsigned long adj) | ||
121 | { | ||
122 | unsigned long orig_tick, new_tick, new_compare; | ||
123 | |||
124 | __asm__ __volatile__("rd %%tick, %0" | ||
125 | : "=r" (orig_tick)); | ||
126 | |||
127 | orig_tick &= ~TICKCMP_IRQ_BIT; | ||
128 | |||
129 | /* Workaround for Spitfire Errata (#54 I think??), I discovered | ||
130 | * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch | ||
131 | * number 103640. | ||
132 | * | ||
133 | * On Blackbird writes to %tick_cmpr can fail, the | ||
134 | * workaround seems to be to execute the wr instruction | ||
135 | * at the start of an I-cache line, and perform a dummy | ||
136 | * read back from %tick_cmpr right after writing to it. -DaveM | ||
137 | */ | ||
138 | __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" | ||
139 | " add %1, %2, %0\n\t" | ||
140 | ".align 64\n" | ||
141 | "1:\n\t" | ||
142 | "wr %0, 0, %%tick_cmpr\n\t" | ||
143 | "rd %%tick_cmpr, %%g0\n\t" | ||
144 | : "=r" (new_compare) | ||
145 | : "r" (orig_tick), "r" (adj)); | ||
146 | |||
147 | __asm__ __volatile__("rd %%tick, %0" | ||
148 | : "=r" (new_tick)); | ||
149 | new_tick &= ~TICKCMP_IRQ_BIT; | ||
150 | |||
151 | return ((long)(new_tick - (orig_tick+adj))) > 0L; | ||
152 | } | ||
153 | |||
154 | static unsigned long tick_add_tick(unsigned long adj) | ||
155 | { | ||
156 | unsigned long new_tick; | ||
157 | |||
158 | /* Also need to handle Blackbird bug here too. */ | ||
159 | __asm__ __volatile__("rd %%tick, %0\n\t" | ||
160 | "add %0, %1, %0\n\t" | ||
161 | "wrpr %0, 0, %%tick\n\t" | ||
162 | : "=&r" (new_tick) | ||
163 | : "r" (adj)); | ||
164 | |||
165 | return new_tick; | ||
166 | } | ||
167 | |||
168 | static struct sparc64_tick_ops tick_operations __read_mostly = { | ||
169 | .name = "tick", | ||
170 | .init_tick = tick_init_tick, | ||
171 | .disable_irq = tick_disable_irq, | ||
172 | .get_tick = tick_get_tick, | ||
173 | .add_tick = tick_add_tick, | ||
174 | .add_compare = tick_add_compare, | ||
175 | .softint_mask = 1UL << 0, | ||
176 | }; | ||
177 | |||
178 | struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; | ||
179 | |||
180 | static void stick_disable_irq(void) | ||
181 | { | ||
182 | __asm__ __volatile__( | ||
183 | "wr %0, 0x0, %%asr25" | ||
184 | : /* no outputs */ | ||
185 | : "r" (TICKCMP_IRQ_BIT)); | ||
186 | } | ||
187 | |||
188 | static void stick_init_tick(void) | ||
189 | { | ||
190 | /* Writes to the %tick and %stick register are not | ||
191 | * allowed on sun4v. The Hypervisor controls that | ||
192 | * bit, per-strand. | ||
193 | */ | ||
194 | if (tlb_type != hypervisor) { | ||
195 | tick_disable_protection(); | ||
196 | tick_disable_irq(); | ||
197 | |||
198 | /* Let the user get at STICK too. */ | ||
199 | __asm__ __volatile__( | ||
200 | " rd %%asr24, %%g2\n" | ||
201 | " andn %%g2, %0, %%g2\n" | ||
202 | " wr %%g2, 0, %%asr24" | ||
203 | : /* no outputs */ | ||
204 | : "r" (TICK_PRIV_BIT) | ||
205 | : "g1", "g2"); | ||
206 | } | ||
207 | |||
208 | stick_disable_irq(); | ||
209 | } | ||
210 | |||
211 | static unsigned long stick_get_tick(void) | ||
212 | { | ||
213 | unsigned long ret; | ||
214 | |||
215 | __asm__ __volatile__("rd %%asr24, %0" | ||
216 | : "=r" (ret)); | ||
217 | |||
218 | return ret & ~TICK_PRIV_BIT; | ||
219 | } | ||
220 | |||
221 | static unsigned long stick_add_tick(unsigned long adj) | ||
222 | { | ||
223 | unsigned long new_tick; | ||
224 | |||
225 | __asm__ __volatile__("rd %%asr24, %0\n\t" | ||
226 | "add %0, %1, %0\n\t" | ||
227 | "wr %0, 0, %%asr24\n\t" | ||
228 | : "=&r" (new_tick) | ||
229 | : "r" (adj)); | ||
230 | |||
231 | return new_tick; | ||
232 | } | ||
233 | |||
234 | static int stick_add_compare(unsigned long adj) | ||
235 | { | ||
236 | unsigned long orig_tick, new_tick; | ||
237 | |||
238 | __asm__ __volatile__("rd %%asr24, %0" | ||
239 | : "=r" (orig_tick)); | ||
240 | orig_tick &= ~TICKCMP_IRQ_BIT; | ||
241 | |||
242 | __asm__ __volatile__("wr %0, 0, %%asr25" | ||
243 | : /* no outputs */ | ||
244 | : "r" (orig_tick + adj)); | ||
245 | |||
246 | __asm__ __volatile__("rd %%asr24, %0" | ||
247 | : "=r" (new_tick)); | ||
248 | new_tick &= ~TICKCMP_IRQ_BIT; | ||
249 | |||
250 | return ((long)(new_tick - (orig_tick+adj))) > 0L; | ||
251 | } | ||
252 | |||
253 | static struct sparc64_tick_ops stick_operations __read_mostly = { | ||
254 | .name = "stick", | ||
255 | .init_tick = stick_init_tick, | ||
256 | .disable_irq = stick_disable_irq, | ||
257 | .get_tick = stick_get_tick, | ||
258 | .add_tick = stick_add_tick, | ||
259 | .add_compare = stick_add_compare, | ||
260 | .softint_mask = 1UL << 16, | ||
261 | }; | ||
262 | |||
263 | /* On Hummingbird the STICK/STICK_CMPR register is implemented | ||
264 | * in I/O space. There are two 64-bit registers each, the | ||
265 | * first holds the low 32-bits of the value and the second holds | ||
266 | * the high 32-bits. | ||
267 | * | ||
268 | * Since STICK is constantly updating, we have to access it carefully. | ||
269 | * | ||
270 | * The sequence we use to read is: | ||
271 | * 1) read high | ||
272 | * 2) read low | ||
273 | * 3) read high again, if it rolled re-read both low and high again. | ||
274 | * | ||
275 | * Writing STICK safely is also tricky: | ||
276 | * 1) write low to zero | ||
277 | * 2) write high | ||
278 | * 3) write low | ||
279 | */ | ||
280 | #define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL | ||
281 | #define HBIRD_STICK_ADDR 0x1fe0000f070UL | ||
282 | |||
283 | static unsigned long __hbird_read_stick(void) | ||
284 | { | ||
285 | unsigned long ret, tmp1, tmp2, tmp3; | ||
286 | unsigned long addr = HBIRD_STICK_ADDR+8; | ||
287 | |||
288 | __asm__ __volatile__("ldxa [%1] %5, %2\n" | ||
289 | "1:\n\t" | ||
290 | "sub %1, 0x8, %1\n\t" | ||
291 | "ldxa [%1] %5, %3\n\t" | ||
292 | "add %1, 0x8, %1\n\t" | ||
293 | "ldxa [%1] %5, %4\n\t" | ||
294 | "cmp %4, %2\n\t" | ||
295 | "bne,a,pn %%xcc, 1b\n\t" | ||
296 | " mov %4, %2\n\t" | ||
297 | "sllx %4, 32, %4\n\t" | ||
298 | "or %3, %4, %0\n\t" | ||
299 | : "=&r" (ret), "=&r" (addr), | ||
300 | "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3) | ||
301 | : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr)); | ||
302 | |||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static void __hbird_write_stick(unsigned long val) | ||
307 | { | ||
308 | unsigned long low = (val & 0xffffffffUL); | ||
309 | unsigned long high = (val >> 32UL); | ||
310 | unsigned long addr = HBIRD_STICK_ADDR; | ||
311 | |||
312 | __asm__ __volatile__("stxa %%g0, [%0] %4\n\t" | ||
313 | "add %0, 0x8, %0\n\t" | ||
314 | "stxa %3, [%0] %4\n\t" | ||
315 | "sub %0, 0x8, %0\n\t" | ||
316 | "stxa %2, [%0] %4" | ||
317 | : "=&r" (addr) | ||
318 | : "0" (addr), "r" (low), "r" (high), | ||
319 | "i" (ASI_PHYS_BYPASS_EC_E)); | ||
320 | } | ||
321 | |||
322 | static void __hbird_write_compare(unsigned long val) | ||
323 | { | ||
324 | unsigned long low = (val & 0xffffffffUL); | ||
325 | unsigned long high = (val >> 32UL); | ||
326 | unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL; | ||
327 | |||
328 | __asm__ __volatile__("stxa %3, [%0] %4\n\t" | ||
329 | "sub %0, 0x8, %0\n\t" | ||
330 | "stxa %2, [%0] %4" | ||
331 | : "=&r" (addr) | ||
332 | : "0" (addr), "r" (low), "r" (high), | ||
333 | "i" (ASI_PHYS_BYPASS_EC_E)); | ||
334 | } | ||
335 | |||
336 | static void hbtick_disable_irq(void) | ||
337 | { | ||
338 | __hbird_write_compare(TICKCMP_IRQ_BIT); | ||
339 | } | ||
340 | |||
341 | static void hbtick_init_tick(void) | ||
342 | { | ||
343 | tick_disable_protection(); | ||
344 | |||
345 | /* XXX This seems to be necessary to 'jumpstart' Hummingbird | ||
346 | * XXX into actually sending STICK interrupts. I think because | ||
347 | * XXX of how we store %tick_cmpr in head.S this somehow resets the | ||
348 | * XXX {TICK + STICK} interrupt mux. -DaveM | ||
349 | */ | ||
350 | __hbird_write_stick(__hbird_read_stick()); | ||
351 | |||
352 | hbtick_disable_irq(); | ||
353 | } | ||
354 | |||
355 | static unsigned long hbtick_get_tick(void) | ||
356 | { | ||
357 | return __hbird_read_stick() & ~TICK_PRIV_BIT; | ||
358 | } | ||
359 | |||
360 | static unsigned long hbtick_add_tick(unsigned long adj) | ||
361 | { | ||
362 | unsigned long val; | ||
363 | |||
364 | val = __hbird_read_stick() + adj; | ||
365 | __hbird_write_stick(val); | ||
366 | |||
367 | return val; | ||
368 | } | ||
369 | |||
370 | static int hbtick_add_compare(unsigned long adj) | ||
371 | { | ||
372 | unsigned long val = __hbird_read_stick(); | ||
373 | unsigned long val2; | ||
374 | |||
375 | val &= ~TICKCMP_IRQ_BIT; | ||
376 | val += adj; | ||
377 | __hbird_write_compare(val); | ||
378 | |||
379 | val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT; | ||
380 | |||
381 | return ((long)(val2 - val)) > 0L; | ||
382 | } | ||
383 | |||
384 | static struct sparc64_tick_ops hbtick_operations __read_mostly = { | ||
385 | .name = "hbtick", | ||
386 | .init_tick = hbtick_init_tick, | ||
387 | .disable_irq = hbtick_disable_irq, | ||
388 | .get_tick = hbtick_get_tick, | ||
389 | .add_tick = hbtick_add_tick, | ||
390 | .add_compare = hbtick_add_compare, | ||
391 | .softint_mask = 1UL << 0, | ||
392 | }; | ||
393 | |||
394 | static unsigned long timer_ticks_per_nsec_quotient __read_mostly; | ||
395 | |||
396 | int update_persistent_clock(struct timespec now) | ||
397 | { | ||
398 | struct rtc_device *rtc = rtc_class_open("rtc0"); | ||
399 | int err = -1; | ||
400 | |||
401 | if (rtc) { | ||
402 | err = rtc_set_mmss(rtc, now.tv_sec); | ||
403 | rtc_class_close(rtc); | ||
404 | } | ||
405 | |||
406 | return err; | ||
407 | } | ||
408 | |||
409 | unsigned long cmos_regs; | ||
410 | EXPORT_SYMBOL(cmos_regs); | ||
411 | |||
412 | static struct resource rtc_cmos_resource; | ||
413 | |||
414 | static struct platform_device rtc_cmos_device = { | ||
415 | .name = "rtc_cmos", | ||
416 | .id = -1, | ||
417 | .resource = &rtc_cmos_resource, | ||
418 | .num_resources = 1, | ||
419 | }; | ||
420 | |||
421 | static int __devinit rtc_probe(struct of_device *op, const struct of_device_id *match) | ||
422 | { | ||
423 | struct resource *r; | ||
424 | |||
425 | printk(KERN_INFO "%s: RTC regs at 0x%lx\n", | ||
426 | op->node->full_name, op->resource[0].start); | ||
427 | |||
428 | /* The CMOS RTC driver only accepts IORESOURCE_IO, so cons | ||
429 | * up a fake resource so that the probe works for all cases. | ||
430 | * When the RTC is behind an ISA bus it will have IORESOURCE_IO | ||
431 | * already, whereas when it's behind EBUS is will be IORESOURCE_MEM. | ||
432 | */ | ||
433 | |||
434 | r = &rtc_cmos_resource; | ||
435 | r->flags = IORESOURCE_IO; | ||
436 | r->name = op->resource[0].name; | ||
437 | r->start = op->resource[0].start; | ||
438 | r->end = op->resource[0].end; | ||
439 | |||
440 | cmos_regs = op->resource[0].start; | ||
441 | return platform_device_register(&rtc_cmos_device); | ||
442 | } | ||
443 | |||
444 | static struct of_device_id __initdata rtc_match[] = { | ||
445 | { | ||
446 | .name = "rtc", | ||
447 | .compatible = "m5819", | ||
448 | }, | ||
449 | { | ||
450 | .name = "rtc", | ||
451 | .compatible = "isa-m5819p", | ||
452 | }, | ||
453 | { | ||
454 | .name = "rtc", | ||
455 | .compatible = "isa-m5823p", | ||
456 | }, | ||
457 | { | ||
458 | .name = "rtc", | ||
459 | .compatible = "ds1287", | ||
460 | }, | ||
461 | {}, | ||
462 | }; | ||
463 | |||
464 | static struct of_platform_driver rtc_driver = { | ||
465 | .match_table = rtc_match, | ||
466 | .probe = rtc_probe, | ||
467 | .driver = { | ||
468 | .name = "rtc", | ||
469 | }, | ||
470 | }; | ||
471 | |||
472 | static struct platform_device rtc_bq4802_device = { | ||
473 | .name = "rtc-bq4802", | ||
474 | .id = -1, | ||
475 | .num_resources = 1, | ||
476 | }; | ||
477 | |||
478 | static int __devinit bq4802_probe(struct of_device *op, const struct of_device_id *match) | ||
479 | { | ||
480 | |||
481 | printk(KERN_INFO "%s: BQ4802 regs at 0x%lx\n", | ||
482 | op->node->full_name, op->resource[0].start); | ||
483 | |||
484 | rtc_bq4802_device.resource = &op->resource[0]; | ||
485 | return platform_device_register(&rtc_bq4802_device); | ||
486 | } | ||
487 | |||
488 | static struct of_device_id __initdata bq4802_match[] = { | ||
489 | { | ||
490 | .name = "rtc", | ||
491 | .compatible = "bq4802", | ||
492 | }, | ||
493 | {}, | ||
494 | }; | ||
495 | |||
496 | static struct of_platform_driver bq4802_driver = { | ||
497 | .match_table = bq4802_match, | ||
498 | .probe = bq4802_probe, | ||
499 | .driver = { | ||
500 | .name = "bq4802", | ||
501 | }, | ||
502 | }; | ||
503 | |||
504 | static unsigned char mostek_read_byte(struct device *dev, u32 ofs) | ||
505 | { | ||
506 | struct platform_device *pdev = to_platform_device(dev); | ||
507 | void __iomem *regs = (void __iomem *) pdev->resource[0].start; | ||
508 | |||
509 | return readb(regs + ofs); | ||
510 | } | ||
511 | |||
512 | static void mostek_write_byte(struct device *dev, u32 ofs, u8 val) | ||
513 | { | ||
514 | struct platform_device *pdev = to_platform_device(dev); | ||
515 | void __iomem *regs = (void __iomem *) pdev->resource[0].start; | ||
516 | |||
517 | writeb(val, regs + ofs); | ||
518 | } | ||
519 | |||
520 | static struct m48t59_plat_data m48t59_data = { | ||
521 | .read_byte = mostek_read_byte, | ||
522 | .write_byte = mostek_write_byte, | ||
523 | }; | ||
524 | |||
525 | static struct platform_device m48t59_rtc = { | ||
526 | .name = "rtc-m48t59", | ||
527 | .id = 0, | ||
528 | .num_resources = 1, | ||
529 | .dev = { | ||
530 | .platform_data = &m48t59_data, | ||
531 | }, | ||
532 | }; | ||
533 | |||
534 | static int __devinit mostek_probe(struct of_device *op, const struct of_device_id *match) | ||
535 | { | ||
536 | struct device_node *dp = op->node; | ||
537 | |||
538 | /* On an Enterprise system there can be multiple mostek clocks. | ||
539 | * We should only match the one that is on the central FHC bus. | ||
540 | */ | ||
541 | if (!strcmp(dp->parent->name, "fhc") && | ||
542 | strcmp(dp->parent->parent->name, "central") != 0) | ||
543 | return -ENODEV; | ||
544 | |||
545 | printk(KERN_INFO "%s: Mostek regs at 0x%lx\n", | ||
546 | dp->full_name, op->resource[0].start); | ||
547 | |||
548 | m48t59_rtc.resource = &op->resource[0]; | ||
549 | return platform_device_register(&m48t59_rtc); | ||
550 | } | ||
551 | |||
552 | static struct of_device_id __initdata mostek_match[] = { | ||
553 | { | ||
554 | .name = "eeprom", | ||
555 | }, | ||
556 | {}, | ||
557 | }; | ||
558 | |||
/* OF platform driver for the Mostek M48T59 clock chip. */
static struct of_platform_driver mostek_driver = {
	.match_table	= mostek_match,
	.probe		= mostek_probe,
	.driver		= {
		.name	= "mostek",
	},
};
566 | |||
/* Hypervisor-backed RTC used on sun4v machines. */
static struct platform_device rtc_sun4v_device = {
	.name		= "rtc-sun4v",
	.id		= -1,
};

/* Starfire systems access the TOD clock through firmware. */
static struct platform_device rtc_starfire_device = {
	.name		= "rtc-starfire",
	.id		= -1,
};
576 | |||
577 | static int __init clock_init(void) | ||
578 | { | ||
579 | if (this_is_starfire) | ||
580 | return platform_device_register(&rtc_starfire_device); | ||
581 | |||
582 | if (tlb_type == hypervisor) | ||
583 | return platform_device_register(&rtc_sun4v_device); | ||
584 | |||
585 | (void) of_register_driver(&rtc_driver, &of_platform_bus_type); | ||
586 | (void) of_register_driver(&mostek_driver, &of_platform_bus_type); | ||
587 | (void) of_register_driver(&bq4802_driver, &of_platform_bus_type); | ||
588 | |||
589 | return 0; | ||
590 | } | ||
591 | |||
592 | /* Must be after subsys_initcall() so that busses are probed. Must | ||
593 | * be before device_initcall() because things like the RTC driver | ||
594 | * need to see the clock registers. | ||
595 | */ | ||
596 | fs_initcall(clock_init); | ||
597 | |||
598 | /* This is gets the master TICK_INT timer going. */ | ||
599 | static unsigned long sparc64_init_timers(void) | ||
600 | { | ||
601 | struct device_node *dp; | ||
602 | unsigned long freq; | ||
603 | |||
604 | dp = of_find_node_by_path("/"); | ||
605 | if (tlb_type == spitfire) { | ||
606 | unsigned long ver, manuf, impl; | ||
607 | |||
608 | __asm__ __volatile__ ("rdpr %%ver, %0" | ||
609 | : "=&r" (ver)); | ||
610 | manuf = ((ver >> 48) & 0xffff); | ||
611 | impl = ((ver >> 32) & 0xffff); | ||
612 | if (manuf == 0x17 && impl == 0x13) { | ||
613 | /* Hummingbird, aka Ultra-IIe */ | ||
614 | tick_ops = &hbtick_operations; | ||
615 | freq = of_getintprop_default(dp, "stick-frequency", 0); | ||
616 | } else { | ||
617 | tick_ops = &tick_operations; | ||
618 | freq = local_cpu_data().clock_tick; | ||
619 | } | ||
620 | } else { | ||
621 | tick_ops = &stick_operations; | ||
622 | freq = of_getintprop_default(dp, "stick-frequency", 0); | ||
623 | } | ||
624 | |||
625 | return freq; | ||
626 | } | ||
627 | |||
/* Per-cpu snapshot of clock_tick and the cpufreq frequency it was
 * sampled at; used to rescale clock_tick across cpufreq transitions.
 */
struct freq_table {
	unsigned long clock_tick_ref;	/* clock_tick at reference freq */
	unsigned int ref_freq;		/* reference frequency (kHz) */
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };
633 | |||
634 | unsigned long sparc64_get_clock_tick(unsigned int cpu) | ||
635 | { | ||
636 | struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); | ||
637 | |||
638 | if (ft->clock_tick_ref) | ||
639 | return ft->clock_tick_ref; | ||
640 | return cpu_data(cpu).clock_tick; | ||
641 | } | ||
642 | |||
643 | #ifdef CONFIG_CPU_FREQ | ||
644 | |||
645 | static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | ||
646 | void *data) | ||
647 | { | ||
648 | struct cpufreq_freqs *freq = data; | ||
649 | unsigned int cpu = freq->cpu; | ||
650 | struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); | ||
651 | |||
652 | if (!ft->ref_freq) { | ||
653 | ft->ref_freq = freq->old; | ||
654 | ft->clock_tick_ref = cpu_data(cpu).clock_tick; | ||
655 | } | ||
656 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || | ||
657 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || | ||
658 | (val == CPUFREQ_RESUMECHANGE)) { | ||
659 | cpu_data(cpu).clock_tick = | ||
660 | cpufreq_scale(ft->clock_tick_ref, | ||
661 | ft->ref_freq, | ||
662 | freq->new); | ||
663 | } | ||
664 | |||
665 | return 0; | ||
666 | } | ||
667 | |||
/* Hooked into the cpufreq transition chain at core_initcall time. */
static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call = sparc64_cpufreq_notifier
};

/* Register the transition notifier; return value of registration is
 * intentionally ignored (scaling simply stays disabled on failure).
 */
static int __init register_sparc64_cpufreq_notifier(void)
{

	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(register_sparc64_cpufreq_notifier);
681 | |||
682 | #endif /* CONFIG_CPU_FREQ */ | ||
683 | |||
684 | static int sparc64_next_event(unsigned long delta, | ||
685 | struct clock_event_device *evt) | ||
686 | { | ||
687 | return tick_ops->add_compare(delta) ? -ETIME : 0; | ||
688 | } | ||
689 | |||
690 | static void sparc64_timer_setup(enum clock_event_mode mode, | ||
691 | struct clock_event_device *evt) | ||
692 | { | ||
693 | switch (mode) { | ||
694 | case CLOCK_EVT_MODE_ONESHOT: | ||
695 | case CLOCK_EVT_MODE_RESUME: | ||
696 | break; | ||
697 | |||
698 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
699 | tick_ops->disable_irq(); | ||
700 | break; | ||
701 | |||
702 | case CLOCK_EVT_MODE_PERIODIC: | ||
703 | case CLOCK_EVT_MODE_UNUSED: | ||
704 | WARN_ON(1); | ||
705 | break; | ||
706 | }; | ||
707 | } | ||
708 | |||
/* Template clockevent device, copied into each cpu's sparc64_events
 * slot by setup_sparc64_timer().  name/mult/shift are filled in at
 * boot once the tick frequency is known.
 */
static struct clock_event_device sparc64_clockevent = {
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= sparc64_timer_setup,
	.set_next_event	= sparc64_next_event,
	.rating		= 100,
	.shift		= 30,
	.irq		= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
718 | |||
719 | void timer_interrupt(int irq, struct pt_regs *regs) | ||
720 | { | ||
721 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
722 | unsigned long tick_mask = tick_ops->softint_mask; | ||
723 | int cpu = smp_processor_id(); | ||
724 | struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); | ||
725 | |||
726 | clear_softint(tick_mask); | ||
727 | |||
728 | irq_enter(); | ||
729 | |||
730 | kstat_this_cpu.irqs[0]++; | ||
731 | |||
732 | if (unlikely(!evt->event_handler)) { | ||
733 | printk(KERN_WARNING | ||
734 | "Spurious SPARC64 timer interrupt on cpu %d\n", cpu); | ||
735 | } else | ||
736 | evt->event_handler(evt); | ||
737 | |||
738 | irq_exit(); | ||
739 | |||
740 | set_irq_regs(old_regs); | ||
741 | } | ||
742 | |||
/* Per-cpu timer bring-up: initialize the tick hardware with local
 * interrupts disabled, then register this cpu's clockevent device.
 */
void __devinit setup_sparc64_timer(void)
{
	struct clock_event_device *sevt;
	unsigned long pstate;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick();

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sevt = &__get_cpu_var(sparc64_events);

	/* Copy the template, then bind it to this cpu only. */
	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
	sevt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(sevt);
}
770 | |||
/* Fixed-point shift used by sched_clock()'s tick->ns conversion. */
#define SPARC64_NSEC_PER_CYC_SHIFT	10UL

/* Free-running %tick/%stick clocksource; name, mult and read are
 * filled in by time_init() once the frequency is known.
 */
static struct clocksource clocksource_tick = {
	.rating		= 100,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
779 | |||
780 | static void __init setup_clockevent_multiplier(unsigned long hz) | ||
781 | { | ||
782 | unsigned long mult, shift = 32; | ||
783 | |||
784 | while (1) { | ||
785 | mult = div_sc(hz, NSEC_PER_SEC, shift); | ||
786 | if (mult && (mult >> 32UL) == 0UL) | ||
787 | break; | ||
788 | |||
789 | shift--; | ||
790 | } | ||
791 | |||
792 | sparc64_clockevent.shift = shift; | ||
793 | sparc64_clockevent.mult = mult; | ||
794 | } | ||
795 | |||
796 | static unsigned long tb_ticks_per_usec __read_mostly; | ||
797 | |||
798 | void __delay(unsigned long loops) | ||
799 | { | ||
800 | unsigned long bclock, now; | ||
801 | |||
802 | bclock = tick_ops->get_tick(); | ||
803 | do { | ||
804 | now = tick_ops->get_tick(); | ||
805 | } while ((now-bclock) < loops); | ||
806 | } | ||
807 | EXPORT_SYMBOL(__delay); | ||
808 | |||
/* Delay for 'usecs' microseconds by spinning on the tick counter. */
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
814 | |||
/* Boot-time timer setup: probe the tick source, register the raw tick
 * clocksource, compute the clockevent scaling, and bring up the boot
 * cpu's timer.  Order matters: later steps consume values computed by
 * earlier ones.
 */
void __init time_init(void)
{
	unsigned long freq = sparc64_init_timers();

	tb_ticks_per_usec = freq / USEC_PER_SEC;

	/* Precompute the multiplier used by sched_clock(). */
	timer_ticks_per_nsec_quotient =
		clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);

	clocksource_tick.name = tick_ops->name;
	clocksource_tick.mult =
		clocksource_hz2mult(freq,
				    clocksource_tick.shift);
	clocksource_tick.read = tick_ops->get_tick;

	printk("clocksource: mult[%x] shift[%d]\n",
	       clocksource_tick.mult, clocksource_tick.shift);

	clocksource_register(&clocksource_tick);

	sparc64_clockevent.name = tick_ops->name;

	setup_clockevent_multiplier(freq);

	sparc64_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
	sparc64_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &sparc64_clockevent);

	printk("clockevent: mult[%lx] shift[%d]\n",
	       sparc64_clockevent.mult, sparc64_clockevent.shift);

	setup_sparc64_timer();
}
849 | |||
850 | unsigned long long sched_clock(void) | ||
851 | { | ||
852 | unsigned long ticks = tick_ops->get_tick(); | ||
853 | |||
854 | return (ticks * timer_ticks_per_nsec_quotient) | ||
855 | >> SPARC64_NSEC_PER_CYC_SHIFT; | ||
856 | } | ||
857 | |||
/* Read the free-running tick counter (used by delay calibration). */
int __devinit read_current_timer(unsigned long *timer_val)
{
	*timer_val = tick_ops->get_tick();
	return 0;
}
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S new file mode 100644 index 000000000000..da1b781b5e65 --- /dev/null +++ b/arch/sparc/kernel/trampoline_64.S | |||
@@ -0,0 +1,417 @@ | |||
1 | /* | ||
2 | * trampoline.S: Jump start slave processors on sparc64. | ||
3 | * | ||
4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #include <linux/init.h> | ||
8 | |||
9 | #include <asm/head.h> | ||
10 | #include <asm/asi.h> | ||
11 | #include <asm/lsu.h> | ||
12 | #include <asm/dcr.h> | ||
13 | #include <asm/dcu.h> | ||
14 | #include <asm/pstate.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/spitfire.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/mmu.h> | ||
21 | #include <asm/hypervisor.h> | ||
22 | #include <asm/cpudata.h> | ||
23 | |||
	/* Strings passed to OBP: the client-interface service name and
	 * the two TLB-load method names used by the "call-method"
	 * invocations in startup_continue below.
	 */
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	/* XXX __cpuinit this thing XXX */
#define TRAMP_STACK_SIZE	1024
	/* Temporary stack used before the cpu takes over the trap
	 * table; it lives inside the locked kernel image so it is
	 * always mapped.
	 */
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE
40 | |||
	__CPUINIT
	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
	/* Entry point for secondary cpus.  Dispatch to per-cpu-type
	 * init which sets up caches/timers, then converge at
	 * startup_continue.
	 */
sparc64_cpu_startup:
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt		%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings.  */
	ba,pt		%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	/* Program dispatch control and enable caches/MMUs in the
	 * data cache unit control register.
	 */
	mov		DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr		%g1, %asr18

	sethi		%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or		%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx		%g5, 32, %g5
	or		%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa		%g5, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	/* fallthru */

cheetah_generic_startup:
	/* Clear all TSB extension registers. */
	mov		TSB_EXTENSION_P, %g3
	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g3] ASI_IMMU
	membar		#Sync

	mov		TSB_EXTENSION_S, %g3
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync

	mov		TSB_EXTENSION_N, %g3
	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g3] ASI_IMMU
	membar		#Sync
	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi		%hi(0x80000000), %g5
	sllx		%g5, 32, %g5
	wr		%g5, %asr25

	ba,pt		%xcc, startup_continue
	 nop

spitfire_startup:
	/* Enable I/D caches and I/D MMUs. */
	mov		(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa		%g1, [%g0] ASI_LSU_CONTROL
	membar		#Sync
startup_continue:
	/* %o0 (thread descriptor pointer passed by the master cpu) is
	 * preserved in %l0 for use after the trap table takeover.
	 */
	mov		%o0, %l0
	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

	/* Disable %tick_cmpr interrupts on sun4u. */
	sethi		%hi(0x80000000), %g2
	sllx		%g2, 32, %g2
	wr		%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 'num_kernel_image_mappings' consequetive entries.
	 */
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn		%g1, 1b
	 nop

	/* Switch to the PROM stack recorded in p1275buf. */
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x10], %l2
	add		%l2, -(192 + 128), %sp
	flushw

	/* Setup the loop variables:
	 * %l3: VADDR base
	 * %l4: TTE base
	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
	 * %l6: Number of TTE entries to map
	 * %l7: Highest TTE entry number, we count down
	 */
	sethi		%hi(KERNBASE), %l3
	sethi		%hi(kern_locked_tte_data), %l4
	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
	clr		%l5
	sethi		%hi(num_kernel_image_mappings), %l6
	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
	add		%l6, 1, %l6

	/* Spitfire has 16 locked TLB entries, cheetah 64. */
	mov		15, %l7
	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)

	mov		63, %l7
2:

3:
	/* Lock into I-MMU */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx		%l5, 22, %g1

	add		%l3, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add		%l4, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub		%l7, %l5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x38]

	/* Invoke the PROM cif handler with the argument array. */
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* Lock into D-MMU */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx		%l5, 22, %g1

	add		%l3, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add		%l4, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub		%l7, %l5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x38]

	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	add		%l5, 1, %l5
	cmp		%l5, %l6
	bne,pt		%xcc, 3b
	 nop

	/* Release the PROM entry lock taken above. */
	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]

	ba,pt		%xcc, after_lock_tlb
	 nop
219 | |||
	/* sun4v: pin the kernel image into both TLBs using the
	 * hypervisor mmu_map_perm_addr service instead of OBP.
	 * Loop variables mirror the sun4u path above.
	 */
niagara_lock_tlb:
	sethi		%hi(KERNBASE), %l3
	sethi		%hi(kern_locked_tte_data), %l4
	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
	clr		%l5
	sethi		%hi(num_kernel_image_mappings), %l6
	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
	add		%l6, 1, %l6

1:
	/* Permanently map this 4MB chunk into the I-MMU. */
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx		%l5, 22, %g2
	add		%l3, %g2, %o0
	clr		%o1
	add		%l4, %g2, %o2
	mov		HV_MMU_IMMU, %o3
	ta		HV_FAST_TRAP

	/* ... and into the D-MMU. */
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx		%l5, 22, %g2
	add		%l3, %g2, %o0
	clr		%o1
	add		%l4, %g2, %o2
	mov		HV_MMU_DMMU, %o3
	ta		HV_FAST_TRAP

	add		%l5, 1, %l5
	cmp		%l5, %l6
	bne,pt		%xcc, 1b
	 nop
250 | |||
after_lock_tlb:
	/* Drop to a clean privileged state and clear FPU enable. */
	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr		%g0, 0, %fprs

	wr		%g0, ASI_P, %asi

	mov		PRIMARY_CONTEXT, %g7

	/* 661: sequences below are patched to the sun4v ASI at boot
	 * via the .sun4v_1insn_patch section.
	 */
661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync
	mov		SECONDARY_CONTEXT, %g7

661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync

	/* Everything we do here, until we properly take over the
	 * trap table, must be done with extreme care.  We cannot
	 * make any references to %g6 (current thread pointer),
	 * %g4 (current task pointer), or %g5 (base of current cpu's
	 * per-cpu area) until we properly take over the trap table
	 * from the firmware and hypervisor.
	 *
	 * Get onto temporary stack which is in the locked kernel image.
	 */
	sethi		%hi(tramp_stack), %g1
	or		%g1, %lo(tramp_stack), %g1
	add		%g1, TRAMP_STACK_SIZE, %g1
	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	mov		0, %fp

	/* Put garbage in these registers to trap any access to them.  */
	set		0xdeadbeef, %g4
	set		0xdeadbeef, %g5
	set		0xdeadbeef, %g6

	call		init_irqwork_curcpu
	 nop

	/* tlb_type == 3 means hypervisor (sun4v). */
	sethi		%hi(tlb_type), %g3
	lduw		[%g3 + %lo(tlb_type)], %g2
	cmp		%g2, 3
	bne,pt		%icc, 1f
	 nop

	call		hard_smp_processor_id
	 nop

	call		sun4v_register_mondo_queues
	 nop

1:	call		init_cur_cpu_trap
	 ldx		[%l0], %o0

	/* Start using proper page size encodings in ctx register.  */
	sethi		%hi(sparc64_kern_pri_context), %g3
	ldx		[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov		PRIMARY_CONTEXT, %g1

661:	stxa		%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g2, [%g1] ASI_MMU
	.previous

	membar		#Sync

	wrpr		%g0, 0, %wstate

	/* Serialize against other cpus entering the PROM. */
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn		%g1, 1b
	 nop

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi		%hi(init_thread_union), %g6
	or		%g6, %lo(init_thread_union), %g6

	sethi		%hi(is_sun4v), %o0
	lduw		[%o0 + %lo(is_sun4v)], %o0
	brz,pt		%o0, 2f
	 nop

	/* sun4v: point the scratchpad MMU fault status area at this
	 * cpu's trap block before installing the trap table.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add		%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa		%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi		%hi(KERNBASE), %g3
	sub		%g2, %g3, %g2
	sethi		%hi(kern_base), %g3
	ldx		[%g3 + %lo(kern_base)], %g3
	add		%g2, %g3, %o1
	sethi		%hi(sparc64_ttable_tl0), %o0

	/* SUNW,set-trap-table with two args (table, MMFSA paddr). */
	set		prom_set_trap_table_name, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		2, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		0, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	stx		%o0, [%sp + 2047 + 128 + 0x18]
	stx		%o1, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	ba,pt		%xcc, 3f
	 nop

	/* sun4u: SUNW,set-trap-table takes just the table address. */
2:	sethi		%hi(sparc64_ttable_tl0), %o0
	set		prom_set_trap_table_name, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		0, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	stx		%o0, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

3:	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]

	/* Trap table is ours: load the real thread/task pointers and
	 * switch to the thread's kernel stack.
	 */
	ldx		[%l0], %g6
	ldx		[%g6 + TI_TASK], %g4

	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	/* Enable interrupts and enter the scheduler; cpu_idle never
	 * returns, cpu_panic catches the impossible.
	 */
	rdpr		%pstate, %o1
	or		%o1, PSTATE_IE, %o1
	wrpr		%o1, 0, %pstate

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop
1:	b,a,pt		%xcc, 1b

	.align		8
sparc64_cpu_startup_end:
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c new file mode 100644 index 000000000000..4638af2f55a0 --- /dev/null +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -0,0 +1,2600 @@ | |||
1 | /* arch/sparc64/kernel/traps.c | ||
2 | * | ||
3 | * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * I like traps on v9, :)))) | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/signal.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kdebug.h> | ||
20 | |||
21 | #include <asm/smp.h> | ||
22 | #include <asm/delay.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/oplib.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/unistd.h> | ||
29 | #include <asm/uaccess.h> | ||
30 | #include <asm/fpumacro.h> | ||
31 | #include <asm/lsu.h> | ||
32 | #include <asm/dcu.h> | ||
33 | #include <asm/estate.h> | ||
34 | #include <asm/chafsr.h> | ||
35 | #include <asm/sfafsr.h> | ||
36 | #include <asm/psrcompat.h> | ||
37 | #include <asm/processor.h> | ||
38 | #include <asm/timer.h> | ||
39 | #include <asm/head.h> | ||
40 | #include <asm/prom.h> | ||
41 | #include <asm/memctrl.h> | ||
42 | |||
43 | #include "entry.h" | ||
44 | #include "kstack.h" | ||
45 | |||
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;	/* trap type */
	} trapstack[4];
	unsigned long tl;		/* trap level at time of error */
};
60 | |||
61 | static void dump_tl1_traplog(struct tl1_traplog *p) | ||
62 | { | ||
63 | int i, limit; | ||
64 | |||
65 | printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " | ||
66 | "dumping track stack.\n", p->tl); | ||
67 | |||
68 | limit = (tlb_type == hypervisor) ? 2 : 4; | ||
69 | for (i = 0; i < limit; i++) { | ||
70 | printk(KERN_EMERG | ||
71 | "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " | ||
72 | "TNPC[%016lx] TT[%lx]\n", | ||
73 | i + 1, | ||
74 | p->trapstack[i].tstate, p->trapstack[i].tpc, | ||
75 | p->trapstack[i].tnpc, p->trapstack[i].tt); | ||
76 | printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); | ||
77 | } | ||
78 | } | ||
79 | |||
80 | void bad_trap(struct pt_regs *regs, long lvl) | ||
81 | { | ||
82 | char buffer[32]; | ||
83 | siginfo_t info; | ||
84 | |||
85 | if (notify_die(DIE_TRAP, "bad trap", regs, | ||
86 | 0, lvl, SIGTRAP) == NOTIFY_STOP) | ||
87 | return; | ||
88 | |||
89 | if (lvl < 0x100) { | ||
90 | sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl); | ||
91 | die_if_kernel(buffer, regs); | ||
92 | } | ||
93 | |||
94 | lvl -= 0x100; | ||
95 | if (regs->tstate & TSTATE_PRIV) { | ||
96 | sprintf(buffer, "Kernel bad sw trap %lx", lvl); | ||
97 | die_if_kernel(buffer, regs); | ||
98 | } | ||
99 | if (test_thread_flag(TIF_32BIT)) { | ||
100 | regs->tpc &= 0xffffffff; | ||
101 | regs->tnpc &= 0xffffffff; | ||
102 | } | ||
103 | info.si_signo = SIGILL; | ||
104 | info.si_errno = 0; | ||
105 | info.si_code = ILL_ILLTRP; | ||
106 | info.si_addr = (void __user *)regs->tpc; | ||
107 | info.si_trapno = lvl; | ||
108 | force_sig_info(SIGILL, &info, current); | ||
109 | } | ||
110 | |||
111 | void bad_trap_tl1(struct pt_regs *regs, long lvl) | ||
112 | { | ||
113 | char buffer[32]; | ||
114 | |||
115 | if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, | ||
116 | 0, lvl, SIGTRAP) == NOTIFY_STOP) | ||
117 | return; | ||
118 | |||
119 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
120 | |||
121 | sprintf (buffer, "Bad trap %lx at tl>0", lvl); | ||
122 | die_if_kernel (buffer, regs); | ||
123 | } | ||
124 | |||
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the file/line of a BUG() before the trap machinery kills
 * the task; bust_spinlocks makes sure the message gets out.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
132 | |||
133 | static DEFINE_SPINLOCK(dimm_handler_lock); | ||
134 | static dimm_printer_t dimm_handler; | ||
135 | |||
136 | static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen) | ||
137 | { | ||
138 | unsigned long flags; | ||
139 | int ret = -ENODEV; | ||
140 | |||
141 | spin_lock_irqsave(&dimm_handler_lock, flags); | ||
142 | if (dimm_handler) { | ||
143 | ret = dimm_handler(synd_code, paddr, buf, buflen); | ||
144 | } else if (tlb_type == spitfire) { | ||
145 | if (prom_getunumber(synd_code, paddr, buf, buflen) == -1) | ||
146 | ret = -EINVAL; | ||
147 | else | ||
148 | ret = 0; | ||
149 | } else | ||
150 | ret = -ENODEV; | ||
151 | spin_unlock_irqrestore(&dimm_handler_lock, flags); | ||
152 | |||
153 | return ret; | ||
154 | } | ||
155 | |||
156 | int register_dimm_printer(dimm_printer_t func) | ||
157 | { | ||
158 | unsigned long flags; | ||
159 | int ret = 0; | ||
160 | |||
161 | spin_lock_irqsave(&dimm_handler_lock, flags); | ||
162 | if (!dimm_handler) | ||
163 | dimm_handler = func; | ||
164 | else | ||
165 | ret = -EEXIST; | ||
166 | spin_unlock_irqrestore(&dimm_handler_lock, flags); | ||
167 | |||
168 | return ret; | ||
169 | } | ||
170 | EXPORT_SYMBOL_GPL(register_dimm_printer); | ||
171 | |||
/* Remove 'func' as the DIMM label printer; a no-op if some other
 * handler (or none) is installed.
 */
void unregister_dimm_printer(dimm_printer_t func)
{
	unsigned long flags;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (dimm_handler == func)
		dimm_handler = NULL;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);
}
EXPORT_SYMBOL_GPL(unregister_dimm_printer);
182 | |||
183 | void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) | ||
184 | { | ||
185 | siginfo_t info; | ||
186 | |||
187 | if (notify_die(DIE_TRAP, "instruction access exception", regs, | ||
188 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
189 | return; | ||
190 | |||
191 | if (regs->tstate & TSTATE_PRIV) { | ||
192 | printk("spitfire_insn_access_exception: SFSR[%016lx] " | ||
193 | "SFAR[%016lx], going.\n", sfsr, sfar); | ||
194 | die_if_kernel("Iax", regs); | ||
195 | } | ||
196 | if (test_thread_flag(TIF_32BIT)) { | ||
197 | regs->tpc &= 0xffffffff; | ||
198 | regs->tnpc &= 0xffffffff; | ||
199 | } | ||
200 | info.si_signo = SIGSEGV; | ||
201 | info.si_errno = 0; | ||
202 | info.si_code = SEGV_MAPERR; | ||
203 | info.si_addr = (void __user *)regs->tpc; | ||
204 | info.si_trapno = 0; | ||
205 | force_sig_info(SIGSEGV, &info, current); | ||
206 | } | ||
207 | |||
/* Same exception taken at TL > 0: dump the trap log first, then
 * handle as the TL0 case.
 */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
217 | |||
218 | void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
219 | { | ||
220 | unsigned short type = (type_ctx >> 16); | ||
221 | unsigned short ctx = (type_ctx & 0xffff); | ||
222 | siginfo_t info; | ||
223 | |||
224 | if (notify_die(DIE_TRAP, "instruction access exception", regs, | ||
225 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
226 | return; | ||
227 | |||
228 | if (regs->tstate & TSTATE_PRIV) { | ||
229 | printk("sun4v_insn_access_exception: ADDR[%016lx] " | ||
230 | "CTX[%04x] TYPE[%04x], going.\n", | ||
231 | addr, ctx, type); | ||
232 | die_if_kernel("Iax", regs); | ||
233 | } | ||
234 | |||
235 | if (test_thread_flag(TIF_32BIT)) { | ||
236 | regs->tpc &= 0xffffffff; | ||
237 | regs->tnpc &= 0xffffffff; | ||
238 | } | ||
239 | info.si_signo = SIGSEGV; | ||
240 | info.si_errno = 0; | ||
241 | info.si_code = SEGV_MAPERR; | ||
242 | info.si_addr = (void __user *) addr; | ||
243 | info.si_trapno = 0; | ||
244 | force_sig_info(SIGSEGV, &info, current); | ||
245 | } | ||
246 | |||
/* Same exception taken at TL > 0: dump the trap log first, then
 * handle as the TL0 case.
 */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
256 | |||
/* Spitfire data access exception at TL=0.  @sfsr/@sfar are the
 * Synchronous Fault Status/Address register values.  Privileged-mode
 * faults are first matched against the exception tables (uaccess
 * fixups); anything else in privileged mode is fatal.  User-mode
 * faults deliver SIGSEGV/SEGV_MAPERR at the fault address.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup handler recorded for this insn. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Privileged fault with no fixup entry: fatal. */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	/* NOTE(review): unlike the insn-access and sun4v handlers, tpc/tnpc
	 * are not masked for TIF_32BIT tasks here -- confirm intentional.
	 */
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
294 | |||
/* Spitfire data access exception taken at trap level > 1:
 * dump the TL1 trap log, then fall through to the TL=0 handler.
 */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	/* The trap-level-1 log is stored immediately after the pt_regs area. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
304 | |||
/* sun4v (hypervisor) data access exception at TL=0.
 * @addr is the faulting address; @type_ctx packs the fault type in
 * bits 31:16 and the MMU context in bits 15:0.  A fault in privileged
 * mode is fatal; a user fault delivers SIGSEGV/SEGV_MAPERR.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	/* 32-bit tasks only see the low 32 bits of tpc/tnpc. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
333 | |||
/* sun4v data access exception taken at trap level > 1:
 * dump the TL1 trap log, then fall through to the TL=0 handler.
 */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	/* The trap-level-1 log is stored immediately after the pt_regs area. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
343 | |||
344 | #ifdef CONFIG_PCI | ||
345 | #include "pci_impl.h" | ||
346 | #endif | ||
347 | |||
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* Only valid on Spitfire-class chips. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em: zero the I-cache and D-cache tags over two pages'
	 * worth of diagnostic addresses, 32 bytes at a time.
	 */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable caches and their MMUs in the LSU control register
	 * (IC/DC enable plus IM/DM MMU enable bits).
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
373 | |||
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * ESTATE error-enable register via its ASI.
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
382 | |||
/* 256-entry lookup table indexed by the low 8 syndrome bits of a UDB
 * error register; the resulting code is fed to sprintf_dimm() to name
 * the failing memory module (see spitfire_log_udb_syndrome).
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
417 | |||
/* Fallback module name when sprintf_dimm() cannot decode the syndrome. */
static char *syndrome_unknown = "<Unknown>";

/* Log the failing-DIMM name for whichever UDB halves (low and/or high)
 * have @bit set in their error register value (@udbl/@udbh), using the
 * fault address @afar to locate the memory module.
 */
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

}
448 | |||
/* Log a correctable ECC error (CEE): print the raw AFSR/AFAR/UDB state,
 * name the failing DIMM, notify listeners, and re-arm error reporting.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
469 | |||
470 | static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) | ||
471 | { | ||
472 | siginfo_t info; | ||
473 | |||
474 | printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " | ||
475 | "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n", | ||
476 | smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); | ||
477 | |||
478 | /* XXX add more human friendly logging of the error status | ||
479 | * XXX as is implemented for cheetah | ||
480 | */ | ||
481 | |||
482 | spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE); | ||
483 | |||
484 | /* We always log it, even if someone is listening for this | ||
485 | * trap. | ||
486 | */ | ||
487 | notify_die(DIE_TRAP, "Uncorrectable Error", regs, | ||
488 | 0, tt, SIGTRAP); | ||
489 | |||
490 | if (regs->tstate & TSTATE_PRIV) { | ||
491 | if (tl1) | ||
492 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
493 | die_if_kernel("UE", regs); | ||
494 | } | ||
495 | |||
496 | /* XXX need more intelligent processing here, such as is implemented | ||
497 | * XXX for cheetah errors, in fact if the E-cache still holds the | ||
498 | * XXX line with bad parity this will loop | ||
499 | */ | ||
500 | |||
501 | spitfire_clean_and_reenable_l1_caches(); | ||
502 | spitfire_enable_estate_errors(); | ||
503 | |||
504 | if (test_thread_flag(TIF_32BIT)) { | ||
505 | regs->tpc &= 0xffffffff; | ||
506 | regs->tnpc &= 0xffffffff; | ||
507 | } | ||
508 | info.si_signo = SIGBUS; | ||
509 | info.si_errno = 0; | ||
510 | info.si_code = BUS_OBJERR; | ||
511 | info.si_addr = (void *)0; | ||
512 | info.si_trapno = 0; | ||
513 | force_sig_info(SIGBUS, &info, current); | ||
514 | } | ||
515 | |||
/* Top-level Spitfire access-error handler.  @status_encoded packs the
 * AFSR value, trap type, TL>1 flag and both UDB error register values
 * (see the SFSTAT_* masks/shifts); dispatch to the UE and/or CEE
 * loggers as indicated by that state.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	/* Unpack the fields the low-level trap code encoded for us. */
	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error during a PCI config-space poke is expected:
	 * just flag the poke as faulted and skip the faulting instruction.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			/* Ack the CE bit in UDB-high (register offset 0x0). */
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			/* Ack the CE bit in UDB-low (register offset 0x18). */
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
568 | |||
/* Non-zero when the P-cache has been forced on (set elsewhere). */
int cheetah_pcache_forced_on;

/* Turn on the Cheetah P-cache on the current cpu by setting the
 * prefetch/second-prefetch enable bits in the DCU control register.
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	/* Read-modify-write the DCU control register via its ASI. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
587 | |||
/* Cheetah error trap handling. */

/* Physical base, line size and total size of the region used to force
 * E-cache eviction; set up in cheetah_ecache_flush_init().
 */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

/* One AFSR status bit paired with its human-readable description;
 * the tables below are terminated by a { 0, NULL } entry.
 */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};
601 | |||
602 | static const char CHAFSR_PERR_msg[] = | ||
603 | "System interface protocol error"; | ||
604 | static const char CHAFSR_IERR_msg[] = | ||
605 | "Internal processor error"; | ||
606 | static const char CHAFSR_ISAP_msg[] = | ||
607 | "System request parity error on incoming addresss"; | ||
608 | static const char CHAFSR_UCU_msg[] = | ||
609 | "Uncorrectable E-cache ECC error for ifetch/data"; | ||
610 | static const char CHAFSR_UCC_msg[] = | ||
611 | "SW Correctable E-cache ECC error for ifetch/data"; | ||
612 | static const char CHAFSR_UE_msg[] = | ||
613 | "Uncorrectable system bus data ECC error for read"; | ||
614 | static const char CHAFSR_EDU_msg[] = | ||
615 | "Uncorrectable E-cache ECC error for stmerge/blkld"; | ||
616 | static const char CHAFSR_EMU_msg[] = | ||
617 | "Uncorrectable system bus MTAG error"; | ||
618 | static const char CHAFSR_WDU_msg[] = | ||
619 | "Uncorrectable E-cache ECC error for writeback"; | ||
620 | static const char CHAFSR_CPU_msg[] = | ||
621 | "Uncorrectable ECC error for copyout"; | ||
622 | static const char CHAFSR_CE_msg[] = | ||
623 | "HW corrected system bus data ECC error for read"; | ||
624 | static const char CHAFSR_EDC_msg[] = | ||
625 | "HW corrected E-cache ECC error for stmerge/blkld"; | ||
626 | static const char CHAFSR_EMC_msg[] = | ||
627 | "HW corrected system bus MTAG ECC error"; | ||
628 | static const char CHAFSR_WDC_msg[] = | ||
629 | "HW corrected E-cache ECC error for writeback"; | ||
630 | static const char CHAFSR_CPC_msg[] = | ||
631 | "HW corrected ECC error for copyout"; | ||
632 | static const char CHAFSR_TO_msg[] = | ||
633 | "Unmapped error from system bus"; | ||
634 | static const char CHAFSR_BERR_msg[] = | ||
635 | "Bus error response from system bus"; | ||
636 | static const char CHAFSR_IVC_msg[] = | ||
637 | "HW corrected system bus data ECC error for ivec read"; | ||
638 | static const char CHAFSR_IVU_msg[] = | ||
639 | "Uncorrectable system bus data ECC error for ivec read"; | ||
640 | static struct afsr_error_table __cheetah_error_table[] = { | ||
641 | { CHAFSR_PERR, CHAFSR_PERR_msg }, | ||
642 | { CHAFSR_IERR, CHAFSR_IERR_msg }, | ||
643 | { CHAFSR_ISAP, CHAFSR_ISAP_msg }, | ||
644 | { CHAFSR_UCU, CHAFSR_UCU_msg }, | ||
645 | { CHAFSR_UCC, CHAFSR_UCC_msg }, | ||
646 | { CHAFSR_UE, CHAFSR_UE_msg }, | ||
647 | { CHAFSR_EDU, CHAFSR_EDU_msg }, | ||
648 | { CHAFSR_EMU, CHAFSR_EMU_msg }, | ||
649 | { CHAFSR_WDU, CHAFSR_WDU_msg }, | ||
650 | { CHAFSR_CPU, CHAFSR_CPU_msg }, | ||
651 | { CHAFSR_CE, CHAFSR_CE_msg }, | ||
652 | { CHAFSR_EDC, CHAFSR_EDC_msg }, | ||
653 | { CHAFSR_EMC, CHAFSR_EMC_msg }, | ||
654 | { CHAFSR_WDC, CHAFSR_WDC_msg }, | ||
655 | { CHAFSR_CPC, CHAFSR_CPC_msg }, | ||
656 | { CHAFSR_TO, CHAFSR_TO_msg }, | ||
657 | { CHAFSR_BERR, CHAFSR_BERR_msg }, | ||
658 | /* These two do not update the AFAR. */ | ||
659 | { CHAFSR_IVC, CHAFSR_IVC_msg }, | ||
660 | { CHAFSR_IVU, CHAFSR_IVU_msg }, | ||
661 | { 0, NULL }, | ||
662 | }; | ||
/* Cheetah+ additions to the AFSR bit descriptions, and the Cheetah+
 * priority-ordered error table (base Cheetah bits plus CHPAFSR_* bits).
 */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Jalapeno/Serrano additions to the AFSR bit descriptions, and the
 * Jalapeno priority-ordered error table (mixes CHAFSR_* and JPAFSR_*).
 */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* This one does not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Active error table and bit mask for the running chip variant,
 * selected in cheetah_ecache_flush_init().
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* Per-cpu error log scoreboard: two entries per cpu (TL=0 and TL>1),
 * allocated in cheetah_ecache_flush_init().
 */
struct cheetah_err_info *cheetah_error_log;

/* Return the current cpu's log slot for @afsr: the first of its pair
 * for TL=0, the second when CHAFSR_TL1 is set.  NULL before the
 * scoreboard is allocated.
 */
static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}
782 | |||
783 | extern unsigned int tl0_icpe[], tl1_icpe[]; | ||
784 | extern unsigned int tl0_dcpe[], tl1_dcpe[]; | ||
785 | extern unsigned int tl0_fecc[], tl1_fecc[]; | ||
786 | extern unsigned int tl0_cee[], tl1_cee[]; | ||
787 | extern unsigned int tl0_iae[], tl1_iae[]; | ||
788 | extern unsigned int tl0_dae[], tl1_dae[]; | ||
789 | extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[]; | ||
790 | extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[]; | ||
791 | extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[]; | ||
792 | extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[]; | ||
793 | extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[]; | ||
794 | |||
795 | void __init cheetah_ecache_flush_init(void) | ||
796 | { | ||
797 | unsigned long largest_size, smallest_linesize, order, ver; | ||
798 | int i, sz; | ||
799 | |||
800 | /* Scan all cpu device tree nodes, note two values: | ||
801 | * 1) largest E-cache size | ||
802 | * 2) smallest E-cache line size | ||
803 | */ | ||
804 | largest_size = 0UL; | ||
805 | smallest_linesize = ~0UL; | ||
806 | |||
807 | for (i = 0; i < NR_CPUS; i++) { | ||
808 | unsigned long val; | ||
809 | |||
810 | val = cpu_data(i).ecache_size; | ||
811 | if (!val) | ||
812 | continue; | ||
813 | |||
814 | if (val > largest_size) | ||
815 | largest_size = val; | ||
816 | |||
817 | val = cpu_data(i).ecache_line_size; | ||
818 | if (val < smallest_linesize) | ||
819 | smallest_linesize = val; | ||
820 | |||
821 | } | ||
822 | |||
823 | if (largest_size == 0UL || smallest_linesize == ~0UL) { | ||
824 | prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache " | ||
825 | "parameters.\n"); | ||
826 | prom_halt(); | ||
827 | } | ||
828 | |||
829 | ecache_flush_size = (2 * largest_size); | ||
830 | ecache_flush_linesize = smallest_linesize; | ||
831 | |||
832 | ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); | ||
833 | |||
834 | if (ecache_flush_physbase == ~0UL) { | ||
835 | prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " | ||
836 | "contiguous physical memory.\n", | ||
837 | ecache_flush_size); | ||
838 | prom_halt(); | ||
839 | } | ||
840 | |||
841 | /* Now allocate error trap reporting scoreboard. */ | ||
842 | sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info)); | ||
843 | for (order = 0; order < MAX_ORDER; order++) { | ||
844 | if ((PAGE_SIZE << order) >= sz) | ||
845 | break; | ||
846 | } | ||
847 | cheetah_error_log = (struct cheetah_err_info *) | ||
848 | __get_free_pages(GFP_KERNEL, order); | ||
849 | if (!cheetah_error_log) { | ||
850 | prom_printf("cheetah_ecache_flush_init: Failed to allocate " | ||
851 | "error logging scoreboard (%d bytes).\n", sz); | ||
852 | prom_halt(); | ||
853 | } | ||
854 | memset(cheetah_error_log, 0, PAGE_SIZE << order); | ||
855 | |||
856 | /* Mark all AFSRs as invalid so that the trap handler will | ||
857 | * log new new information there. | ||
858 | */ | ||
859 | for (i = 0; i < 2 * NR_CPUS; i++) | ||
860 | cheetah_error_log[i].afsr = CHAFSR_INVALID; | ||
861 | |||
862 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
863 | if ((ver >> 32) == __JALAPENO_ID || | ||
864 | (ver >> 32) == __SERRANO_ID) { | ||
865 | cheetah_error_table = &__jalapeno_error_table[0]; | ||
866 | cheetah_afsr_errors = JPAFSR_ERRORS; | ||
867 | } else if ((ver >> 32) == 0x003e0015) { | ||
868 | cheetah_error_table = &__cheetah_plus_error_table[0]; | ||
869 | cheetah_afsr_errors = CHPAFSR_ERRORS; | ||
870 | } else { | ||
871 | cheetah_error_table = &__cheetah_error_table[0]; | ||
872 | cheetah_afsr_errors = CHAFSR_ERRORS; | ||
873 | } | ||
874 | |||
875 | /* Now patch trap tables. */ | ||
876 | memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4)); | ||
877 | memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4)); | ||
878 | memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4)); | ||
879 | memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4)); | ||
880 | memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4)); | ||
881 | memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4)); | ||
882 | memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4)); | ||
883 | memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4)); | ||
884 | if (tlb_type == cheetah_plus) { | ||
885 | memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4)); | ||
886 | memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4)); | ||
887 | memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4)); | ||
888 | memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4)); | ||
889 | } | ||
890 | flushi(PAGE_OFFSET); | ||
891 | } | ||
892 | |||
/* Sweep the E-cache flush region with ASI_PHYS_USE_EC loads, one line
 * at a time from the top down, forcing eviction of all cached lines.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loop: decrement the offset by one line size and load from
	 * flush_base + offset until the offset reaches zero.
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
906 | |||
/* Evict a single E-cache line for @physaddr by loading both the
 * address's slot in the flush region and its alias one half-region
 * away, via ASI_PHYS_USE_EC.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* Align down to an 8-byte boundary, then map into the flush span. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
922 | |||
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags via ASI_IC_TAG diagnostic
	 * stores; the (2 << 3) in the address selects the tag field.
	 */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
945 | |||
/* Flush the I-cache safely: disable it via the DCU control register,
 * clear the tags, then restore the original DCU value (see the comment
 * above __cheetah_flush_icache for why it must be disabled first).
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
967 | |||
/* Invalidate the whole D-cache by zeroing every tag through the
 * ASI_DCACHE_TAG diagnostic ASI, one line at a time.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
983 | |||
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* utag value derived from the line address (addr >> 14). */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Write the utag for this line... */
		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero every 8-byte word of the line's data. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
1016 | |||
1017 | /* Conversion tables used to frob Cheetah AFSR syndrome values into | ||
1018 | * something palatable to the memory controller driver get_unumber | ||
1019 | * routine. | ||
1020 | */ | ||
1021 | #define MT0 137 | ||
1022 | #define MT1 138 | ||
1023 | #define MT2 139 | ||
1024 | #define NONE 254 | ||
1025 | #define MTC0 140 | ||
1026 | #define MTC1 141 | ||
1027 | #define MTC2 142 | ||
1028 | #define MTC3 143 | ||
1029 | #define C0 128 | ||
1030 | #define C1 129 | ||
1031 | #define C2 130 | ||
1032 | #define C3 131 | ||
1033 | #define C4 132 | ||
1034 | #define C5 133 | ||
1035 | #define C6 134 | ||
1036 | #define C7 135 | ||
1037 | #define C8 136 | ||
1038 | #define M2 144 | ||
1039 | #define M3 145 | ||
1040 | #define M4 146 | ||
1041 | #define M 147 | ||
/* 512-entry table indexed by the 9-bit AFSR E_SYND field.  Entries
 * below 128 name the data bit in error; the symbolic codes (Cx, Mx,
 * NONE) classify check-bit and multi-bit errors.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* 16-entry table indexed by the 4-bit AFSR M_SYND (mtag syndrome)
 * field, mapping it to the MT/MTC classification codes above.
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
1086 | |||
1087 | /* Return the highest priority error conditon mentioned. */ | ||
1088 | static inline unsigned long cheetah_get_hipri(unsigned long afsr) | ||
1089 | { | ||
1090 | unsigned long tmp = 0; | ||
1091 | int i; | ||
1092 | |||
1093 | for (i = 0; cheetah_error_table[i].mask; i++) { | ||
1094 | if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL) | ||
1095 | return tmp; | ||
1096 | } | ||
1097 | return tmp; | ||
1098 | } | ||
1099 | |||
1100 | static const char *cheetah_get_string(unsigned long bit) | ||
1101 | { | ||
1102 | int i; | ||
1103 | |||
1104 | for (i = 0; cheetah_error_table[i].mask; i++) { | ||
1105 | if ((bit & cheetah_error_table[i].mask) != 0UL) | ||
1106 | return cheetah_error_table[i].name; | ||
1107 | } | ||
1108 | return "???"; | ||
1109 | } | ||
1110 | |||
/* Dump a full report of a Cheetah error trap: trap state, decoded
 * syndrome/DIMM information when available, the D/I/E-cache snapshots
 * captured by the trap handler, and any additional error bits beyond
 * the highest-priority one.  Messages are KERN_WARNING when the error
 * is recoverable, KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant.  The E-syndrome errors use the
	 * ECC syndrome table, the M-syndrome errors the mtag table; the
	 * decoded syndrome is handed to sprintf_dimm() to produce a DIMM
	 * "unumber" label when the memory controller driver knows it.
	 */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report any remaining error bits beyond the highest-priority
	 * one, peeling them off in priority order.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1228 | |||
/* Re-read the AFSR to see whether any new error bits were logged while
 * error reporting traps were disabled.  If so and LOGP is non-NULL,
 * store the fresh AFSR/AFAR values there.  Always writes the AFSR value
 * back to the register, which clears the (write-one-to-clear) error
 * bits.  Returns 1 if new errors were found, 0 otherwise.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Capture the fault address to go with the status. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Writing the value back clears the latched error bits. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1253 | |||
/* Fast-ECC error trap handler (C-level part).  Called from the trap
 * entry code with caches and error reporting already disabled by the
 * low-level handler.  Flushes the caches, re-enables them and error
 * reporting, logs the error, and panics if the condition is not
 * recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area was set up; all we can
		 * do is report via the PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1339 | |||
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if no new error was observed (intermittent problem),
 * 1 if a retry cleared it, 2 if the error persists after retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1403 | |||
1404 | /* Return non-zero if PADDR is a valid physical memory address. */ | ||
1405 | static int cheetah_check_main_memory(unsigned long paddr) | ||
1406 | { | ||
1407 | unsigned long vaddr = PAGE_OFFSET + paddr; | ||
1408 | |||
1409 | if (vaddr > (unsigned long) high_memory) | ||
1410 | return 0; | ||
1411 | |||
1412 | return kern_addr_valid(vaddr); | ||
1413 | } | ||
1414 | |||
/* Correctable-ECC error trap handler (C-level part).  Attempts to fix
 * the error by displacement-flushing the line when the fault address is
 * in main memory, flushes/re-enables the I-cache, re-enables CE
 * reporting, and logs the result.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area was set up. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If the only error present is the EDC/CPC cache-data
		 * error, flushing just that E-cache line suffices;
		 * otherwise flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1514 | |||
/* Deferred error trap handler (C-level part).  Handles the special PCI
 * config-space "poke" probing sequence, flushes and re-enables caches
 * and error reporting, logs the error, and either fixes up execution
 * via the exception tables or panics when the error cannot be
 * recovered from.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		/* Tell the poking code its access faulted, and skip the
		 * faulting instruction.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area was set up. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* If the only error present is EDU/BERR, flushing just
		 * the affected E-cache line suffices; otherwise flush
		 * the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never handed
				 * out again, then continue at the fixup.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1701 | |||
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* I-cache parity: a flush suffices.  D-cache parity: rewrite
	 * every line with known-good parity first (see
	 * cheetah_plus_zap_dcache_parity), then flush.
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable variant: log and panic. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1743 | |||
/* Layout of one sun4v error report entry, as delivered by the
 * hypervisor into the per-cpu resumable/non-resumable error queues.
 * (Field meanings per the sun4v error queue specification; err_handle
 * is cleared by the kernel to release the entry back to the queue.)
 */
struct sun4v_error_entry {
	u64		err_handle;	/* unique handle; 0 == slot free */
	u64		err_stick;	/* %stick timestamp of the error */

	u32		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;	/* bitmask of what was affected */
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;	/* real address of the error */
	u32		err_size;	/* size of the affected region */
	u16		err_cpu;	/* cpu the error was detected on */
	u16		err_pad;	/* padding to 8-byte multiple */
};
1770 | |||
/* Error-queue overflow counters, bumped from the overflow trap paths
 * (see sun4v_resum_overflow) and reported/reset by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1773 | |||
1774 | static const char *sun4v_err_type_to_str(u32 type) | ||
1775 | { | ||
1776 | switch (type) { | ||
1777 | case SUN4V_ERR_TYPE_UNDEFINED: | ||
1778 | return "undefined"; | ||
1779 | case SUN4V_ERR_TYPE_UNCORRECTED_RES: | ||
1780 | return "uncorrected resumable"; | ||
1781 | case SUN4V_ERR_TYPE_PRECISE_NONRES: | ||
1782 | return "precise nonresumable"; | ||
1783 | case SUN4V_ERR_TYPE_DEFERRED_NONRES: | ||
1784 | return "deferred nonresumable"; | ||
1785 | case SUN4V_ERR_TYPE_WARNING_RES: | ||
1786 | return "warning resumable"; | ||
1787 | default: | ||
1788 | return "unknown"; | ||
1789 | }; | ||
1790 | } | ||
1791 | |||
/* Print a formatted report of one sun4v error entry ENT on behalf of
 * CPU, prefixed by PFX on each line.  Also reports (and resets) the
 * queue-overflow counter OCNT if any overflows occurred.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	show_regs(regs);

	/* Report and reset the overflow count accumulated while we
	 * could not safely printk (see sun4v_resum_overflow).
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1834 | |||
1835 | /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. | ||
1836 | * Log the event and clear the first word of the entry. | ||
1837 | */ | ||
1838 | void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) | ||
1839 | { | ||
1840 | struct sun4v_error_entry *ent, local_copy; | ||
1841 | struct trap_per_cpu *tb; | ||
1842 | unsigned long paddr; | ||
1843 | int cpu; | ||
1844 | |||
1845 | cpu = get_cpu(); | ||
1846 | |||
1847 | tb = &trap_block[cpu]; | ||
1848 | paddr = tb->resum_kernel_buf_pa + offset; | ||
1849 | ent = __va(paddr); | ||
1850 | |||
1851 | memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); | ||
1852 | |||
1853 | /* We have a local copy now, so release the entry. */ | ||
1854 | ent->err_handle = 0; | ||
1855 | wmb(); | ||
1856 | |||
1857 | put_cpu(); | ||
1858 | |||
1859 | if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) { | ||
1860 | /* If err_type is 0x4, it's a powerdown request. Do | ||
1861 | * not do the usual resumable error log because that | ||
1862 | * makes it look like some abnormal error. | ||
1863 | */ | ||
1864 | printk(KERN_INFO "Power down request...\n"); | ||
1865 | kill_cad_pid(SIGINT, 1); | ||
1866 | return; | ||
1867 | } | ||
1868 | |||
1869 | sun4v_log_error(regs, &local_copy, cpu, | ||
1870 | KERN_ERR "RESUMABLE ERROR", | ||
1871 | &sun4v_resum_oflow_cnt); | ||
1872 | } | ||
1873 | |||
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	/* Counter is reported and reset later by sun4v_log_error(). */
	atomic_inc(&sun4v_resum_oflow_cnt);
}
1882 | |||
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	/* 'offset' locates this entry within the per-cpu kernel copy
	 * of the non-resumable error queue.
	 */
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* The poke was expected to possibly fault; flag it and
		 * skip the faulting instruction instead of dying.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
1923 | |||
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
1935 | |||
/* Scratch values for the most recent ITLB error; presumably filled in
 * by the low-level sun4v ITLB error trap code before it calls
 * sun4v_itlb_error_report() — TODO confirm against the asm entry code.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Report a fatal sun4v instruction-TLB error and halt into the PROM.
 * @tl: trap level the error was taken at; above 1 the TL1 trap log
 *      saved just past pt_regs is dumped first.  Does not return.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}
1959 | |||
/* Scratch values for the most recent DTLB error; presumably filled in
 * by the low-level sun4v DTLB error trap code before it calls
 * sun4v_dtlb_error_report() — TODO confirm against the asm entry code.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Report a fatal sun4v data-TLB error and halt into the PROM.
 * @tl: trap level the error was taken at; above 1 the TL1 trap log
 *      saved just past pt_regs is dumped first.  Does not return.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}
1983 | |||
/* Report an error code returned by a sun4v TLB hypervisor call.
 * @err: hypervisor error code, @op: the TLB operation that failed.
 */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
1989 | |||
/* Cross-call (xcall) variant of hypervisor_tlbop_error() above. */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
1995 | |||
1996 | void do_fpe_common(struct pt_regs *regs) | ||
1997 | { | ||
1998 | if (regs->tstate & TSTATE_PRIV) { | ||
1999 | regs->tpc = regs->tnpc; | ||
2000 | regs->tnpc += 4; | ||
2001 | } else { | ||
2002 | unsigned long fsr = current_thread_info()->xfsr[0]; | ||
2003 | siginfo_t info; | ||
2004 | |||
2005 | if (test_thread_flag(TIF_32BIT)) { | ||
2006 | regs->tpc &= 0xffffffff; | ||
2007 | regs->tnpc &= 0xffffffff; | ||
2008 | } | ||
2009 | info.si_signo = SIGFPE; | ||
2010 | info.si_errno = 0; | ||
2011 | info.si_addr = (void __user *)regs->tpc; | ||
2012 | info.si_trapno = 0; | ||
2013 | info.si_code = __SI_FAULT; | ||
2014 | if ((fsr & 0x1c000) == (1 << 14)) { | ||
2015 | if (fsr & 0x10) | ||
2016 | info.si_code = FPE_FLTINV; | ||
2017 | else if (fsr & 0x08) | ||
2018 | info.si_code = FPE_FLTOVF; | ||
2019 | else if (fsr & 0x04) | ||
2020 | info.si_code = FPE_FLTUND; | ||
2021 | else if (fsr & 0x02) | ||
2022 | info.si_code = FPE_FLTDIV; | ||
2023 | else if (fsr & 0x01) | ||
2024 | info.si_code = FPE_FLTRES; | ||
2025 | } | ||
2026 | force_sig_info(SIGFPE, &info, current); | ||
2027 | } | ||
2028 | } | ||
2029 | |||
/* FPU IEEE exception trap (type 0x24): give notifier-chain users a
 * chance to claim it, then hand off to the common FPE handler.
 */
void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}
2038 | |||
2039 | extern int do_mathemu(struct pt_regs *, struct fpustate *); | ||
2040 | |||
2041 | void do_fpother(struct pt_regs *regs) | ||
2042 | { | ||
2043 | struct fpustate *f = FPUSTATE; | ||
2044 | int ret = 0; | ||
2045 | |||
2046 | if (notify_die(DIE_TRAP, "fpu exception other", regs, | ||
2047 | 0, 0x25, SIGFPE) == NOTIFY_STOP) | ||
2048 | return; | ||
2049 | |||
2050 | switch ((current_thread_info()->xfsr[0] & 0x1c000)) { | ||
2051 | case (2 << 14): /* unfinished_FPop */ | ||
2052 | case (3 << 14): /* unimplemented_FPop */ | ||
2053 | ret = do_mathemu(regs, f); | ||
2054 | break; | ||
2055 | } | ||
2056 | if (ret) | ||
2057 | return; | ||
2058 | do_fpe_common(regs); | ||
2059 | } | ||
2060 | |||
2061 | void do_tof(struct pt_regs *regs) | ||
2062 | { | ||
2063 | siginfo_t info; | ||
2064 | |||
2065 | if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, | ||
2066 | 0, 0x26, SIGEMT) == NOTIFY_STOP) | ||
2067 | return; | ||
2068 | |||
2069 | if (regs->tstate & TSTATE_PRIV) | ||
2070 | die_if_kernel("Penguin overflow trap from kernel mode", regs); | ||
2071 | if (test_thread_flag(TIF_32BIT)) { | ||
2072 | regs->tpc &= 0xffffffff; | ||
2073 | regs->tnpc &= 0xffffffff; | ||
2074 | } | ||
2075 | info.si_signo = SIGEMT; | ||
2076 | info.si_errno = 0; | ||
2077 | info.si_code = EMT_TAGOVF; | ||
2078 | info.si_addr = (void __user *)regs->tpc; | ||
2079 | info.si_trapno = 0; | ||
2080 | force_sig_info(SIGEMT, &info, current); | ||
2081 | } | ||
2082 | |||
2083 | void do_div0(struct pt_regs *regs) | ||
2084 | { | ||
2085 | siginfo_t info; | ||
2086 | |||
2087 | if (notify_die(DIE_TRAP, "integer division by zero", regs, | ||
2088 | 0, 0x28, SIGFPE) == NOTIFY_STOP) | ||
2089 | return; | ||
2090 | |||
2091 | if (regs->tstate & TSTATE_PRIV) | ||
2092 | die_if_kernel("TL0: Kernel divide by zero.", regs); | ||
2093 | if (test_thread_flag(TIF_32BIT)) { | ||
2094 | regs->tpc &= 0xffffffff; | ||
2095 | regs->tnpc &= 0xffffffff; | ||
2096 | } | ||
2097 | info.si_signo = SIGFPE; | ||
2098 | info.si_errno = 0; | ||
2099 | info.si_code = FPE_INTDIV; | ||
2100 | info.si_addr = (void __user *)regs->tpc; | ||
2101 | info.si_trapno = 0; | ||
2102 | force_sig_info(SIGFPE, &info, current); | ||
2103 | } | ||
2104 | |||
/* Dump nine instruction words around the faulting kernel PC; the
 * faulting word itself is bracketed by '<' and '>'.
 */
static void instruction_dump(unsigned int *pc)
{
	int off;

	/* A word-wise dump needs a 4-byte-aligned PC. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		char lmark = (off == 0) ? '<' : ' ';
		char rmark = (off == 0) ? '>' : ' ';

		printk("%c%08x%c", lmark, pc[off], rmark);
	}
	printk("\n");
}
2117 | |||
2118 | static void user_instruction_dump(unsigned int __user *pc) | ||
2119 | { | ||
2120 | int i; | ||
2121 | unsigned int buf[9]; | ||
2122 | |||
2123 | if ((((unsigned long) pc) & 3)) | ||
2124 | return; | ||
2125 | |||
2126 | if (copy_from_user(buf, pc - 3, sizeof(buf))) | ||
2127 | return; | ||
2128 | |||
2129 | printk("Instruction DUMP:"); | ||
2130 | for (i = 0; i < 9; i++) | ||
2131 | printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>'); | ||
2132 | printk("\n"); | ||
2133 | } | ||
2134 | |||
2135 | void show_stack(struct task_struct *tsk, unsigned long *_ksp) | ||
2136 | { | ||
2137 | unsigned long fp, thread_base, ksp; | ||
2138 | struct thread_info *tp; | ||
2139 | int count = 0; | ||
2140 | |||
2141 | ksp = (unsigned long) _ksp; | ||
2142 | if (!tsk) | ||
2143 | tsk = current; | ||
2144 | tp = task_thread_info(tsk); | ||
2145 | if (ksp == 0UL) { | ||
2146 | if (tsk == current) | ||
2147 | asm("mov %%fp, %0" : "=r" (ksp)); | ||
2148 | else | ||
2149 | ksp = tp->ksp; | ||
2150 | } | ||
2151 | if (tp == current_thread_info()) | ||
2152 | flushw_all(); | ||
2153 | |||
2154 | fp = ksp + STACK_BIAS; | ||
2155 | thread_base = (unsigned long) tp; | ||
2156 | |||
2157 | printk("Call Trace:\n"); | ||
2158 | do { | ||
2159 | struct sparc_stackf *sf; | ||
2160 | struct pt_regs *regs; | ||
2161 | unsigned long pc; | ||
2162 | |||
2163 | if (!kstack_valid(tp, fp)) | ||
2164 | break; | ||
2165 | sf = (struct sparc_stackf *) fp; | ||
2166 | regs = (struct pt_regs *) (sf + 1); | ||
2167 | |||
2168 | if (kstack_is_trap_frame(tp, regs)) { | ||
2169 | if (!(regs->tstate & TSTATE_PRIV)) | ||
2170 | break; | ||
2171 | pc = regs->tpc; | ||
2172 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; | ||
2173 | } else { | ||
2174 | pc = sf->callers_pc; | ||
2175 | fp = (unsigned long)sf->fp + STACK_BIAS; | ||
2176 | } | ||
2177 | |||
2178 | printk(" [%016lx] %pS\n", pc, (void *) pc); | ||
2179 | } while (++count < 16); | ||
2180 | } | ||
2181 | |||
/* Print the current task's kernel call trace from its live frame. */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
2188 | |||
2189 | static inline int is_kernel_stack(struct task_struct *task, | ||
2190 | struct reg_window *rw) | ||
2191 | { | ||
2192 | unsigned long rw_addr = (unsigned long) rw; | ||
2193 | unsigned long thread_base, thread_end; | ||
2194 | |||
2195 | if (rw_addr < PAGE_OFFSET) { | ||
2196 | if (task != &init_task) | ||
2197 | return 0; | ||
2198 | } | ||
2199 | |||
2200 | thread_base = (unsigned long) task_stack_page(task); | ||
2201 | thread_end = thread_base + sizeof(union thread_union); | ||
2202 | if (rw_addr >= thread_base && | ||
2203 | rw_addr < thread_end && | ||
2204 | !(rw_addr & 0x7UL)) | ||
2205 | return 1; | ||
2206 | |||
2207 | return 0; | ||
2208 | } | ||
2209 | |||
2210 | static inline struct reg_window *kernel_stack_up(struct reg_window *rw) | ||
2211 | { | ||
2212 | unsigned long fp = rw->ins[6]; | ||
2213 | |||
2214 | if (!fp) | ||
2215 | return NULL; | ||
2216 | |||
2217 | return (struct reg_window *) (fp + STACK_BIAS); | ||
2218 | } | ||
2219 | |||
/* Oops/die path: print a banner, register state, backtrace and
 * instruction dump, then terminate the current context.  Despite the
 * name this function never returns in either mode: kernel-mode traps
 * exit with SIGKILL after oopsing, user-mode traps exit with SIGSEGV.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
" \\|/ ____ \\|/\n"
" \"@'/ .. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill all register windows so the stack walk below is valid. */
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
2264 | |||
2265 | #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) | ||
2266 | #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) | ||
2267 | |||
2268 | extern int handle_popc(u32 insn, struct pt_regs *regs); | ||
2269 | extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); | ||
2270 | |||
2271 | void do_illegal_instruction(struct pt_regs *regs) | ||
2272 | { | ||
2273 | unsigned long pc = regs->tpc; | ||
2274 | unsigned long tstate = regs->tstate; | ||
2275 | u32 insn; | ||
2276 | siginfo_t info; | ||
2277 | |||
2278 | if (notify_die(DIE_TRAP, "illegal instruction", regs, | ||
2279 | 0, 0x10, SIGILL) == NOTIFY_STOP) | ||
2280 | return; | ||
2281 | |||
2282 | if (tstate & TSTATE_PRIV) | ||
2283 | die_if_kernel("Kernel illegal instruction", regs); | ||
2284 | if (test_thread_flag(TIF_32BIT)) | ||
2285 | pc = (u32)pc; | ||
2286 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | ||
2287 | if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { | ||
2288 | if (handle_popc(insn, regs)) | ||
2289 | return; | ||
2290 | } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { | ||
2291 | if (handle_ldf_stq(insn, regs)) | ||
2292 | return; | ||
2293 | } else if (tlb_type == hypervisor) { | ||
2294 | if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { | ||
2295 | if (!vis_emul(regs, insn)) | ||
2296 | return; | ||
2297 | } else { | ||
2298 | struct fpustate *f = FPUSTATE; | ||
2299 | |||
2300 | /* XXX maybe verify XFSR bits like | ||
2301 | * XXX do_fpother() does? | ||
2302 | */ | ||
2303 | if (do_mathemu(regs, f)) | ||
2304 | return; | ||
2305 | } | ||
2306 | } | ||
2307 | } | ||
2308 | info.si_signo = SIGILL; | ||
2309 | info.si_errno = 0; | ||
2310 | info.si_code = ILL_ILLOPC; | ||
2311 | info.si_addr = (void __user *)pc; | ||
2312 | info.si_trapno = 0; | ||
2313 | force_sig_info(SIGILL, &info, current); | ||
2314 | } | ||
2315 | |||
2316 | extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); | ||
2317 | |||
2318 | void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) | ||
2319 | { | ||
2320 | siginfo_t info; | ||
2321 | |||
2322 | if (notify_die(DIE_TRAP, "memory address unaligned", regs, | ||
2323 | 0, 0x34, SIGSEGV) == NOTIFY_STOP) | ||
2324 | return; | ||
2325 | |||
2326 | if (regs->tstate & TSTATE_PRIV) { | ||
2327 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); | ||
2328 | return; | ||
2329 | } | ||
2330 | info.si_signo = SIGBUS; | ||
2331 | info.si_errno = 0; | ||
2332 | info.si_code = BUS_ADRALN; | ||
2333 | info.si_addr = (void __user *)sfar; | ||
2334 | info.si_trapno = 0; | ||
2335 | force_sig_info(SIGBUS, &info, current); | ||
2336 | } | ||
2337 | |||
2338 | void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) | ||
2339 | { | ||
2340 | siginfo_t info; | ||
2341 | |||
2342 | if (notify_die(DIE_TRAP, "memory address unaligned", regs, | ||
2343 | 0, 0x34, SIGSEGV) == NOTIFY_STOP) | ||
2344 | return; | ||
2345 | |||
2346 | if (regs->tstate & TSTATE_PRIV) { | ||
2347 | kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); | ||
2348 | return; | ||
2349 | } | ||
2350 | info.si_signo = SIGBUS; | ||
2351 | info.si_errno = 0; | ||
2352 | info.si_code = BUS_ADRALN; | ||
2353 | info.si_addr = (void __user *) addr; | ||
2354 | info.si_trapno = 0; | ||
2355 | force_sig_info(SIGBUS, &info, current); | ||
2356 | } | ||
2357 | |||
2358 | void do_privop(struct pt_regs *regs) | ||
2359 | { | ||
2360 | siginfo_t info; | ||
2361 | |||
2362 | if (notify_die(DIE_TRAP, "privileged operation", regs, | ||
2363 | 0, 0x11, SIGILL) == NOTIFY_STOP) | ||
2364 | return; | ||
2365 | |||
2366 | if (test_thread_flag(TIF_32BIT)) { | ||
2367 | regs->tpc &= 0xffffffff; | ||
2368 | regs->tnpc &= 0xffffffff; | ||
2369 | } | ||
2370 | info.si_signo = SIGILL; | ||
2371 | info.si_errno = 0; | ||
2372 | info.si_code = ILL_PRVOPC; | ||
2373 | info.si_addr = (void __user *)regs->tpc; | ||
2374 | info.si_trapno = 0; | ||
2375 | force_sig_info(SIGILL, &info, current); | ||
2376 | } | ||
2377 | |||
/* Privileged-action trap; handled identically to a privileged opcode. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2382 | |||
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	/* Cache error exception at TL0: always fatal to this context. */
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2388 | |||
/* Cache error exception at trap level > 0: dump the TL1 log and die. */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}
2394 | |||
/* Data access exception at trap level > 0: dump the TL1 log and die. */
void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}
2400 | |||
/* Instruction access exception at trap level > 0: dump and die. */
void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}
2406 | |||
/* Divide-by-zero at trap level > 0: dump the TL1 log and die. */
void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
2412 | |||
/* FPU-disabled trap at trap level > 0: dump the TL1 log and die. */
void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}
2418 | |||
/* FPU IEEE exception at trap level > 0: dump the TL1 log and die. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
2424 | |||
/* FPU "other" exception at trap level > 0: dump the TL1 log and die. */
void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
2430 | |||
/* Illegal instruction at trap level > 0: dump the TL1 log and die. */
void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
2436 | |||
/* Interrupt taken at trap level > 0: dump the TL1 log and die. */
void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
2442 | |||
/* Misaligned LDDF at trap level > 0: dump the TL1 log and die. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
2448 | |||
/* Misaligned STDF at trap level > 0: dump the TL1 log and die. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
2454 | |||
/* Physical-address watchpoint hit at TL0: always fatal here. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2459 | |||
/* Physical-address watchpoint at trap level > 0: dump and die. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
2465 | |||
/* Virtual-address watchpoint hit at TL0: always fatal here. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2470 | |||
/* Virtual-address watchpoint at trap level > 0: dump and die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
2476 | |||
/* Tag overflow at trap level > 0: dump the TL1 log and die. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2482 | |||
/* Build a V8-style PSR image from TSTATE (tstate_to_psr), return it
 * in the task's %o0 slot, and step past the trap instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks execute in a 32-bit PC space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2493 | |||
2494 | struct trap_per_cpu trap_block[NR_CPUS]; | ||
2495 | |||
2496 | /* This can get invoked before sched_init() so play it super safe | ||
2497 | * and use hard_smp_processor_id(). | ||
2498 | */ | ||
2499 | void notrace init_cur_cpu_trap(struct thread_info *t) | ||
2500 | { | ||
2501 | int cpu = hard_smp_processor_id(); | ||
2502 | struct trap_per_cpu *p = &trap_block[cpu]; | ||
2503 | |||
2504 | p->thread = t; | ||
2505 | p->pgd_paddr = 0; | ||
2506 | } | ||
2507 | |||
/* Deliberately never defined anywhere: if one of the offset sanity
 * checks in trap_init() fails, its call survives constant folding and
 * produces a descriptive link-time error.
 */
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The TI_*, TRAP_PER_CPU_* and
	 * TSB_CONFIG_* constants are hard-coded offsets used by the
	 * assembler trap code; they must match the C struct layouts.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S new file mode 100644 index 000000000000..8c91d9b29a2f --- /dev/null +++ b/arch/sparc/kernel/tsb.S | |||
@@ -0,0 +1,552 @@ | |||
1 | /* tsb.S: Sparc64 TSB table handling. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | |||
7 | #include <asm/tsb.h> | ||
8 | #include <asm/hypervisor.h> | ||
9 | #include <asm/page.h> | ||
10 | #include <asm/cpudata.h> | ||
11 | #include <asm/mmu.h> | ||
12 | |||
13 | .text | ||
14 | .align 32 | ||
15 | |||
16 | /* Invoked from TLB miss handler, we are in the | ||
17 | * MMU global registers and they are setup like | ||
18 | * this: | ||
19 | * | ||
20 | * %g1: TSB entry pointer | ||
21 | * %g2: available temporary | ||
22 | * %g3: FAULT_CODE_{D,I}TLB | ||
23 | * %g4: available temporary | ||
24 | * %g5: available temporary | ||
25 | * %g6: TAG TARGET | ||
26 | * %g7: available temporary, will be loaded by us with | ||
27 | * the physical address base of the linux page | ||
28 | * tables for the current address space | ||
29 | */ | ||
30 | tsb_miss_dtlb: | ||
31 | mov TLB_TAG_ACCESS, %g4 | ||
32 | ba,pt %xcc, tsb_miss_page_table_walk | ||
33 | ldxa [%g4] ASI_DMMU, %g4 | ||
34 | |||
35 | tsb_miss_itlb: | ||
36 | mov TLB_TAG_ACCESS, %g4 | ||
37 | ba,pt %xcc, tsb_miss_page_table_walk | ||
38 | ldxa [%g4] ASI_IMMU, %g4 | ||
39 | |||
40 | /* At this point we have: | ||
41 | * %g1 -- PAGE_SIZE TSB entry address | ||
42 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
43 | * %g4 -- missing virtual address | ||
44 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
45 | */ | ||
46 | tsb_miss_page_table_walk: | ||
47 | TRAP_LOAD_TRAP_BLOCK(%g7, %g5) | ||
48 | |||
49 | /* Before committing to a full page table walk, | ||
50 | * check the huge page TSB. | ||
51 | */ | ||
52 | #ifdef CONFIG_HUGETLB_PAGE | ||
53 | |||
54 | 661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5 | ||
55 | nop | ||
56 | .section .sun4v_2insn_patch, "ax" | ||
57 | .word 661b | ||
58 | mov SCRATCHPAD_UTSBREG2, %g5 | ||
59 | ldxa [%g5] ASI_SCRATCHPAD, %g5 | ||
60 | .previous | ||
61 | |||
62 | cmp %g5, -1 | ||
63 | be,pt %xcc, 80f | ||
64 | nop | ||
65 | |||
66 | /* We need an aligned pair of registers containing 2 values | ||
67 | * which can be easily rematerialized. %g6 and %g7 foot the | ||
68 | * bill just nicely. We'll save %g6 away into %g2 for the | ||
69 | * huge page TSB TAG comparison. | ||
70 | * | ||
71 | * Perform a huge page TSB lookup. | ||
72 | */ | ||
73 | mov %g6, %g2 | ||
74 | and %g5, 0x7, %g6 | ||
75 | mov 512, %g7 | ||
76 | andn %g5, 0x7, %g5 | ||
77 | sllx %g7, %g6, %g7 | ||
78 | srlx %g4, HPAGE_SHIFT, %g6 | ||
79 | sub %g7, 1, %g7 | ||
80 | and %g6, %g7, %g6 | ||
81 | sllx %g6, 4, %g6 | ||
82 | add %g5, %g6, %g5 | ||
83 | |||
84 | TSB_LOAD_QUAD(%g5, %g6) | ||
85 | cmp %g6, %g2 | ||
86 | be,a,pt %xcc, tsb_tlb_reload | ||
87 | mov %g7, %g5 | ||
88 | |||
89 | /* No match, remember the huge page TSB entry address, | ||
90 | * and restore %g6 and %g7. | ||
91 | */ | ||
92 | TRAP_LOAD_TRAP_BLOCK(%g7, %g6) | ||
93 | srlx %g4, 22, %g6 | ||
94 | 80: stx %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP] | ||
95 | |||
96 | #endif | ||
97 | |||
98 | ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7 | ||
99 | |||
100 | /* At this point we have: | ||
101 | * %g1 -- TSB entry address | ||
102 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
103 | * %g4 -- missing virtual address | ||
104 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
105 | * %g7 -- page table physical address | ||
106 | * | ||
107 | * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE | ||
108 | * TSB both lack a matching entry. | ||
109 | */ | ||
110 | tsb_miss_page_table_walk_sun4v_fastpath: | ||
111 | USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) | ||
112 | |||
113 | /* Load and check PTE. */ | ||
114 | ldxa [%g5] ASI_PHYS_USE_EC, %g5 | ||
115 | brgez,pn %g5, tsb_do_fault | ||
116 | nop | ||
117 | |||
118 | #ifdef CONFIG_HUGETLB_PAGE | ||
119 | 661: sethi %uhi(_PAGE_SZALL_4U), %g7 | ||
120 | sllx %g7, 32, %g7 | ||
121 | .section .sun4v_2insn_patch, "ax" | ||
122 | .word 661b | ||
123 | mov _PAGE_SZALL_4V, %g7 | ||
124 | nop | ||
125 | .previous | ||
126 | |||
127 | and %g5, %g7, %g2 | ||
128 | |||
129 | 661: sethi %uhi(_PAGE_SZHUGE_4U), %g7 | ||
130 | sllx %g7, 32, %g7 | ||
131 | .section .sun4v_2insn_patch, "ax" | ||
132 | .word 661b | ||
133 | mov _PAGE_SZHUGE_4V, %g7 | ||
134 | nop | ||
135 | .previous | ||
136 | |||
137 | cmp %g2, %g7 | ||
138 | bne,pt %xcc, 60f | ||
139 | nop | ||
140 | |||
141 | /* It is a huge page, use huge page TSB entry address we | ||
142 | * calculated above. | ||
143 | */ | ||
144 | TRAP_LOAD_TRAP_BLOCK(%g7, %g2) | ||
145 | ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2 | ||
146 | cmp %g2, -1 | ||
147 | movne %xcc, %g2, %g1 | ||
148 | 60: | ||
149 | #endif | ||
150 | |||
151 | /* At this point we have: | ||
152 | * %g1 -- TSB entry address | ||
153 | * %g3 -- FAULT_CODE_{D,I}TLB | ||
154 | * %g5 -- valid PTE | ||
155 | * %g6 -- TAG TARGET (vaddr >> 22) | ||
156 | */ | ||
157 | tsb_reload: | ||
158 | TSB_LOCK_TAG(%g1, %g2, %g7) | ||
159 | TSB_WRITE(%g1, %g5, %g6) | ||
160 | |||
161 | /* Finally, load TLB and return from trap. */ | ||
162 | tsb_tlb_reload: | ||
163 | cmp %g3, FAULT_CODE_DTLB | ||
164 | bne,pn %xcc, tsb_itlb_load | ||
165 | nop | ||
166 | |||
167 | tsb_dtlb_load: | ||
168 | |||
169 | 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
170 | retry | ||
171 | .section .sun4v_2insn_patch, "ax" | ||
172 | .word 661b | ||
173 | nop | ||
174 | nop | ||
175 | .previous | ||
176 | |||
177 | /* For sun4v the ASI_DTLB_DATA_IN store and the retry | ||
178 | * instruction get nop'd out and we get here to branch | ||
179 | * to the sun4v tlb load code. The registers are setup | ||
180 | * as follows: | ||
181 | * | ||
182 | * %g4: vaddr | ||
183 | * %g5: PTE | ||
184 | * %g6: TAG | ||
185 | * | ||
186 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
187 | * up here. | ||
188 | */ | ||
189 | ba,pt %xcc, sun4v_dtlb_load | ||
190 | mov %g5, %g3 | ||
191 | |||
192 | tsb_itlb_load: | ||
193 | /* Executable bit must be set. */ | ||
194 | 661: andcc %g5, _PAGE_EXEC_4U, %g0 | ||
195 | .section .sun4v_1insn_patch, "ax" | ||
196 | .word 661b | ||
197 | andcc %g5, _PAGE_EXEC_4V, %g0 | ||
198 | .previous | ||
199 | |||
200 | be,pn %xcc, tsb_do_fault | ||
201 | nop | ||
202 | |||
203 | 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
204 | retry | ||
205 | .section .sun4v_2insn_patch, "ax" | ||
206 | .word 661b | ||
207 | nop | ||
208 | nop | ||
209 | .previous | ||
210 | |||
211 | /* For sun4v the ASI_ITLB_DATA_IN store and the retry | ||
212 | * instruction get nop'd out and we get here to branch | ||
213 | * to the sun4v tlb load code. The registers are setup | ||
214 | * as follows: | ||
215 | * | ||
216 | * %g4: vaddr | ||
217 | * %g5: PTE | ||
218 | * %g6: TAG | ||
219 | * | ||
220 | * The sun4v TLB load wants the PTE in %g3 so we fix that | ||
221 | * up here. | ||
222 | */ | ||
223 | ba,pt %xcc, sun4v_itlb_load | ||
224 | mov %g5, %g3 | ||
225 | |||
226 | /* No valid entry in the page tables, do full fault | ||
227 | * processing. | ||
228 | */ | ||
229 | |||
230 | .globl tsb_do_fault | ||
231 | tsb_do_fault: | ||
232 | cmp %g3, FAULT_CODE_DTLB | ||
233 | |||
234 | 661: rdpr %pstate, %g5 | ||
235 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
236 | .section .sun4v_2insn_patch, "ax" | ||
237 | .word 661b | ||
238 | SET_GL(1) | ||
239 | ldxa [%g0] ASI_SCRATCHPAD, %g4 | ||
240 | .previous | ||
241 | |||
242 | bne,pn %xcc, tsb_do_itlb_fault | ||
243 | nop | ||
244 | |||
245 | tsb_do_dtlb_fault: | ||
246 | rdpr %tl, %g3 | ||
247 | cmp %g3, 1 | ||
248 | |||
249 | 661: mov TLB_TAG_ACCESS, %g4 | ||
250 | ldxa [%g4] ASI_DMMU, %g5 | ||
251 | .section .sun4v_2insn_patch, "ax" | ||
252 | .word 661b | ||
253 | ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 | ||
254 | nop | ||
255 | .previous | ||
256 | |||
257 | be,pt %xcc, sparc64_realfault_common | ||
258 | mov FAULT_CODE_DTLB, %g4 | ||
259 | ba,pt %xcc, winfix_trampoline | ||
260 | nop | ||
261 | |||
262 | tsb_do_itlb_fault: | ||
263 | rdpr %tpc, %g5 | ||
264 | ba,pt %xcc, sparc64_realfault_common | ||
265 | mov FAULT_CODE_ITLB, %g4 | ||
266 | |||
267 | .globl sparc64_realfault_common | ||
268 | sparc64_realfault_common: | ||
269 | /* fault code in %g4, fault address in %g5, etrap will | ||
270 | * preserve these two values in %l4 and %l5 respectively | ||
271 | */ | ||
272 | ba,pt %xcc, etrap ! Save trap state | ||
273 | 1: rd %pc, %g7 ! ... | ||
274 | stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code | ||
275 | stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address | ||
276 | call do_sparc64_fault ! Call fault handler | ||
277 | add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg | ||
278 | ba,pt %xcc, rtrap ! Restore cpu state | ||
279 | nop ! Delay slot (fill me) | ||
280 | |||
281 | winfix_trampoline: | ||
282 | rdpr %tpc, %g3 ! Prepare winfixup TNPC | ||
283 | or %g3, 0x7c, %g3 ! Compute branch offset | ||
284 | wrpr %g3, %tnpc ! Write it into TNPC | ||
285 | done ! Trap return | ||
286 | |||
287 | /* Insert an entry into the TSB. | ||
288 | * | ||
289 | * %o0: TSB entry pointer (virt or phys address) | ||
290 | * %o1: tag | ||
291 | * %o2: pte | ||
292 | */ | ||
293 | .align 32 | ||
294 | .globl __tsb_insert | ||
295 | __tsb_insert: | ||
296 | rdpr %pstate, %o5 | ||
297 | wrpr %o5, PSTATE_IE, %pstate | ||
298 | TSB_LOCK_TAG(%o0, %g2, %g3) | ||
299 | TSB_WRITE(%o0, %o2, %o1) | ||
300 | wrpr %o5, %pstate | ||
301 | retl | ||
302 | nop | ||
303 | .size __tsb_insert, .-__tsb_insert | ||
304 | |||
305 | /* Flush the given TSB entry if it has the matching | ||
306 | * tag. | ||
307 | * | ||
308 | * %o0: TSB entry pointer (virt or phys address) | ||
309 | * %o1: tag | ||
310 | */ | ||
311 | .align 32 | ||
312 | .globl tsb_flush | ||
313 | .type tsb_flush,#function | ||
314 | tsb_flush: | ||
315 | sethi %hi(TSB_TAG_LOCK_HIGH), %g2 | ||
316 | 1: TSB_LOAD_TAG(%o0, %g1) | ||
317 | srlx %g1, 32, %o3 | ||
318 | andcc %o3, %g2, %g0 | ||
319 | bne,pn %icc, 1b | ||
320 | nop | ||
321 | cmp %g1, %o1 | ||
322 | mov 1, %o3 | ||
323 | bne,pt %xcc, 2f | ||
324 | sllx %o3, TSB_TAG_INVALID_BIT, %o3 | ||
325 | TSB_CAS_TAG(%o0, %g1, %o3) | ||
326 | cmp %g1, %o3 | ||
327 | bne,pn %xcc, 1b | ||
328 | nop | ||
329 | 2: retl | ||
330 | nop | ||
331 | .size tsb_flush, .-tsb_flush | ||
332 | |||
333 | /* Reload MMU related context switch state at | ||
334 | * schedule() time. | ||
335 | * | ||
336 | * %o0: page table physical address | ||
337 | * %o1: TSB base config pointer | ||
338 | * %o2: TSB huge config pointer, or NULL if none | ||
339 | * %o3: Hypervisor TSB descriptor physical address | ||
340 | * | ||
341 | * We have to run this whole thing with interrupts | ||
342 | * disabled so that the current cpu doesn't change | ||
343 | * due to preemption. | ||
344 | */ | ||
345 | .align 32 | ||
346 | .globl __tsb_context_switch | ||
347 | .type __tsb_context_switch,#function | ||
348 | __tsb_context_switch: | ||
349 | rdpr %pstate, %g1 | ||
350 | wrpr %g1, PSTATE_IE, %pstate | ||
351 | |||
352 | TRAP_LOAD_TRAP_BLOCK(%g2, %g3) | ||
353 | |||
354 | stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] | ||
355 | |||
356 | ldx [%o1 + TSB_CONFIG_REG_VAL], %o0 | ||
357 | brz,pt %o2, 1f | ||
358 | mov -1, %g3 | ||
359 | |||
360 | ldx [%o2 + TSB_CONFIG_REG_VAL], %g3 | ||
361 | |||
362 | 1: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE] | ||
363 | |||
364 | sethi %hi(tlb_type), %g2 | ||
365 | lduw [%g2 + %lo(tlb_type)], %g2 | ||
366 | cmp %g2, 3 | ||
367 | bne,pt %icc, 50f | ||
368 | nop | ||
369 | |||
370 | /* Hypervisor TSB switch. */ | ||
371 | mov SCRATCHPAD_UTSBREG1, %o5 | ||
372 | stxa %o0, [%o5] ASI_SCRATCHPAD | ||
373 | mov SCRATCHPAD_UTSBREG2, %o5 | ||
374 | stxa %g3, [%o5] ASI_SCRATCHPAD | ||
375 | |||
376 | mov 2, %o0 | ||
377 | cmp %g3, -1 | ||
378 | move %xcc, 1, %o0 | ||
379 | |||
380 | mov HV_FAST_MMU_TSB_CTXNON0, %o5 | ||
381 | mov %o3, %o1 | ||
382 | ta HV_FAST_TRAP | ||
383 | |||
384 | /* Finish up. */ | ||
385 | ba,pt %xcc, 9f | ||
386 | nop | ||
387 | |||
388 | /* SUN4U TSB switch. */ | ||
389 | 50: mov TSB_REG, %o5 | ||
390 | stxa %o0, [%o5] ASI_DMMU | ||
391 | membar #Sync | ||
392 | stxa %o0, [%o5] ASI_IMMU | ||
393 | membar #Sync | ||
394 | |||
395 | 2: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4 | ||
396 | brz %o4, 9f | ||
397 | ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5 | ||
398 | |||
399 | sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 | ||
400 | mov TLB_TAG_ACCESS, %g3 | ||
401 | lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 | ||
402 | stxa %o4, [%g3] ASI_DMMU | ||
403 | membar #Sync | ||
404 | sllx %g2, 3, %g2 | ||
405 | stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS | ||
406 | membar #Sync | ||
407 | |||
408 | brz,pt %o2, 9f | ||
409 | nop | ||
410 | |||
411 | ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4 | ||
412 | ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5 | ||
413 | mov TLB_TAG_ACCESS, %g3 | ||
414 | stxa %o4, [%g3] ASI_DMMU | ||
415 | membar #Sync | ||
416 | sub %g2, (1 << 3), %g2 | ||
417 | stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS | ||
418 | membar #Sync | ||
419 | |||
420 | 9: | ||
421 | wrpr %g1, %pstate | ||
422 | |||
423 | retl | ||
424 | nop | ||
425 | .size __tsb_context_switch, .-__tsb_context_switch | ||
426 | |||
427 | #define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ | ||
428 | (1 << TSB_TAG_INVALID_BIT)) | ||
429 | |||
430 | .align 32 | ||
431 | .globl copy_tsb | ||
432 | .type copy_tsb,#function | ||
433 | copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size | ||
434 | * %o2=new_tsb_base, %o3=new_tsb_size | ||
435 | */ | ||
436 | sethi %uhi(TSB_PASS_BITS), %g7 | ||
437 | srlx %o3, 4, %o3 | ||
438 | add %o0, %o1, %g1 /* end of old tsb */ | ||
439 | sllx %g7, 32, %g7 | ||
440 | sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ | ||
441 | |||
442 | 661: prefetcha [%o0] ASI_N, #one_read | ||
443 | .section .tsb_phys_patch, "ax" | ||
444 | .word 661b | ||
445 | prefetcha [%o0] ASI_PHYS_USE_EC, #one_read | ||
446 | .previous | ||
447 | |||
448 | 90: andcc %o0, (64 - 1), %g0 | ||
449 | bne 1f | ||
450 | add %o0, 64, %o5 | ||
451 | |||
452 | 661: prefetcha [%o5] ASI_N, #one_read | ||
453 | .section .tsb_phys_patch, "ax" | ||
454 | .word 661b | ||
455 | prefetcha [%o5] ASI_PHYS_USE_EC, #one_read | ||
456 | .previous | ||
457 | |||
458 | 1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ | ||
459 | andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ | ||
460 | bne,pn %xcc, 80f /* Skip it */ | ||
461 | sllx %g2, 22, %o4 /* TAG --> VADDR */ | ||
462 | |||
463 | /* This can definitely be computed faster... */ | ||
464 | srlx %o0, 4, %o5 /* Build index */ | ||
465 | and %o5, 511, %o5 /* Mask index */ | ||
466 | sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ | ||
467 | or %o4, %o5, %o4 /* Full VADDR. */ | ||
468 | srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ | ||
469 | and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ | ||
470 | sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ | ||
471 | TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ | ||
472 | add %o4, 0x8, %o4 /* Advance to TTE */ | ||
473 | TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ | ||
474 | |||
475 | 80: add %o0, 16, %o0 | ||
476 | cmp %o0, %g1 | ||
477 | bne,pt %xcc, 90b | ||
478 | nop | ||
479 | |||
480 | retl | ||
481 | nop | ||
482 | .size copy_tsb, .-copy_tsb | ||
483 | |||
484 | /* Set the invalid bit in all TSB entries. */ | ||
485 | .align 32 | ||
486 | .globl tsb_init | ||
487 | .type tsb_init,#function | ||
488 | tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */ | ||
489 | prefetch [%o0 + 0x000], #n_writes | ||
490 | mov 1, %g1 | ||
491 | prefetch [%o0 + 0x040], #n_writes | ||
492 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
493 | prefetch [%o0 + 0x080], #n_writes | ||
494 | 1: prefetch [%o0 + 0x0c0], #n_writes | ||
495 | stx %g1, [%o0 + 0x00] | ||
496 | stx %g1, [%o0 + 0x10] | ||
497 | stx %g1, [%o0 + 0x20] | ||
498 | stx %g1, [%o0 + 0x30] | ||
499 | prefetch [%o0 + 0x100], #n_writes | ||
500 | stx %g1, [%o0 + 0x40] | ||
501 | stx %g1, [%o0 + 0x50] | ||
502 | stx %g1, [%o0 + 0x60] | ||
503 | stx %g1, [%o0 + 0x70] | ||
504 | prefetch [%o0 + 0x140], #n_writes | ||
505 | stx %g1, [%o0 + 0x80] | ||
506 | stx %g1, [%o0 + 0x90] | ||
507 | stx %g1, [%o0 + 0xa0] | ||
508 | stx %g1, [%o0 + 0xb0] | ||
509 | prefetch [%o0 + 0x180], #n_writes | ||
510 | stx %g1, [%o0 + 0xc0] | ||
511 | stx %g1, [%o0 + 0xd0] | ||
512 | stx %g1, [%o0 + 0xe0] | ||
513 | stx %g1, [%o0 + 0xf0] | ||
514 | subcc %o1, 0x100, %o1 | ||
515 | bne,pt %xcc, 1b | ||
516 | add %o0, 0x100, %o0 | ||
517 | retl | ||
518 | nop | ||
519 | nop | ||
520 | nop | ||
521 | .size tsb_init, .-tsb_init | ||
522 | |||
523 | .globl NGtsb_init | ||
524 | .type NGtsb_init,#function | ||
525 | NGtsb_init: | ||
526 | rd %asi, %g2 | ||
527 | mov 1, %g1 | ||
528 | wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi | ||
529 | sllx %g1, TSB_TAG_INVALID_BIT, %g1 | ||
530 | 1: stxa %g1, [%o0 + 0x00] %asi | ||
531 | stxa %g1, [%o0 + 0x10] %asi | ||
532 | stxa %g1, [%o0 + 0x20] %asi | ||
533 | stxa %g1, [%o0 + 0x30] %asi | ||
534 | stxa %g1, [%o0 + 0x40] %asi | ||
535 | stxa %g1, [%o0 + 0x50] %asi | ||
536 | stxa %g1, [%o0 + 0x60] %asi | ||
537 | stxa %g1, [%o0 + 0x70] %asi | ||
538 | stxa %g1, [%o0 + 0x80] %asi | ||
539 | stxa %g1, [%o0 + 0x90] %asi | ||
540 | stxa %g1, [%o0 + 0xa0] %asi | ||
541 | stxa %g1, [%o0 + 0xb0] %asi | ||
542 | stxa %g1, [%o0 + 0xc0] %asi | ||
543 | stxa %g1, [%o0 + 0xd0] %asi | ||
544 | stxa %g1, [%o0 + 0xe0] %asi | ||
545 | stxa %g1, [%o0 + 0xf0] %asi | ||
546 | subcc %o1, 0x100, %o1 | ||
547 | bne,pt %xcc, 1b | ||
548 | add %o0, 0x100, %o0 | ||
549 | membar #Sync | ||
550 | retl | ||
551 | wr %g2, 0x0, %asi | ||
552 | .size NGtsb_init, .-NGtsb_init | ||
diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable.S new file mode 100644 index 000000000000..ea925503b42e --- /dev/null +++ b/arch/sparc/kernel/ttable.S | |||
@@ -0,0 +1,266 @@ | |||
1 | /* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions. | ||
2 | * | ||
3 | * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | |||
7 | .globl sparc64_ttable_tl0, sparc64_ttable_tl1 | ||
8 | .globl tl0_icpe, tl1_icpe | ||
9 | .globl tl0_dcpe, tl1_dcpe | ||
10 | .globl tl0_fecc, tl1_fecc | ||
11 | .globl tl0_cee, tl1_cee | ||
12 | .globl tl0_iae, tl1_iae | ||
13 | .globl tl0_dae, tl1_dae | ||
14 | |||
15 | sparc64_ttable_tl0: | ||
16 | tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) | ||
17 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) | ||
18 | tl0_iax: membar #Sync | ||
19 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) | ||
20 | tl0_itsb_4v: SUN4V_ITSB_MISS | ||
21 | tl0_iae: membar #Sync | ||
22 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
23 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) | ||
24 | tl0_ill: membar #Sync | ||
25 | TRAP_7INSNS(do_illegal_instruction) | ||
26 | tl0_privop: TRAP(do_privop) | ||
27 | tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17) | ||
28 | tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d) | ||
29 | tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f) | ||
30 | tl0_fpdis: TRAP_NOSAVE(do_fpdis) | ||
31 | tl0_fpieee: TRAP_SAVEFPU(do_fpieee) | ||
32 | tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos) | ||
33 | tl0_tof: TRAP(do_tof) | ||
34 | tl0_cwin: CLEAN_WINDOW | ||
35 | tl0_div0: TRAP(do_div0) | ||
36 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) | ||
37 | tl0_resv02f: BTRAP(0x2f) | ||
38 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) | ||
39 | tl0_dtsb_4v: SUN4V_DTSB_MISS | ||
40 | tl0_dae: membar #Sync | ||
41 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
42 | tl0_resv033: BTRAP(0x33) | ||
43 | tl0_mna: TRAP_NOSAVE(do_mna) | ||
44 | tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) | ||
45 | tl0_stdfmna: TRAP_NOSAVE(do_stdfmna) | ||
46 | tl0_privact: TRAP_NOSAVE(__do_privact) | ||
47 | tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d) | ||
48 | tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) | ||
49 | #ifdef CONFIG_SMP | ||
50 | tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) | ||
51 | tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) | ||
52 | tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) | ||
53 | tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) | ||
54 | #else | ||
55 | tl0_irq1: BTRAP(0x41) | ||
56 | tl0_irq2: BTRAP(0x42) | ||
57 | tl0_irq3: BTRAP(0x43) | ||
58 | tl0_irq4: BTRAP(0x44) | ||
59 | #endif | ||
60 | tl0_irq5: TRAP_IRQ(handler_irq, 5) | ||
61 | #ifdef CONFIG_SMP | ||
62 | tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6) | ||
63 | #else | ||
64 | tl0_irq6: BTRAP(0x46) | ||
65 | #endif | ||
66 | tl0_irq7: BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) | ||
67 | tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) | ||
68 | tl0_irq14: TRAP_IRQ(timer_interrupt, 14) | ||
69 | tl0_irq15: TRAP_NMI_IRQ(perfctr_irq, 15) | ||
70 | tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) | ||
71 | tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) | ||
72 | tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f) | ||
73 | tl0_ivec: TRAP_IVEC | ||
74 | tl0_paw: TRAP(do_paw) | ||
75 | tl0_vaw: TRAP(do_vaw) | ||
76 | tl0_cee: membar #Sync | ||
77 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) | ||
78 | tl0_iamiss: | ||
79 | #include "itlb_miss.S" | ||
80 | tl0_damiss: | ||
81 | #include "dtlb_miss.S" | ||
82 | tl0_daprot: | ||
83 | #include "dtlb_prot.S" | ||
84 | tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ | ||
85 | tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ | ||
86 | tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ | ||
87 | tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) | ||
88 | tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) | ||
89 | tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) | ||
90 | tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) | ||
91 | tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) | ||
92 | tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) | ||
93 | tl0_s0n: SPILL_0_NORMAL | ||
94 | tl0_s1n: SPILL_1_NORMAL | ||
95 | tl0_s2n: SPILL_2_NORMAL | ||
96 | tl0_s3n: SPILL_0_NORMAL_ETRAP | ||
97 | tl0_s4n: SPILL_1_GENERIC_ETRAP | ||
98 | tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP | ||
99 | tl0_s6n: SPILL_2_GENERIC_ETRAP | ||
100 | tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP | ||
101 | tl0_s0o: SPILL_0_OTHER | ||
102 | tl0_s1o: SPILL_1_OTHER | ||
103 | tl0_s2o: SPILL_2_OTHER | ||
104 | tl0_s3o: SPILL_3_OTHER | ||
105 | tl0_s4o: SPILL_4_OTHER | ||
106 | tl0_s5o: SPILL_5_OTHER | ||
107 | tl0_s6o: SPILL_6_OTHER | ||
108 | tl0_s7o: SPILL_7_OTHER | ||
109 | tl0_f0n: FILL_0_NORMAL | ||
110 | tl0_f1n: FILL_1_NORMAL | ||
111 | tl0_f2n: FILL_2_NORMAL | ||
112 | tl0_f3n: FILL_3_NORMAL | ||
113 | tl0_f4n: FILL_4_NORMAL | ||
114 | tl0_f5n: FILL_0_NORMAL_RTRAP | ||
115 | tl0_f6n: FILL_1_GENERIC_RTRAP | ||
116 | tl0_f7n: FILL_2_GENERIC_RTRAP | ||
117 | tl0_f0o: FILL_0_OTHER | ||
118 | tl0_f1o: FILL_1_OTHER | ||
119 | tl0_f2o: FILL_2_OTHER | ||
120 | tl0_f3o: FILL_3_OTHER | ||
121 | tl0_f4o: FILL_4_OTHER | ||
122 | tl0_f5o: FILL_5_OTHER | ||
123 | tl0_f6o: FILL_6_OTHER | ||
124 | tl0_f7o: FILL_7_OTHER | ||
125 | tl0_resv100: BTRAP(0x100) | ||
126 | tl0_bkpt: BREAKPOINT_TRAP | ||
127 | tl0_divz: TRAP(do_div0) | ||
128 | tl0_flushw: FLUSH_WINDOW_TRAP | ||
129 | tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) BTRAP(0x108) | ||
130 | tl0_resv109: BTRAP(0x109) BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) | ||
131 | tl0_resv10e: BTRAP(0x10e) BTRAP(0x10f) | ||
132 | tl0_linux32: LINUX_32BIT_SYSCALL_TRAP | ||
133 | tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP | ||
134 | tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113) | ||
135 | tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115) | ||
136 | tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117) | ||
137 | tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119) | ||
138 | tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b) | ||
139 | tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d) | ||
140 | tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f) | ||
141 | tl0_getcc: GETCC_TRAP | ||
142 | tl0_setcc: SETCC_TRAP | ||
143 | tl0_getpsr: TRAP(do_getpsr) | ||
144 | tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126) BTRAP(0x127) | ||
145 | tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c) | ||
146 | tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131) | ||
147 | tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136) | ||
148 | tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b) | ||
149 | tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140) | ||
150 | tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145) | ||
151 | tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a) | ||
152 | tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f) | ||
153 | tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154) | ||
154 | tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159) | ||
155 | tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e) | ||
156 | tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163) | ||
157 | tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168) | ||
158 | tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c) | ||
159 | tl0_linux64: LINUX_64BIT_SYSCALL_TRAP | ||
160 | tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context) | ||
161 | tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172) | ||
162 | tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177) | ||
163 | tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c) | ||
164 | tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f) | ||
165 | #define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7) | ||
166 | tl0_resv180: BTRAPS(0x180) BTRAPS(0x188) | ||
167 | tl0_resv190: BTRAPS(0x190) BTRAPS(0x198) | ||
168 | tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8) | ||
169 | tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8) | ||
170 | tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8) | ||
171 | tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8) | ||
172 | tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8) | ||
173 | tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8) | ||
174 | |||
175 | sparc64_ttable_tl1: | ||
176 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) | ||
177 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) | ||
178 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) | ||
179 | tl1_itsb_4v: SUN4V_ITSB_MISS | ||
180 | tl1_iae: membar #Sync | ||
181 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
182 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) | ||
183 | tl1_ill: TRAPTL1(do_ill_tl1) | ||
184 | tl1_privop: BTRAPTL1(0x11) | ||
185 | tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15) | ||
186 | tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19) | ||
187 | tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d) | ||
188 | tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f) | ||
189 | tl1_fpdis: TRAP_NOSAVE(do_fpdis) | ||
190 | tl1_fpieee: TRAPTL1(do_fpieee_tl1) | ||
191 | tl1_fpother: TRAPTL1(do_fpother_tl1) | ||
192 | tl1_tof: TRAPTL1(do_tof_tl1) | ||
193 | tl1_cwin: CLEAN_WINDOW | ||
194 | tl1_div0: TRAPTL1(do_div0_tl1) | ||
195 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) | ||
196 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) | ||
197 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) | ||
198 | tl1_dtsb_4v: SUN4V_DTSB_MISS | ||
199 | tl1_dae: membar #Sync | ||
200 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
201 | tl1_resv033: BTRAPTL1(0x33) | ||
202 | tl1_mna: TRAP_NOSAVE(do_mna) | ||
203 | tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) | ||
204 | tl1_stdfmna: TRAPTL1(do_stdfmna_tl1) | ||
205 | tl1_privact: BTRAPTL1(0x37) | ||
206 | tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b) | ||
207 | tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f) | ||
208 | tl1_resv040: BTRAPTL1(0x40) | ||
209 | tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3) | ||
210 | tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6) | ||
211 | tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9) | ||
212 | tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11) | ||
213 | tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13) | ||
214 | tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15) | ||
215 | tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53) | ||
216 | tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57) | ||
217 | tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b) | ||
218 | tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f) | ||
219 | tl1_ivec: TRAP_IVEC | ||
220 | tl1_paw: TRAPTL1(do_paw_tl1) | ||
221 | tl1_vaw: TRAPTL1(do_vaw_tl1) | ||
222 | tl1_cee: BTRAPTL1(0x63) | ||
223 | tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) | ||
224 | tl1_damiss: | ||
225 | #include "dtlb_miss.S" | ||
226 | tl1_daprot: | ||
227 | #include "dtlb_prot.S" | ||
228 | tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ | ||
229 | tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */ | ||
230 | tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */ | ||
231 | tl1_resv073: BTRAPTL1(0x73) | ||
232 | tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77) | ||
233 | tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b) | ||
234 | tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f) | ||
235 | tl1_s0n: SPILL_0_NORMAL | ||
236 | tl1_s1n: SPILL_1_NORMAL | ||
237 | tl1_s2n: SPILL_2_NORMAL | ||
238 | tl1_s3n: SPILL_3_NORMAL | ||
239 | tl1_s4n: SPILL_4_NORMAL | ||
240 | tl1_s5n: SPILL_5_NORMAL | ||
241 | tl1_s6n: SPILL_6_NORMAL | ||
242 | tl1_s7n: SPILL_7_NORMAL | ||
243 | tl1_s0o: SPILL_0_OTHER | ||
244 | tl1_s1o: SPILL_1_OTHER | ||
245 | tl1_s2o: SPILL_2_OTHER | ||
246 | tl1_s3o: SPILL_3_OTHER | ||
247 | tl1_s4o: SPILL_4_OTHER | ||
248 | tl1_s5o: SPILL_5_OTHER | ||
249 | tl1_s6o: SPILL_6_OTHER | ||
250 | tl1_s7o: SPILL_7_OTHER | ||
251 | tl1_f0n: FILL_0_NORMAL | ||
252 | tl1_f1n: FILL_1_NORMAL | ||
253 | tl1_f2n: FILL_2_NORMAL | ||
254 | tl1_f3n: FILL_3_NORMAL | ||
255 | tl1_f4n: FILL_4_NORMAL | ||
256 | tl1_f5n: FILL_5_NORMAL | ||
257 | tl1_f6n: FILL_6_NORMAL | ||
258 | tl1_f7n: FILL_7_NORMAL | ||
259 | tl1_f0o: FILL_0_OTHER | ||
260 | tl1_f1o: FILL_1_OTHER | ||
261 | tl1_f2o: FILL_2_OTHER | ||
262 | tl1_f3o: FILL_3_OTHER | ||
263 | tl1_f4o: FILL_4_OTHER | ||
264 | tl1_f5o: FILL_5_OTHER | ||
265 | tl1_f6o: FILL_6_OTHER | ||
266 | tl1_f7o: FILL_7_OTHER | ||
diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S new file mode 100644 index 000000000000..be183fe41443 --- /dev/null +++ b/arch/sparc/kernel/una_asm_64.S | |||
@@ -0,0 +1,146 @@ | |||
1 | /* una_asm.S: Kernel unaligned trap assembler helpers. | ||
2 | * | ||
3 | * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
5 | */ | ||
6 | |||
7 | .text | ||
8 | |||
	/* int __do_int_store(unsigned long *dst_addr /* %o0 */,
	 *                    int size               /* %o1: 2, 4 or 8 */,
	 *                    unsigned long src_val  /* %o2 */,
	 *                    int asi                /* %o3 */)
	 *
	 * Emulate an unaligned integer store: write the low @size bytes
	 * of @src_val to the unaligned address @dst_addr one byte at a
	 * time (big-endian order) under the requested ASI.  Returns 0 on
	 * success; every numbered stba below is covered by the exception
	 * table at the end, so a faulting byte store returns -EFAULT via
	 * __retl_efault instead of oopsing.
	 */
	.globl	__do_int_store
__do_int_store:
	rd	%asi, %o4		! save caller's %asi
	wr	%o3, 0, %asi		! switch to the decoded ASI
	mov	%o2, %g3		! %g3 = value to store
	cmp	%o1, 2
	be,pn	%icc, 2f		! 2-byte store
	 cmp	%o1, 4
	be,pt	%icc, 1f		! 4-byte store
	 srlx	%g3, 24, %g2		! delay slot: %g2 = bits 31..24 (used by both paths)
	/* 8-byte store: emit bytes most-significant first. */
	srlx	%g3, 56, %g1
	srlx	%g3, 48, %g7
4:	stba	%g1, [%o0] %asi
	srlx	%g3, 40, %g1
5:	stba	%g7, [%o0 + 1] %asi
	srlx	%g3, 32, %g7
6:	stba	%g1, [%o0 + 2] %asi
7:	stba	%g7, [%o0 + 3] %asi
	srlx	%g3, 16, %g1
8:	stba	%g2, [%o0 + 4] %asi
	srlx	%g3, 8, %g7
9:	stba	%g1, [%o0 + 5] %asi
10:	stba	%g7, [%o0 + 6] %asi
	ba,pt	%xcc, 0f
11:	 stba	%g3, [%o0 + 7] %asi	! delay slot: final byte
	/* 4-byte store (%g2 already holds bits 31..24). */
1:	srl	%g3, 16, %g7
12:	stba	%g2, [%o0] %asi
	srl	%g3, 8, %g2
13:	stba	%g7, [%o0 + 1] %asi
14:	stba	%g2, [%o0 + 2] %asi
	ba,pt	%xcc, 0f
15:	 stba	%g3, [%o0 + 3] %asi	! delay slot: final byte
	/* 2-byte store. */
2:	srl	%g3, 8, %g2
16:	stba	%g2, [%o0] %asi
17:	stba	%g3, [%o0 + 1] %asi
0:
	wr	%o4, 0x0, %asi		! restore caller's %asi
	retl
	 mov	0, %o0			! success
	.size	__do_int_store, .-__do_int_store

	/* Fault on any of the labelled stores -> return -EFAULT. */
	.section __ex_table,"a"
	.word	4b, __retl_efault
	.word	5b, __retl_efault
	.word	6b, __retl_efault
	.word	7b, __retl_efault
	.word	8b, __retl_efault
	.word	9b, __retl_efault
	.word	10b, __retl_efault
	.word	11b, __retl_efault
	.word	12b, __retl_efault
	.word	13b, __retl_efault
	.word	14b, __retl_efault
	.word	15b, __retl_efault
	.word	16b, __retl_efault
	.word	17b, __retl_efault
	.previous
66 | |||
67 | .globl do_int_load | ||
68 | do_int_load: | ||
69 | rd %asi, %o5 | ||
70 | wr %o4, 0, %asi | ||
71 | cmp %o1, 8 | ||
72 | bge,pn %icc, 9f | ||
73 | cmp %o1, 4 | ||
74 | be,pt %icc, 6f | ||
75 | 4: lduba [%o2] %asi, %g2 | ||
76 | 5: lduba [%o2 + 1] %asi, %g3 | ||
77 | sll %g2, 8, %g2 | ||
78 | brz,pt %o3, 3f | ||
79 | add %g2, %g3, %g2 | ||
80 | sllx %g2, 48, %g2 | ||
81 | srax %g2, 48, %g2 | ||
82 | 3: ba,pt %xcc, 0f | ||
83 | stx %g2, [%o0] | ||
84 | 6: lduba [%o2 + 1] %asi, %g3 | ||
85 | sll %g2, 24, %g2 | ||
86 | 7: lduba [%o2 + 2] %asi, %g7 | ||
87 | sll %g3, 16, %g3 | ||
88 | 8: lduba [%o2 + 3] %asi, %g1 | ||
89 | sll %g7, 8, %g7 | ||
90 | or %g2, %g3, %g2 | ||
91 | or %g7, %g1, %g7 | ||
92 | or %g2, %g7, %g2 | ||
93 | brnz,a,pt %o3, 3f | ||
94 | sra %g2, 0, %g2 | ||
95 | 3: ba,pt %xcc, 0f | ||
96 | stx %g2, [%o0] | ||
97 | 9: lduba [%o2] %asi, %g2 | ||
98 | 10: lduba [%o2 + 1] %asi, %g3 | ||
99 | sllx %g2, 56, %g2 | ||
100 | 11: lduba [%o2 + 2] %asi, %g7 | ||
101 | sllx %g3, 48, %g3 | ||
102 | 12: lduba [%o2 + 3] %asi, %g1 | ||
103 | sllx %g7, 40, %g7 | ||
104 | sllx %g1, 32, %g1 | ||
105 | or %g2, %g3, %g2 | ||
106 | or %g7, %g1, %g7 | ||
107 | 13: lduba [%o2 + 4] %asi, %g3 | ||
108 | or %g2, %g7, %g7 | ||
109 | 14: lduba [%o2 + 5] %asi, %g1 | ||
110 | sllx %g3, 24, %g3 | ||
111 | 15: lduba [%o2 + 6] %asi, %g2 | ||
112 | sllx %g1, 16, %g1 | ||
113 | or %g7, %g3, %g7 | ||
114 | 16: lduba [%o2 + 7] %asi, %g3 | ||
115 | sllx %g2, 8, %g2 | ||
116 | or %g7, %g1, %g7 | ||
117 | or %g2, %g3, %g2 | ||
118 | or %g7, %g2, %g7 | ||
119 | cmp %o1, 8 | ||
120 | be,a,pt %icc, 0f | ||
121 | stx %g7, [%o0] | ||
122 | srlx %g7, 32, %g2 | ||
123 | sra %g7, 0, %g7 | ||
124 | stx %g2, [%o0] | ||
125 | stx %g7, [%o0 + 8] | ||
126 | 0: | ||
127 | wr %o5, 0x0, %asi | ||
128 | retl | ||
129 | mov 0, %o0 | ||
130 | .size __do_int_load, .-__do_int_load | ||
131 | |||
132 | .section __ex_table,"a" | ||
133 | .word 4b, __retl_efault | ||
134 | .word 5b, __retl_efault | ||
135 | .word 6b, __retl_efault | ||
136 | .word 7b, __retl_efault | ||
137 | .word 8b, __retl_efault | ||
138 | .word 9b, __retl_efault | ||
139 | .word 10b, __retl_efault | ||
140 | .word 11b, __retl_efault | ||
141 | .word 12b, __retl_efault | ||
142 | .word 13b, __retl_efault | ||
143 | .word 14b, __retl_efault | ||
144 | .word 15b, __retl_efault | ||
145 | .word 16b, __retl_efault | ||
146 | .previous | ||
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c new file mode 100644 index 000000000000..203ddfad9f27 --- /dev/null +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -0,0 +1,690 @@ | |||
1 | /* | ||
2 | * unaligned.c: Unaligned load/store trap handling with special | ||
3 | * cases for the kernel to do them more quickly. | ||
4 | * | ||
5 | * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) | ||
6 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
7 | */ | ||
8 | |||
9 | |||
10 | #include <linux/jiffies.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/asi.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/pstate.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/bitops.h> | ||
23 | #include <asm/fpumacro.h> | ||
24 | |||
25 | /* #define DEBUG_MNA */ | ||
26 | |||
/* Classification of the trapping memory instruction's transfer
 * direction, as decoded from the instruction word.
 */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,    /* floating-point load */
	fpst,    /* floating-point store */
	invalid,
};
35 | |||
#ifdef DEBUG_MNA
/* Printable names for enum direction (same order), used only by the
 * DEBUG_MNA printk in kernel_unaligned_trap().
 */
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
41 | |||
42 | static inline enum direction decode_direction(unsigned int insn) | ||
43 | { | ||
44 | unsigned long tmp = (insn >> 21) & 1; | ||
45 | |||
46 | if (!tmp) | ||
47 | return load; | ||
48 | else { | ||
49 | switch ((insn>>19)&0xf) { | ||
50 | case 15: /* swap* */ | ||
51 | return both; | ||
52 | default: | ||
53 | return store; | ||
54 | } | ||
55 | } | ||
56 | } | ||
57 | |||
58 | /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ | ||
59 | static inline int decode_access_size(unsigned int insn) | ||
60 | { | ||
61 | unsigned int tmp; | ||
62 | |||
63 | tmp = ((insn >> 19) & 0xf); | ||
64 | if (tmp == 11 || tmp == 14) /* ldx/stx */ | ||
65 | return 8; | ||
66 | tmp &= 3; | ||
67 | if (!tmp) | ||
68 | return 4; | ||
69 | else if (tmp == 3) | ||
70 | return 16; /* ldd/std - Although it is actually 8 */ | ||
71 | else if (tmp == 2) | ||
72 | return 2; | ||
73 | else { | ||
74 | printk("Impossible unaligned trap. insn=%08x\n", insn); | ||
75 | die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); | ||
76 | |||
77 | /* GCC should never warn that control reaches the end | ||
78 | * of this function without returning a value because | ||
79 | * die_if_kernel() is marked with attribute 'noreturn'. | ||
80 | * Alas, some versions do... | ||
81 | */ | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | static inline int decode_asi(unsigned int insn, struct pt_regs *regs) | ||
88 | { | ||
89 | if (insn & 0x800000) { | ||
90 | if (insn & 0x2000) | ||
91 | return (unsigned char)(regs->tstate >> 24); /* %asi */ | ||
92 | else | ||
93 | return (unsigned char)(insn >> 5); /* imm_asi */ | ||
94 | } else | ||
95 | return ASI_P; | ||
96 | } | ||
97 | |||
/* Non-zero (0x400000) when the instruction is a signed load,
 * zero when unsigned.
 */
static inline int decode_signedness(unsigned int insn)
{
	const unsigned int sign_bit = 0x400000;

	return (int)(insn & sign_bit);
}
103 | |||
/* Flush the register windows to the stack if any of the registers the
 * emulation will touch lives in the current window (%l0-%i7, i.e.
 * register number >= 16); window-resident registers can only be
 * read/written through their stack save area.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs1 < 16 && rs2 < 16 && rd < 16)
		return;

	if (from_kernel)
		__asm__ __volatile__("flushw");
	else
		flushw_user();
}
114 | |||
/* Sign extend the low 13 bits of @imm (a SPARC simm13 immediate field)
 * to a full long; bits above bit 12 are ignored, so callers may pass
 * the whole instruction word.
 *
 * Written with mask arithmetic instead of the old "imm << 51 >> 51":
 * left-shifting set bits into (or past) the sign bit of a signed long
 * is undefined behavior in C, and right shift of a negative value is
 * only implementation-defined.  The subtraction below is exactly
 * equivalent on the simm13 domain: if the sign bit (bit 12) is set,
 * the low 12 bits are biased down by 4096.
 */
static inline long sign_extend_imm13(long imm)
{
	return (imm & 0xfff) - (imm & 0x1000);
}
119 | |||
/* Fetch the current value of integer register @reg for the trapping
 * context @regs.  %g0-%o7 (reg < 16) live in pt_regs (%g0 reads as 0);
 * locals/ins (reg >= 16) live in the register window save area, which
 * the caller has already flushed via maybe_flush_windows():
 *   - kernel trap:  read directly from the kernel stack frame
 *   - 32-bit task:  32-bit window at the zero-extended 32-bit %sp
 *   - 64-bit task:  64-bit window at %sp + STACK_BIAS
 *
 * NOTE(review): if get_user() faults here, @value is returned without
 * ever being assigned; callers appear to tolerate a garbage value in
 * that case — confirm.
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 __user *win32;
		/* Truncate %fp to 32 bits for compat tasks. */
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
141 | |||
/* Return the address where integer register @reg of the trapping
 * context is stored: inside pt_regs for reg < 16, otherwise inside the
 * (already flushed) register window save area on the stack.
 *
 * NOTE(review): for a 32-bit user task this returns a pointer into
 * userspace (and into a 32-bit slot); callers must access it with
 * get_user/put_user-style accessors sized accordingly — see
 * handle_ld_nf().
 */
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 *win32;
		/* Truncate %fp to 32 bits for compat tasks. */
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
160 | |||
161 | unsigned long compute_effective_address(struct pt_regs *regs, | ||
162 | unsigned int insn, unsigned int rd) | ||
163 | { | ||
164 | unsigned int rs1 = (insn >> 14) & 0x1f; | ||
165 | unsigned int rs2 = insn & 0x1f; | ||
166 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; | ||
167 | |||
168 | if (insn & 0x2000) { | ||
169 | maybe_flush_windows(rs1, 0, rd, from_kernel); | ||
170 | return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); | ||
171 | } else { | ||
172 | maybe_flush_windows(rs1, rs2, rd, from_kernel); | ||
173 | return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); | ||
174 | } | ||
175 | } | ||
176 | |||
/* This is just to make gcc think die_if_kernel does return...
 * Wrapping the call in a plain (non-noreturn) function silences
 * "control reaches end of non-void function" style warnings at the
 * call sites; __used keeps the wrapper from being discarded.
 */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
182 | |||
183 | extern int do_int_load(unsigned long *dest_reg, int size, | ||
184 | unsigned long *saddr, int is_signed, int asi); | ||
185 | |||
186 | extern int __do_int_store(unsigned long *dst_addr, int size, | ||
187 | unsigned long src_val, int asi); | ||
188 | |||
/* Emulate an unaligned integer store of register @reg_num to @dst_addr.
 *
 * size == 16 denotes std: the even/odd 32-bit register pair is packed
 * into a single 64-bit value and stored as one 8-byte access.  %g0
 * stores zero.  If the caller stripped a little-endian ASI down to its
 * big-endian twin (@asi != @orig_asi), byte-swap the value first so
 * the big-endian byte store sequence produces the little-endian memory
 * image.  Returns 0 or -EFAULT from __do_int_store().
 */
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;	/* %g0 (or std pair) default */
	unsigned long src_val;

	if (size == 16) {
		/* std: pack regs rd and rd+1 into one 64-bit store. */
		size = 8;
		zero = (((long)(reg_num ?
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		/* Little-endian ASI: swab so the BE store matches. */
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:	/* unreachable: rewritten to 8 above */
		default:
			BUG();
			break;
		};
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}
224 | |||
225 | static inline void advance(struct pt_regs *regs) | ||
226 | { | ||
227 | regs->tpc = regs->tnpc; | ||
228 | regs->tnpc += 4; | ||
229 | if (test_thread_flag(TIF_32BIT)) { | ||
230 | regs->tpc &= 0xffffffff; | ||
231 | regs->tnpc &= 0xffffffff; | ||
232 | } | ||
233 | } | ||
234 | |||
/* Bit 24 of the instruction word selects the FP load/store group. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & (1U << 24)) ? 1 : 0;
}
239 | |||
/* The kernel-mode emulation path handles integer accesses only;
 * FP loads/stores are rejected by the caller.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	if (floating_point_load_or_store_p(insn))
		return 0;
	return 1;
}
244 | |||
/* Handle a fault taken while emulating a kernel unaligned access
 * (trap state was stashed in thread_info by kernel_unaligned_trap()).
 *
 * If the faulting PC has an exception-table fixup, branch there;
 * otherwise this is a genuine bad kernel dereference, so print the
 * usual oops diagnostics and die.  When @fixup_tstate_asi is set,
 * also reset the saved %asi in TSTATE to ASI_AIUS so the fixup code
 * resumes with the user-secondary ASI.
 */
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* Resume at the fixup stub. */
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}
281 | |||
/* Rate-limited logging of kernel unaligned accesses: at most 5
 * messages per burst; the counter resets once 5 seconds have passed
 * since the last logged message.
 */
static void log_unaligned(struct pt_regs *regs)
{
	static unsigned long count, last_time;

	if (time_after(jiffies, last_time + 5 * HZ))
		count = 0;
	if (count < 5) {
		last_time = jiffies;	/* only advances when we print */
		count++;
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}
295 | |||
/* Entry point for kernel-mode unaligned access traps (called from the
 * trap table with the trapping instruction word in @insn).
 *
 * Decodes the instruction, computes the effective address, and
 * emulates the access byte-by-byte via do_int_load()/do_int_store().
 * Little-endian ASIs are mapped to their big-endian twins and the
 * value byte-swapped afterwards.  On any emulation fault,
 * kernel_mna_trap_fault() dispatches to the exception-table fixup
 * (regs/insn are stashed in thread_info for it).  FP and atomic
 * (swap/cas) accesses are not emulated for the kernel.
 */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);
	int orig_asi, asi;

	/* Stash trap state for kernel_mna_trap_fault(). */
	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
		       "retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size,
		       regs->u_regs[UREG_RETPC]);
#endif
		/* Little-endian ASIs: strip the LE bit (0x08) and do a
		 * big-endian access, swapping the bytes in C afterwards.
		 */
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		};
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			/* Undo the BE access for little-endian ASIs. */
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:	/* ldd never reaches here with a LE ASI swab */
				default:
					BUG();
					break;
				};
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}
390 | |||
/* Population count of each 4-bit nibble value, used by handle_popc()
 * to sum the popcount of a 64-bit word nibble by nibble.
 */
static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};
395 | |||
/* Emulate the POPC (population count) instruction on CPUs that trap
 * on it.  The source is either the sign-extended simm13 immediate
 * (i=1 form) or register rs2; the nibble lookup table sums the set
 * bits of the 64-bit value.  The result is written to rd: directly
 * into pt_regs for globals/outs, or into the (flushed) user register
 * window via put_user for locals/ins.  %g0 as rd discards the result.
 * Always returns 1 (instruction handled) after advancing the PC.
 */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	/* Sum popcount 4 bits at a time (16 nibbles in 64 bits). */
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)		/* %g0 ignores writes */
			regs->u_regs[rd] = ret;
	} else {
		/* rd lives in the user window save area on the stack. */
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
430 | |||
431 | extern void do_fpother(struct pt_regs *regs); | ||
432 | extern void do_privact(struct pt_regs *regs); | ||
433 | extern void spitfire_data_access_exception(struct pt_regs *regs, | ||
434 | unsigned long sfsr, | ||
435 | unsigned long sfar); | ||
436 | extern void sun4v_data_access_exception(struct pt_regs *regs, | ||
437 | unsigned long addr, | ||
438 | unsigned long type_ctx); | ||
439 | |||
/* Emulate user-mode LDF/LDDF/LDQF and STQ(A) instructions that
 * trapped (e.g. quad FP accesses not implemented in hardware).
 *
 * @freg is decoded from rd using the V9 double/quad register number
 * encoding (bit 5 of the register number comes from insn bit 25's
 * neighbor field).  The FPU state is synced to thread_info first
 * (save_and_clear_fpu()) and operated on there; FPRS_DL/FPRS_DU track
 * which half of the register file holds live data.  Misuse (odd quad
 * register) raises fp-other; bad/privileged ASIs raise privileged-
 * action or data-access exceptions.  Returns 1 when the trap was
 * consumed (even on an exception we delivered ourselves), 0 when it
 * was converted into an fp-other trap.
 */
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;	/* clear ftt field */
	if (freg & 3) {
		/* Quad registers must be 4-aligned register numbers. */
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		/* Registers read as zero unless this half is live. */
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			/* Restricted ASI from user mode. */
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		/* Store the 16 bytes as four word accesses. */
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		/* size in 32-bit words: LDF=1, LDQF=4, LDDF=2. */
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		/* Non-faulting ASIs swallow the fault and load zeros. */
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			/* Byte-swap each 64-bit half; quads also swap
			 * the two halves.
			 */
			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		/* First FP use in this context: initialize saved state. */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		/* Zero the half of the register file we are bringing live. */
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
562 | |||
/* Complete a faulting non-faulting-ASI load (ldXa with an NF ASI) by
 * writing zero into the destination register and moving on, as the
 * architecture requires.  For the ldd variant ((insn & 0x780000) ==
 * 0x180000) both registers of the pair are zeroed.  Destination
 * registers in the user window save area are written with put_user
 * (32-bit slots for compat tasks).
 */
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
586 | |||
/* Emulate a user-mode LDDF (FP double load) that trapped because the
 * address @sfar was only 4-byte aligned: perform it as two 32-bit
 * user loads and assemble the 64-bit value into the FP register file
 * saved in thread_info.  Bad ASIs or faulting accesses (other than
 * non-faulting ASIs, which load zero) are converted into data-access
 * exceptions.  Kernel-mode entry is a bug and dies.
 */
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() returns 0 or -EFAULT, so this tests for success. */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 __user *)sfar) ||
		     get_user(second, (u32 __user *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* First FP use in this context: initialize saved state. */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		/* Zero the half of the register file being brought live. */
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
643 | |||
/* Emulate a user-mode STDF (FP double store) that trapped because the
 * address @sfar was only 4-byte aligned: fetch the 64-bit value from
 * the saved FP register file (zero if that half is not live) and
 * store it as two 32-bit user stores, byte-swapped for little-endian
 * ASIs.  Bad ASIs or faulting stores become data-access exceptions.
 * Kernel-mode entry is a bug and dies.
 */
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() returns 0 or -EFAULT, so this tests for success. */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		/* Register reads as zero unless this half is live. */
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c new file mode 100644 index 000000000000..791c15138f3a --- /dev/null +++ b/arch/sparc/kernel/us2e_cpufreq.c | |||
@@ -0,0 +1,413 @@ | |||
1 | /* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support | ||
2 | * | ||
3 | * Copyright (C) 2003 David S. Miller (davem@redhat.com) | ||
4 | * | ||
5 | * Many thanks to Dominik Brodowski for fixing up the cpufreq | ||
6 | * infrastructure in order to make this driver easier to implement. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/cpufreq.h> | ||
14 | #include <linux/threads.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/asi.h> | ||
20 | #include <asm/timer.h> | ||
21 | |||
/* Registered driver instance; non-NULL only after successful init. */
static struct cpufreq_driver *cpufreq_us2e_driver;

/* Five divisor entries plus the CPUFREQ_TABLE_END terminator. */
struct us2e_freq_percpu_info {
	struct cpufreq_frequency_table table[6];
};

/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;

/* Physical addresses of the hummingbird (IIe host bridge) memory
 * control and ESTAR mode registers.
 */
#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL

/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
 * in the ESTAR mode control register.  Note the encodings for /4 and /6
 * are not in numerical order.
 */
#define ESTAR_MODE_DIV_1	0x0000000000000000UL
#define ESTAR_MODE_DIV_2	0x0000000000000001UL
#define ESTAR_MODE_DIV_4	0x0000000000000003UL
#define ESTAR_MODE_DIV_6	0x0000000000000002UL
#define ESTAR_MODE_DIV_8	0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL

/* MEM_CNTL0 fields: self-refresh enable and the refresh count field,
 * plus the refresh interval (ns) and clocks-per-count used to compute
 * a new refresh count when the clock divisor changes.
 */
#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT	8
#define MCTRL0_REFR_INTERVAL	7800
#define MCTRL0_REFR_CLKS_P_CNT	64
/* Read a hummingbird register at physical address @addr using a
 * physical-bypass ASI load.
 */
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}
59 | |||
/* Write @val to the hummingbird register at physical address @addr,
 * with a membar to order the store.  Writes to the ESTAR mode
 * register additionally wait for the PLL to relock.
 */
static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock.  */
		udelay(1);
	}
}
72 | |||
73 | static void self_refresh_ctl(int enable) | ||
74 | { | ||
75 | unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); | ||
76 | |||
77 | if (enable) | ||
78 | mctrl |= MCTRL0_SREFRESH_ENAB; | ||
79 | else | ||
80 | mctrl &= ~MCTRL0_SREFRESH_ENAB; | ||
81 | write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); | ||
82 | (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); | ||
83 | } | ||
84 | |||
/* Recompute the MEM_CNTL0 refresh count for the new clock divisor and
 * program it.  @clock_tick is in Hz.  When @cpu_slowing_down and
 * self-refresh is disabled, busy-wait long enough for both the old
 * and new refresh counts to drain before returning.
 */
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;

	/* Refresh count = refresh interval (ns) expressed in units of
	 * MCTRL0_REFR_CLKS_P_CNT divided-clock cycles.
	 */
	refr_count  = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	/* Read back so the write has posted before any delay math. */
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}
116 | |||
/* Move the ESTAR clock divisor from @old_divisor to @divisor with
 * interrupts disabled.  @clock_tick is in Hz.  Any change between 1x
 * and a divisor greater than 2 is routed through the 2x state, and
 * the memory refresh count is reprogrammed before or after the
 * divisor write depending on the direction of the change.
 */
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	unsigned long flags;

	local_irq_save(flags);

	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual.  */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		/* 1x -> Nx: step through the 2x state. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		/* Nx -> 1x: likewise via the 2x state. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		/* Caller must not request a transition to the same divisor. */
		BUG();
	}

	local_irq_restore(flags);
}
158 | |||
159 | static unsigned long index_to_estar_mode(unsigned int index) | ||
160 | { | ||
161 | switch (index) { | ||
162 | case 0: | ||
163 | return ESTAR_MODE_DIV_1; | ||
164 | |||
165 | case 1: | ||
166 | return ESTAR_MODE_DIV_2; | ||
167 | |||
168 | case 2: | ||
169 | return ESTAR_MODE_DIV_4; | ||
170 | |||
171 | case 3: | ||
172 | return ESTAR_MODE_DIV_6; | ||
173 | |||
174 | case 4: | ||
175 | return ESTAR_MODE_DIV_8; | ||
176 | |||
177 | default: | ||
178 | BUG(); | ||
179 | }; | ||
180 | } | ||
181 | |||
/* Map a cpufreq table index to its numeric clock divisor. */
static unsigned long index_to_divisor(unsigned int index)
{
	static const unsigned long divisor_of[] = { 1, 2, 4, 6, 8 };

	if (index >= sizeof(divisor_of) / sizeof(divisor_of[0]))
		BUG();

	return divisor_of[index];
}
204 | |||
205 | static unsigned long estar_to_divisor(unsigned long estar) | ||
206 | { | ||
207 | unsigned long ret; | ||
208 | |||
209 | switch (estar & ESTAR_MODE_DIV_MASK) { | ||
210 | case ESTAR_MODE_DIV_1: | ||
211 | ret = 1; | ||
212 | break; | ||
213 | case ESTAR_MODE_DIV_2: | ||
214 | ret = 2; | ||
215 | break; | ||
216 | case ESTAR_MODE_DIV_4: | ||
217 | ret = 4; | ||
218 | break; | ||
219 | case ESTAR_MODE_DIV_6: | ||
220 | ret = 6; | ||
221 | break; | ||
222 | case ESTAR_MODE_DIV_8: | ||
223 | ret = 8; | ||
224 | break; | ||
225 | default: | ||
226 | BUG(); | ||
227 | }; | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
/* cpufreq ->get hook: return the current frequency of @cpu in kHz.
 * The ESTAR register is per-cpu, so we temporarily pin the calling
 * task onto the target cpu to read it.
 */
static unsigned int us2e_freq_get(unsigned int cpu)
{
	cpumask_t cpus_allowed;
	unsigned long clock_tick, estar;

	if (!cpu_online(cpu))
		return 0;

	/* Save affinity, migrate onto the target cpu. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	/* Restore the caller's original affinity. */
	set_cpus_allowed(current, cpus_allowed);

	return clock_tick / estar_to_divisor(estar);
}
250 | |||
/* Program @cpu's clock divider to the setting of table entry @index,
 * wrapping the change in cpufreq PRE/POSTCHANGE notifications.  Runs
 * on the target cpu by temporarily adjusting the caller's affinity.
 */
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq;
	unsigned long clock_tick, divisor, old_divisor, estar;
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	/* Save affinity, migrate onto the target cpu. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	/* clock_tick and new_freq are in kHz from here on. */
	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	new_bits = index_to_estar_mode(index);
	divisor = index_to_divisor(index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	old_divisor = estar_to_divisor(estar);

	freqs.old = clock_tick / old_divisor;
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* us2e_transition() wants the clock rate in Hz. */
	if (old_divisor != divisor)
		us2e_transition(estar, new_bits, clock_tick * 1000,
				old_divisor, divisor);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed(current, cpus_allowed);
}
286 | |||
287 | static int us2e_freq_target(struct cpufreq_policy *policy, | ||
288 | unsigned int target_freq, | ||
289 | unsigned int relation) | ||
290 | { | ||
291 | unsigned int new_index = 0; | ||
292 | |||
293 | if (cpufreq_frequency_table_target(policy, | ||
294 | &us2e_freq_table[policy->cpu].table[0], | ||
295 | target_freq, relation, &new_index)) | ||
296 | return -EINVAL; | ||
297 | |||
298 | us2e_set_cpu_divider_index(policy->cpu, new_index); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int us2e_freq_verify(struct cpufreq_policy *policy) | ||
304 | { | ||
305 | return cpufreq_frequency_table_verify(policy, | ||
306 | &us2e_freq_table[policy->cpu].table[0]); | ||
307 | } | ||
308 | |||
309 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | ||
310 | { | ||
311 | unsigned int cpu = policy->cpu; | ||
312 | unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; | ||
313 | struct cpufreq_frequency_table *table = | ||
314 | &us2e_freq_table[cpu].table[0]; | ||
315 | |||
316 | table[0].index = 0; | ||
317 | table[0].frequency = clock_tick / 1; | ||
318 | table[1].index = 1; | ||
319 | table[1].frequency = clock_tick / 2; | ||
320 | table[2].index = 2; | ||
321 | table[2].frequency = clock_tick / 4; | ||
322 | table[2].index = 3; | ||
323 | table[2].frequency = clock_tick / 6; | ||
324 | table[2].index = 4; | ||
325 | table[2].frequency = clock_tick / 8; | ||
326 | table[2].index = 5; | ||
327 | table[3].frequency = CPUFREQ_TABLE_END; | ||
328 | |||
329 | policy->cpuinfo.transition_latency = 0; | ||
330 | policy->cur = clock_tick; | ||
331 | |||
332 | return cpufreq_frequency_table_cpuinfo(policy, table); | ||
333 | } | ||
334 | |||
335 | static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) | ||
336 | { | ||
337 | if (cpufreq_us2e_driver) | ||
338 | us2e_set_cpu_divider_index(policy->cpu, 0); | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | static int __init us2e_freq_init(void) | ||
344 | { | ||
345 | unsigned long manuf, impl, ver; | ||
346 | int ret; | ||
347 | |||
348 | if (tlb_type != spitfire) | ||
349 | return -ENODEV; | ||
350 | |||
351 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | ||
352 | manuf = ((ver >> 48) & 0xffff); | ||
353 | impl = ((ver >> 32) & 0xffff); | ||
354 | |||
355 | if (manuf == 0x17 && impl == 0x13) { | ||
356 | struct cpufreq_driver *driver; | ||
357 | |||
358 | ret = -ENOMEM; | ||
359 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | ||
360 | if (!driver) | ||
361 | goto err_out; | ||
362 | |||
363 | us2e_freq_table = kzalloc( | ||
364 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), | ||
365 | GFP_KERNEL); | ||
366 | if (!us2e_freq_table) | ||
367 | goto err_out; | ||
368 | |||
369 | driver->init = us2e_freq_cpu_init; | ||
370 | driver->verify = us2e_freq_verify; | ||
371 | driver->target = us2e_freq_target; | ||
372 | driver->get = us2e_freq_get; | ||
373 | driver->exit = us2e_freq_cpu_exit; | ||
374 | driver->owner = THIS_MODULE, | ||
375 | strcpy(driver->name, "UltraSPARC-IIe"); | ||
376 | |||
377 | cpufreq_us2e_driver = driver; | ||
378 | ret = cpufreq_register_driver(driver); | ||
379 | if (ret) | ||
380 | goto err_out; | ||
381 | |||
382 | return 0; | ||
383 | |||
384 | err_out: | ||
385 | if (driver) { | ||
386 | kfree(driver); | ||
387 | cpufreq_us2e_driver = NULL; | ||
388 | } | ||
389 | kfree(us2e_freq_table); | ||
390 | us2e_freq_table = NULL; | ||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | return -ENODEV; | ||
395 | } | ||
396 | |||
397 | static void __exit us2e_freq_exit(void) | ||
398 | { | ||
399 | if (cpufreq_us2e_driver) { | ||
400 | cpufreq_unregister_driver(cpufreq_us2e_driver); | ||
401 | kfree(cpufreq_us2e_driver); | ||
402 | cpufreq_us2e_driver = NULL; | ||
403 | kfree(us2e_freq_table); | ||
404 | us2e_freq_table = NULL; | ||
405 | } | ||
406 | } | ||
407 | |||
408 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); | ||
409 | MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); | ||
410 | MODULE_LICENSE("GPL"); | ||
411 | |||
412 | module_init(us2e_freq_init); | ||
413 | module_exit(us2e_freq_exit); | ||
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c new file mode 100644 index 000000000000..365b6464e2ce --- /dev/null +++ b/arch/sparc/kernel/us3_cpufreq.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* us3_cpufreq.c: UltraSPARC-III cpu frequency support | ||
2 | * | ||
3 | * Copyright (C) 2003 David S. Miller (davem@redhat.com) | ||
4 | * | ||
5 | * Many thanks to Dominik Brodowski for fixing up the cpufreq | ||
6 | * infrastructure in order to make this driver easier to implement. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/cpufreq.h> | ||
14 | #include <linux/threads.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/init.h> | ||
17 | |||
18 | #include <asm/head.h> | ||
19 | #include <asm/timer.h> | ||
20 | |||
/* Registered driver instance; non-NULL only after successful init. */
static struct cpufreq_driver *cpufreq_us3_driver;

/* Three divisor entries plus the CPUFREQ_TABLE_END terminator. */
struct us3_freq_percpu_info {
	struct cpufreq_frequency_table table[4];
};

/* Indexed by cpu number. */
static struct us3_freq_percpu_info *us3_freq_table;

/* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
 * in the Safari config register.
 */
#define SAFARI_CFG_DIV_1	0x0000000000000000UL
#define SAFARI_CFG_DIV_2	0x0000000040000000UL
#define SAFARI_CFG_DIV_32	0x0000000080000000UL
#define SAFARI_CFG_DIV_MASK	0x00000000C0000000UL
37 | |||
/* Read the current cpu's Safari config register via its ASI. */
static unsigned long read_safari_cfg(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%%g0] %1, %0"
			     : "=&r" (ret)
			     : "i" (ASI_SAFARI_CONFIG));
	return ret;
}
47 | |||
/* Write @val to the current cpu's Safari config register, with a
 * membar to order the store.
 */
static void write_safari_cfg(unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
			     : "memory");
}
56 | |||
57 | static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) | ||
58 | { | ||
59 | unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; | ||
60 | unsigned long ret; | ||
61 | |||
62 | switch (safari_cfg & SAFARI_CFG_DIV_MASK) { | ||
63 | case SAFARI_CFG_DIV_1: | ||
64 | ret = clock_tick / 1; | ||
65 | break; | ||
66 | case SAFARI_CFG_DIV_2: | ||
67 | ret = clock_tick / 2; | ||
68 | break; | ||
69 | case SAFARI_CFG_DIV_32: | ||
70 | ret = clock_tick / 32; | ||
71 | break; | ||
72 | default: | ||
73 | BUG(); | ||
74 | }; | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | |||
/* cpufreq ->get hook: return the current frequency of @cpu in kHz.
 * The Safari config register is per-cpu, so we temporarily pin the
 * calling task onto the target cpu to read it.
 */
static unsigned int us3_freq_get(unsigned int cpu)
{
	cpumask_t cpus_allowed;
	unsigned long reg;
	unsigned int ret;

	if (!cpu_online(cpu))
		return 0;

	/* Save affinity, migrate onto the target cpu. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	reg = read_safari_cfg();
	ret = get_current_freq(cpu, reg);

	/* Restore the caller's original affinity. */
	set_cpus_allowed(current, cpus_allowed);

	return ret;
}
98 | |||
/* Program @cpu's Safari clock divider to the setting of table entry
 * @index, wrapping the change in cpufreq PRE/POSTCHANGE
 * notifications.  Runs on the target cpu by temporarily adjusting
 * the caller's affinity.
 */
static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq, reg;
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	/* Save affinity, migrate onto the target cpu. */
	cpus_allowed = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	/* Map table index -> divider bits and resulting kHz. */
	new_freq = sparc64_get_clock_tick(cpu) / 1000;
	switch (index) {
	case 0:
		new_bits = SAFARI_CFG_DIV_1;
		new_freq /= 1;
		break;
	case 1:
		new_bits = SAFARI_CFG_DIV_2;
		new_freq /= 2;
		break;
	case 2:
		new_bits = SAFARI_CFG_DIV_32;
		new_freq /= 32;
		break;

	default:
		BUG();
	};

	reg = read_safari_cfg();

	freqs.old = get_current_freq(cpu, reg);
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* Read-modify-write only the divider field. */
	reg &= ~SAFARI_CFG_DIV_MASK;
	reg |= new_bits;
	write_safari_cfg(reg);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed(current, cpus_allowed);
}
145 | |||
146 | static int us3_freq_target(struct cpufreq_policy *policy, | ||
147 | unsigned int target_freq, | ||
148 | unsigned int relation) | ||
149 | { | ||
150 | unsigned int new_index = 0; | ||
151 | |||
152 | if (cpufreq_frequency_table_target(policy, | ||
153 | &us3_freq_table[policy->cpu].table[0], | ||
154 | target_freq, | ||
155 | relation, | ||
156 | &new_index)) | ||
157 | return -EINVAL; | ||
158 | |||
159 | us3_set_cpu_divider_index(policy->cpu, new_index); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static int us3_freq_verify(struct cpufreq_policy *policy) | ||
165 | { | ||
166 | return cpufreq_frequency_table_verify(policy, | ||
167 | &us3_freq_table[policy->cpu].table[0]); | ||
168 | } | ||
169 | |||
/* cpufreq ->init hook: build the per-cpu frequency table (dividers
 * 1, 2 and 32 plus the terminator) and register it with the cpufreq
 * core.
 */
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	struct cpufreq_frequency_table *table =
		&us3_freq_table[cpu].table[0];

	table[0].index = 0;
	table[0].frequency = clock_tick / 1;
	table[1].index = 1;
	table[1].frequency = clock_tick / 2;
	table[2].index = 2;
	table[2].frequency = clock_tick / 32;
	/* Terminator entry; its index value is not used. */
	table[3].index = 0;
	table[3].frequency = CPUFREQ_TABLE_END;

	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_frequency_table_cpuinfo(policy, table);
}
191 | |||
192 | static int us3_freq_cpu_exit(struct cpufreq_policy *policy) | ||
193 | { | ||
194 | if (cpufreq_us3_driver) | ||
195 | us3_set_cpu_divider_index(policy->cpu, 0); | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static int __init us3_freq_init(void) | ||
201 | { | ||
202 | unsigned long manuf, impl, ver; | ||
203 | int ret; | ||
204 | |||
205 | if (tlb_type != cheetah && tlb_type != cheetah_plus) | ||
206 | return -ENODEV; | ||
207 | |||
208 | __asm__("rdpr %%ver, %0" : "=r" (ver)); | ||
209 | manuf = ((ver >> 48) & 0xffff); | ||
210 | impl = ((ver >> 32) & 0xffff); | ||
211 | |||
212 | if (manuf == CHEETAH_MANUF && | ||
213 | (impl == CHEETAH_IMPL || | ||
214 | impl == CHEETAH_PLUS_IMPL || | ||
215 | impl == JAGUAR_IMPL || | ||
216 | impl == PANTHER_IMPL)) { | ||
217 | struct cpufreq_driver *driver; | ||
218 | |||
219 | ret = -ENOMEM; | ||
220 | driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); | ||
221 | if (!driver) | ||
222 | goto err_out; | ||
223 | |||
224 | us3_freq_table = kzalloc( | ||
225 | (NR_CPUS * sizeof(struct us3_freq_percpu_info)), | ||
226 | GFP_KERNEL); | ||
227 | if (!us3_freq_table) | ||
228 | goto err_out; | ||
229 | |||
230 | driver->init = us3_freq_cpu_init; | ||
231 | driver->verify = us3_freq_verify; | ||
232 | driver->target = us3_freq_target; | ||
233 | driver->get = us3_freq_get; | ||
234 | driver->exit = us3_freq_cpu_exit; | ||
235 | driver->owner = THIS_MODULE, | ||
236 | strcpy(driver->name, "UltraSPARC-III"); | ||
237 | |||
238 | cpufreq_us3_driver = driver; | ||
239 | ret = cpufreq_register_driver(driver); | ||
240 | if (ret) | ||
241 | goto err_out; | ||
242 | |||
243 | return 0; | ||
244 | |||
245 | err_out: | ||
246 | if (driver) { | ||
247 | kfree(driver); | ||
248 | cpufreq_us3_driver = NULL; | ||
249 | } | ||
250 | kfree(us3_freq_table); | ||
251 | us3_freq_table = NULL; | ||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | return -ENODEV; | ||
256 | } | ||
257 | |||
258 | static void __exit us3_freq_exit(void) | ||
259 | { | ||
260 | if (cpufreq_us3_driver) { | ||
261 | cpufreq_unregister_driver(cpufreq_us3_driver); | ||
262 | kfree(cpufreq_us3_driver); | ||
263 | cpufreq_us3_driver = NULL; | ||
264 | kfree(us3_freq_table); | ||
265 | us3_freq_table = NULL; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); | ||
270 | MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); | ||
271 | MODULE_LICENSE("GPL"); | ||
272 | |||
273 | module_init(us3_freq_init); | ||
274 | module_exit(us3_freq_exit); | ||
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S new file mode 100644 index 000000000000..b7f0f3f3a909 --- /dev/null +++ b/arch/sparc/kernel/utrap.S | |||
@@ -0,0 +1,29 @@ | |||
	/* utrap_trap: entered with %g3 = user-trap handler slot index
	 * and %g4 = trap level.  If the task has a utrap table
	 * installed, transfer control to the registered user handler;
	 * otherwise fall into the kernel's bad_trap path.
	 */
	.globl	utrap_trap
	.type	utrap_trap,#function
utrap_trap:		/* %g3=handler,%g4=level */
	TRAP_LOAD_THREAD_REG(%g6, %g1)
	/* Load the task's utrap table pointer; non-NULL means a
	 * user-level handler may be registered.
	 */
	ldx	[%g6 + TI_UTRAPS], %g1
	brnz,pt	%g1, invoke_utrap
	 nop

	/* No utrap table: build a trap frame and call bad_trap(). */
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o1
	call	bad_trap
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

invoke_utrap:
	/* Index the utrap table (8-byte entries) by the slot in %g3. */
	sllx	%g3, 3, %g3
	ldx	[%g1 + %g3], %g1
	/* Open a new register window for the user handler and patch
	 * the saved CWP into the trap-time %tstate.
	 */
	save	%sp, -128, %sp
	rdpr	%tstate, %l6
	rdpr	%cwp, %l7
	andn	%l6, TSTATE_CWP, %l6
	wrpr	%l6, %l7, %tstate
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7
	/* Resume at the handler address via %tnpc and 'done'. */
	wrpr	%g1, 0, %tnpc
	done
	.size	utrap_trap,.-utrap_trap
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c new file mode 100644 index 000000000000..92b1f8ec01de --- /dev/null +++ b/arch/sparc/kernel/vio.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /* vio.c: Virtual I/O channel devices probing infrastructure. | ||
2 | * | ||
3 | * Copyright (c) 2003-2005 IBM Corp. | ||
4 | * Dave Engebretsen engebret@us.ibm.com | ||
5 | * Santiago Leon santil@us.ibm.com | ||
6 | * Hollis Blanchard <hollisb@us.ibm.com> | ||
7 | * Stephen Rothwell | ||
8 | * | ||
9 | * Adapted to sparc64 by David S. Miller davem@davemloft.net | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/mdesc.h> | ||
17 | #include <asm/vio.h> | ||
18 | |||
19 | static const struct vio_device_id *vio_match_device( | ||
20 | const struct vio_device_id *matches, | ||
21 | const struct vio_dev *dev) | ||
22 | { | ||
23 | const char *type, *compat; | ||
24 | int len; | ||
25 | |||
26 | type = dev->type; | ||
27 | compat = dev->compat; | ||
28 | len = dev->compat_len; | ||
29 | |||
30 | while (matches->type[0] || matches->compat[0]) { | ||
31 | int match = 1; | ||
32 | if (matches->type[0]) | ||
33 | match &= !strcmp(matches->type, type); | ||
34 | |||
35 | if (matches->compat[0]) { | ||
36 | match &= len && | ||
37 | of_find_in_proplist(compat, matches->compat, len); | ||
38 | } | ||
39 | if (match) | ||
40 | return matches; | ||
41 | matches++; | ||
42 | } | ||
43 | return NULL; | ||
44 | } | ||
45 | |||
46 | static int vio_bus_match(struct device *dev, struct device_driver *drv) | ||
47 | { | ||
48 | struct vio_dev *vio_dev = to_vio_dev(dev); | ||
49 | struct vio_driver *vio_drv = to_vio_driver(drv); | ||
50 | const struct vio_device_id *matches = vio_drv->id_table; | ||
51 | |||
52 | if (!matches) | ||
53 | return 0; | ||
54 | |||
55 | return vio_match_device(matches, vio_dev) != NULL; | ||
56 | } | ||
57 | |||
58 | static int vio_device_probe(struct device *dev) | ||
59 | { | ||
60 | struct vio_dev *vdev = to_vio_dev(dev); | ||
61 | struct vio_driver *drv = to_vio_driver(dev->driver); | ||
62 | const struct vio_device_id *id; | ||
63 | int error = -ENODEV; | ||
64 | |||
65 | if (drv->probe) { | ||
66 | id = vio_match_device(drv->id_table, vdev); | ||
67 | if (id) | ||
68 | error = drv->probe(vdev, id); | ||
69 | } | ||
70 | |||
71 | return error; | ||
72 | } | ||
73 | |||
74 | static int vio_device_remove(struct device *dev) | ||
75 | { | ||
76 | struct vio_dev *vdev = to_vio_dev(dev); | ||
77 | struct vio_driver *drv = to_vio_driver(dev->driver); | ||
78 | |||
79 | if (drv->remove) | ||
80 | return drv->remove(vdev); | ||
81 | |||
82 | return 1; | ||
83 | } | ||
84 | |||
85 | static ssize_t devspec_show(struct device *dev, | ||
86 | struct device_attribute *attr, char *buf) | ||
87 | { | ||
88 | struct vio_dev *vdev = to_vio_dev(dev); | ||
89 | const char *str = "none"; | ||
90 | |||
91 | if (!strcmp(vdev->type, "vnet-port")) | ||
92 | str = "vnet"; | ||
93 | else if (!strcmp(vdev->type, "vdc-port")) | ||
94 | str = "vdisk"; | ||
95 | |||
96 | return sprintf(buf, "%s\n", str); | ||
97 | } | ||
98 | |||
99 | static ssize_t type_show(struct device *dev, | ||
100 | struct device_attribute *attr, char *buf) | ||
101 | { | ||
102 | struct vio_dev *vdev = to_vio_dev(dev); | ||
103 | return sprintf(buf, "%s\n", vdev->type); | ||
104 | } | ||
105 | |||
/* Default sysfs attributes created for every device on the vio bus. */
static struct device_attribute vio_dev_attrs[] = {
	__ATTR_RO(devspec),
	__ATTR_RO(type),
	__ATTR_NULL
};

/* The virtual-IO bus type, wiring the match/probe/remove hooks above
 * into the driver core.
 */
static struct bus_type vio_bus_type = {
	.name		= "vio",
	.dev_attrs	= vio_dev_attrs,
	.match		= vio_bus_match,
	.probe		= vio_device_probe,
	.remove		= vio_device_remove,
};
119 | |||
/**
 * vio_register_driver - register a driver with the vio bus
 * @viodrv: driver structure to register
 *
 * Points the driver at the vio bus type and hands it to the driver
 * core.  Returns the driver_register() result.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
	viodrv->driver.bus = &vio_bus_type;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);
127 | |||
/**
 * vio_unregister_driver - unregister a driver from the vio bus
 * @viodrv: driver structure previously registered
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
133 | |||
/* Device ->release hook: free the containing vio_dev once the last
 * reference is dropped.
 */
static void vio_dev_release(struct device *dev)
{
	kfree(to_vio_dev(dev));
}
138 | |||
139 | static ssize_t | ||
140 | show_pciobppath_attr(struct device *dev, struct device_attribute *attr, | ||
141 | char *buf) | ||
142 | { | ||
143 | struct vio_dev *vdev; | ||
144 | struct device_node *dp; | ||
145 | |||
146 | vdev = to_vio_dev(dev); | ||
147 | dp = vdev->dp; | ||
148 | |||
149 | return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); | ||
150 | } | ||
151 | |||
/* Read-only "obppath" attribute attached to devices with an OF node. */
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
		   show_pciobppath_attr, NULL);

/* OF node of the channel-devices hierarchy, used to associate vio
 * devices with device-tree nodes.
 */
static struct device_node *cdev_node;

/* Root of the vio device tree, and the cfg-handle used when building
 * virtual IRQs for channel devices.
 */
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;
159 | |||
/* Walk the forward arcs of MD node @mp and, from the arc-target
 * nodes, fill in @vdev's tx/rx virtual IRQs ("tx-ino"/"rx-ino"
 * properties) and channel id ("id" property).  Fields without a
 * corresponding property are left untouched.
 */
static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
				  struct vio_dev *vdev)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		const u64 *chan_id;
		const u64 *irq;
		u64 target;

		target = mdesc_arc_target(hp, a);

		irq = mdesc_get_property(hp, target, "tx-ino", NULL);
		if (irq)
			vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);

		irq = mdesc_get_property(hp, target, "rx-ino", NULL);
		if (irq)
			vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);

		chan_id = mdesc_get_property(hp, target, "id", NULL);
		if (chan_id)
			vdev->channel_id = *chan_id;
	}
}
185 | |||
/* Create and register a vio device for machine-description node @mp.
 *
 * @hp:     MD handle
 * @mp:     MD node to instantiate
 * @parent: parent device, or NULL for the root vio device
 *
 * Derives the device type, compat list, id, and cfg-handle from the
 * MD node; names the device from those; fills in the channel id and
 * IRQs; and registers it with the driver core.  Returns the new
 * vio_dev, or NULL on any failure (which is logged).
 */
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
				      struct device *parent)
{
	const char *type, *compat, *bus_id_name;
	struct device_node *dp;
	struct vio_dev *vdev;
	int err, tlen, clen;
	const u64 *id, *cfg_handle;
	u64 a;

	/* Type: "device-type" property, falling back to "name" and
	 * then the MD node name itself.
	 */
	type = mdesc_get_property(hp, mp, "device-type", &tlen);
	if (!type) {
		type = mdesc_get_property(hp, mp, "name", &tlen);
		if (!type) {
			type = mdesc_node_name(hp, mp);
			tlen = strlen(type) + 1;
		}
	}
	if (tlen > VIO_MAX_TYPE_LEN) {
		printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
		       type);
		return NULL;
	}

	id = mdesc_get_property(hp, mp, "id", NULL);

	/* Look through the node's back arcs for a "cfg-handle". */
	cfg_handle = NULL;
	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		cfg_handle = mdesc_get_property(hp, target,
						"cfg-handle", NULL);
		if (cfg_handle)
			break;
	}

	/* Base the bus id on the type, shortened for ds ports.  The
	 * -4 reserves room for the "-%lu" numeric suffixes below.
	 */
	bus_id_name = type;
	if (!strcmp(type, "domain-services-port"))
		bus_id_name = "ds";

	if (strlen(bus_id_name) >= BUS_ID_SIZE - 4) {
		printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
		       bus_id_name);
		return NULL;
	}

	/* NOTE(review): compat is read from "device-type", the same
	 * property as type above — verify this shouldn't be a
	 * "compatible"-style property instead.
	 */
	compat = mdesc_get_property(hp, mp, "device-type", &clen);
	if (!compat) {
		clen = 0;
	} else if (clen > VIO_MAX_COMPAT_LEN) {
		printk(KERN_ERR "VIO: Compat len %d for [%s] is too long.\n",
		       clen, type);
		return NULL;
	}

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		printk(KERN_ERR "VIO: Could not allocate vio_dev\n");
		return NULL;
	}

	vdev->mp = mp;
	memcpy(vdev->type, type, tlen);
	if (compat)
		memcpy(vdev->compat, compat, clen);
	else
		memset(vdev->compat, 0, sizeof(vdev->compat));
	vdev->compat_len = clen;

	/* Sentinels meaning "no channel / no IRQ assigned". */
	vdev->channel_id = ~0UL;
	vdev->tx_irq = ~0;
	vdev->rx_irq = ~0;

	vio_fill_channel_info(hp, mp, vdev);

	/* Device name: type, optionally suffixed with id and
	 * cfg-handle when available.
	 */
	if (!id) {
		dev_set_name(&vdev->dev, "%s", bus_id_name);
		vdev->dev_no = ~(u64)0;
	} else if (!cfg_handle) {
		dev_set_name(&vdev->dev, "%s-%lu", bus_id_name, *id);
		vdev->dev_no = *id;
	} else {
		dev_set_name(&vdev->dev, "%s-%lu-%lu", bus_id_name,
			     *cfg_handle, *id);
		vdev->dev_no = *cfg_handle;
	}

	vdev->dev.parent = parent;
	vdev->dev.bus = &vio_bus_type;
	vdev->dev.release = vio_dev_release;

	/* Associate an OF node: the channel-devices node for the
	 * root, a matching child of it for first-level devices, or
	 * the parent's node otherwise.
	 */
	if (parent == NULL) {
		dp = cdev_node;
	} else if (to_vio_dev(parent) == root_vdev) {
		dp = of_get_next_child(cdev_node, NULL);
		while (dp) {
			if (!strcmp(dp->type, type))
				break;

			dp = of_get_next_child(cdev_node, dp);
		}
	} else {
		dp = to_vio_dev(parent)->dp;
	}
	vdev->dp = dp;

	printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev));

	err = device_register(&vdev->dev);
	if (err) {
		printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
		       dev_name(&vdev->dev), err);
		/* NOTE(review): device_register() failure normally
		 * requires put_device() rather than kfree() so the
		 * release path runs — confirm against driver-core rules.
		 */
		kfree(vdev);
		return NULL;
	}
	/* Best effort; sysfs failure here is deliberately ignored. */
	if (vdev->dp)
		err = sysfs_create_file(&vdev->dev.kobj,
					&dev_attr_obppath.attr);

	return vdev;
}
308 | |||
309 | static void vio_add(struct mdesc_handle *hp, u64 node) | ||
310 | { | ||
311 | (void) vio_create_one(hp, node, &root_vdev->dev); | ||
312 | } | ||
313 | |||
314 | static int vio_md_node_match(struct device *dev, void *arg) | ||
315 | { | ||
316 | struct vio_dev *vdev = to_vio_dev(dev); | ||
317 | |||
318 | if (vdev->mp == (u64) arg) | ||
319 | return 1; | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void vio_remove(struct mdesc_handle *hp, u64 node) | ||
325 | { | ||
326 | struct device *dev; | ||
327 | |||
328 | dev = device_find_child(&root_vdev->dev, (void *) node, | ||
329 | vio_md_node_match); | ||
330 | if (dev) { | ||
331 | printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); | ||
332 | |||
333 | device_unregister(dev); | ||
334 | } | ||
335 | } | ||
336 | |||
/* Notifier driving hot add/remove of generic virtual-device ports as
 * machine-description updates arrive.
 */
static struct mdesc_notifier_client vio_device_notifier = {
	.add = vio_add,
	.remove = vio_remove,
	.node_name = "virtual-device-port",
};
342 | |||
/* We are only interested in domain service ports under the
 * "domain-services" node.  On control nodes there is another port
 * under "openboot" that we should not mess with as apparently that is
 * reserved exclusively for OBP use.
 */
348 | static void vio_add_ds(struct mdesc_handle *hp, u64 node) | ||
349 | { | ||
350 | int found; | ||
351 | u64 a; | ||
352 | |||
353 | found = 0; | ||
354 | mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { | ||
355 | u64 target = mdesc_arc_target(hp, a); | ||
356 | const char *name = mdesc_node_name(hp, target); | ||
357 | |||
358 | if (!strcmp(name, "domain-services")) { | ||
359 | found = 1; | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | if (found) | ||
365 | (void) vio_create_one(hp, node, &root_vdev->dev); | ||
366 | } | ||
367 | |||
/* Notifier for domain-services ports; uses the filtered add hook so
 * OBP-reserved ports under "openboot" are left alone.
 */
static struct mdesc_notifier_client vio_ds_notifier = {
	.add = vio_add_ds,
	.remove = vio_remove,
	.node_name = "domain-services-port",
};
373 | |||
/* MD/OBP node and property names probed at init time. */
static const char *channel_devices_node = "channel-devices";
static const char *channel_devices_compat = "SUNW,sun4v-channel-devices";
static const char *cfg_handle_prop = "cfg-handle";
377 | |||
378 | static int __init vio_init(void) | ||
379 | { | ||
380 | struct mdesc_handle *hp; | ||
381 | const char *compat; | ||
382 | const u64 *cfg_handle; | ||
383 | int err, len; | ||
384 | u64 root; | ||
385 | |||
386 | err = bus_register(&vio_bus_type); | ||
387 | if (err) { | ||
388 | printk(KERN_ERR "VIO: Could not register bus type err=%d\n", | ||
389 | err); | ||
390 | return err; | ||
391 | } | ||
392 | |||
393 | hp = mdesc_grab(); | ||
394 | if (!hp) | ||
395 | return 0; | ||
396 | |||
397 | root = mdesc_node_by_name(hp, MDESC_NODE_NULL, channel_devices_node); | ||
398 | if (root == MDESC_NODE_NULL) { | ||
399 | printk(KERN_INFO "VIO: No channel-devices MDESC node.\n"); | ||
400 | mdesc_release(hp); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | cdev_node = of_find_node_by_name(NULL, "channel-devices"); | ||
405 | err = -ENODEV; | ||
406 | if (!cdev_node) { | ||
407 | printk(KERN_INFO "VIO: No channel-devices OBP node.\n"); | ||
408 | goto out_release; | ||
409 | } | ||
410 | |||
411 | compat = mdesc_get_property(hp, root, "compatible", &len); | ||
412 | if (!compat) { | ||
413 | printk(KERN_ERR "VIO: Channel devices lacks compatible " | ||
414 | "property\n"); | ||
415 | goto out_release; | ||
416 | } | ||
417 | if (!of_find_in_proplist(compat, channel_devices_compat, len)) { | ||
418 | printk(KERN_ERR "VIO: Channel devices node lacks (%s) " | ||
419 | "compat entry.\n", channel_devices_compat); | ||
420 | goto out_release; | ||
421 | } | ||
422 | |||
423 | cfg_handle = mdesc_get_property(hp, root, cfg_handle_prop, NULL); | ||
424 | if (!cfg_handle) { | ||
425 | printk(KERN_ERR "VIO: Channel devices lacks %s property\n", | ||
426 | cfg_handle_prop); | ||
427 | goto out_release; | ||
428 | } | ||
429 | |||
430 | cdev_cfg_handle = *cfg_handle; | ||
431 | |||
432 | root_vdev = vio_create_one(hp, root, NULL); | ||
433 | err = -ENODEV; | ||
434 | if (!root_vdev) { | ||
435 | printk(KERN_ERR "VIO: Coult not create root device.\n"); | ||
436 | goto out_release; | ||
437 | } | ||
438 | |||
439 | mdesc_register_notifier(&vio_device_notifier); | ||
440 | mdesc_register_notifier(&vio_ds_notifier); | ||
441 | |||
442 | mdesc_release(hp); | ||
443 | |||
444 | return err; | ||
445 | |||
446 | out_release: | ||
447 | mdesc_release(hp); | ||
448 | return err; | ||
449 | } | ||
450 | |||
451 | postcore_initcall(vio_init); | ||
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c new file mode 100644 index 000000000000..708fa1705fbe --- /dev/null +++ b/arch/sparc/kernel/viohs.c | |||
@@ -0,0 +1,822 @@ | |||
1 | /* viohs.c: LDOM Virtual I/O handshake helper layer. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/string.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/slab.h> | ||
12 | |||
13 | #include <asm/ldc.h> | ||
14 | #include <asm/vio.h> | ||
15 | |||
16 | int vio_ldc_send(struct vio_driver_state *vio, void *data, int len) | ||
17 | { | ||
18 | int err, limit = 1000; | ||
19 | |||
20 | err = -EINVAL; | ||
21 | while (limit-- > 0) { | ||
22 | err = ldc_write(vio->lp, data, len); | ||
23 | if (!err || (err != -EAGAIN)) | ||
24 | break; | ||
25 | udelay(1); | ||
26 | } | ||
27 | |||
28 | return err; | ||
29 | } | ||
30 | EXPORT_SYMBOL(vio_ldc_send); | ||
31 | |||
32 | static int send_ctrl(struct vio_driver_state *vio, | ||
33 | struct vio_msg_tag *tag, int len) | ||
34 | { | ||
35 | tag->sid = vio_send_sid(vio); | ||
36 | return vio_ldc_send(vio, tag, len); | ||
37 | } | ||
38 | |||
39 | static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env) | ||
40 | { | ||
41 | tag->type = type; | ||
42 | tag->stype = stype; | ||
43 | tag->stype_env = stype_env; | ||
44 | } | ||
45 | |||
46 | static int send_version(struct vio_driver_state *vio, u16 major, u16 minor) | ||
47 | { | ||
48 | struct vio_ver_info pkt; | ||
49 | |||
50 | vio->_local_sid = (u32) sched_clock(); | ||
51 | |||
52 | memset(&pkt, 0, sizeof(pkt)); | ||
53 | init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO); | ||
54 | pkt.major = major; | ||
55 | pkt.minor = minor; | ||
56 | pkt.dev_class = vio->dev_class; | ||
57 | |||
58 | viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n", | ||
59 | major, minor, vio->dev_class); | ||
60 | |||
61 | return send_ctrl(vio, &pkt.tag, sizeof(pkt)); | ||
62 | } | ||
63 | |||
64 | static int start_handshake(struct vio_driver_state *vio) | ||
65 | { | ||
66 | int err; | ||
67 | |||
68 | viodbg(HS, "START HANDSHAKE\n"); | ||
69 | |||
70 | vio->hs_state = VIO_HS_INVALID; | ||
71 | |||
72 | err = send_version(vio, | ||
73 | vio->ver_table[0].major, | ||
74 | vio->ver_table[0].minor); | ||
75 | if (err < 0) | ||
76 | return err; | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | |||
/* Tear down receive descriptor-ring state after a link reset, freeing
 * the descriptor bounce buffer but preserving the ring ident so a
 * re-registration can continue the ident sequence.  Only valid while
 * an RX ring is actually registered (enforced by the BUG_ON).
 */
static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	/* Save the ident across the memset below. */
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}
98 | |||
/* LDC event callback shared by all VIO drivers.
 *
 * LDC_EVENT_UP: decide which descriptor rings this device class must
 * register (network devices both, disk clients TX only, disk servers
 * RX only) and start the version handshake.
 *
 * LDC_EVENT_RESET: drop all handshake/ring/version state and
 * disconnect so a fresh handshake can occur on the next link-up.
 */
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		/* Only tear down the RX ring if it was registered. */
		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);
132 | |||
/* Reset all handshake state after a protocol error and report
 * -ECONNRESET to the caller so it can propagate the reset.
 */
static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here... Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	/* Forget ring registrations in both directions. */
	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	/* kfree(NULL) is a no-op, so this is safe even if no RX
	 * descriptor buffer was ever allocated.
	 */
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}
158 | |||
159 | static int process_unknown(struct vio_driver_state *vio, void *arg) | ||
160 | { | ||
161 | struct vio_msg_tag *pkt = arg; | ||
162 | |||
163 | viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n", | ||
164 | pkt->type, pkt->stype, pkt->stype_env, pkt->sid); | ||
165 | |||
166 | printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n", | ||
167 | vio->vdev->channel_id); | ||
168 | |||
169 | ldc_disconnect(vio->lp); | ||
170 | |||
171 | return -ECONNRESET; | ||
172 | } | ||
173 | |||
/* Send our transmit descriptor-ring registration to the peer.
 *
 * The packet carries a variable-length cookie array, so a union with
 * a runtime-sized char array (a GCC variable-length-object extension)
 * reserves packet + cookies on the stack in one shot.
 */
static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  dr->ncookies)];
	} u;
	int i;

	memset(&u, 0, sizeof(u));
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	/* dring_ident is zero on registration; the peer assigns the
	 * real ident in its ACK (see process_dreg_ack()).
	 */
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}
209 | |||
210 | static int send_rdx(struct vio_driver_state *vio) | ||
211 | { | ||
212 | struct vio_rdx pkt; | ||
213 | |||
214 | memset(&pkt, 0, sizeof(pkt)); | ||
215 | |||
216 | init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX); | ||
217 | |||
218 | viodbg(HS, "SEND RDX INFO\n"); | ||
219 | |||
220 | return send_ctrl(vio, &pkt.tag, sizeof(pkt)); | ||
221 | } | ||
222 | |||
223 | static int send_attr(struct vio_driver_state *vio) | ||
224 | { | ||
225 | return vio->ops->send_attr(vio); | ||
226 | } | ||
227 | |||
228 | static struct vio_version *find_by_major(struct vio_driver_state *vio, | ||
229 | u16 major) | ||
230 | { | ||
231 | struct vio_version *ret = NULL; | ||
232 | int i; | ||
233 | |||
234 | for (i = 0; i < vio->ver_table_entries; i++) { | ||
235 | struct vio_version *v = &vio->ver_table[i]; | ||
236 | if (v->major <= major) { | ||
237 | ret = v; | ||
238 | break; | ||
239 | } | ||
240 | } | ||
241 | return ret; | ||
242 | } | ||
243 | |||
/* Handle a VERSION INFO proposal from the peer.  Three outcomes:
 *  - no usable table entry: NACK with 0.0 ("nothing acceptable");
 *  - best entry has a different major: NACK carrying our counter-offer;
 *  - majors match: ACK with minor clamped to ours, advance to GOTVERS.
 */
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	/* A proposal mid-handshake restarts negotiation. */
	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	/* The proposal defines the peer's session id for this session. */
	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		/* Use the smaller of the two minors. */
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		/* err > 0 presumably means the ACK went out (byte
		 * count from ldc_write) — TODO confirm against ldc.c.
		 */
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}
298 | |||
/* Handle a VERSION ACK from the peer.  If we already agreed on a
 * version, the ACK must echo it exactly; otherwise adopt the ACKed
 * version.  Client-side classes (network, disk) then proceed to the
 * attribute exchange.
 */
static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		/* Mismatched ACK is a protocol violation: NACK it
		 * (best effort) and reset.
		 */
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}
331 | |||
332 | static int process_ver_nack(struct vio_driver_state *vio, | ||
333 | struct vio_ver_info *pkt) | ||
334 | { | ||
335 | struct vio_version *nver; | ||
336 | |||
337 | viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n", | ||
338 | pkt->major, pkt->minor, pkt->dev_class); | ||
339 | |||
340 | if ((pkt->major == 0 && pkt->minor == 0) || | ||
341 | !(nver = find_by_major(vio, pkt->major))) | ||
342 | return handshake_failure(vio); | ||
343 | |||
344 | if (send_version(vio, nver->major, nver->minor) < 0) | ||
345 | return handshake_failure(vio); | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt) | ||
351 | { | ||
352 | switch (pkt->tag.stype) { | ||
353 | case VIO_SUBTYPE_INFO: | ||
354 | return process_ver_info(vio, pkt); | ||
355 | |||
356 | case VIO_SUBTYPE_ACK: | ||
357 | return process_ver_ack(vio, pkt); | ||
358 | |||
359 | case VIO_SUBTYPE_NACK: | ||
360 | return process_ver_nack(vio, pkt); | ||
361 | |||
362 | default: | ||
363 | return handshake_failure(vio); | ||
364 | }; | ||
365 | } | ||
366 | |||
367 | static int process_attr(struct vio_driver_state *vio, void *pkt) | ||
368 | { | ||
369 | int err; | ||
370 | |||
371 | if (!(vio->hs_state & VIO_HS_GOTVERS)) | ||
372 | return handshake_failure(vio); | ||
373 | |||
374 | err = vio->ops->handle_attr(vio, pkt); | ||
375 | if (err < 0) { | ||
376 | return handshake_failure(vio); | ||
377 | } else { | ||
378 | vio->hs_state |= VIO_HS_GOT_ATTR; | ||
379 | |||
380 | if ((vio->dr_state & VIO_DR_STATE_TXREQ) && | ||
381 | !(vio->hs_state & VIO_HS_SENT_DREG)) { | ||
382 | if (send_dreg(vio) < 0) | ||
383 | return handshake_failure(vio); | ||
384 | |||
385 | vio->hs_state |= VIO_HS_SENT_DREG; | ||
386 | } | ||
387 | } | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static int all_drings_registered(struct vio_driver_state *vio) | ||
392 | { | ||
393 | int need_rx, need_tx; | ||
394 | |||
395 | need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ); | ||
396 | need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ); | ||
397 | |||
398 | if (need_rx && | ||
399 | !(vio->dr_state & VIO_DR_STATE_RXREG)) | ||
400 | return 0; | ||
401 | |||
402 | if (need_tx && | ||
403 | !(vio->dr_state & VIO_DR_STATE_TXREG)) | ||
404 | return 0; | ||
405 | |||
406 | return 1; | ||
407 | } | ||
408 | |||
409 | static int process_dreg_info(struct vio_driver_state *vio, | ||
410 | struct vio_dring_register *pkt) | ||
411 | { | ||
412 | struct vio_dring_state *dr; | ||
413 | int i, len; | ||
414 | |||
415 | viodbg(HS, "GOT DRING_REG INFO ident[%llx] " | ||
416 | "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", | ||
417 | (unsigned long long) pkt->dring_ident, | ||
418 | pkt->num_descr, pkt->descr_size, pkt->options, | ||
419 | pkt->num_cookies); | ||
420 | |||
421 | if (!(vio->dr_state & VIO_DR_STATE_RXREQ)) | ||
422 | goto send_nack; | ||
423 | |||
424 | if (vio->dr_state & VIO_DR_STATE_RXREG) | ||
425 | goto send_nack; | ||
426 | |||
427 | BUG_ON(vio->desc_buf); | ||
428 | |||
429 | vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC); | ||
430 | if (!vio->desc_buf) | ||
431 | goto send_nack; | ||
432 | |||
433 | vio->desc_buf_len = pkt->descr_size; | ||
434 | |||
435 | dr = &vio->drings[VIO_DRIVER_RX_RING]; | ||
436 | |||
437 | dr->num_entries = pkt->num_descr; | ||
438 | dr->entry_size = pkt->descr_size; | ||
439 | dr->ncookies = pkt->num_cookies; | ||
440 | for (i = 0; i < dr->ncookies; i++) { | ||
441 | dr->cookies[i] = pkt->cookies[i]; | ||
442 | |||
443 | viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n", | ||
444 | i, | ||
445 | (unsigned long long) | ||
446 | pkt->cookies[i].cookie_addr, | ||
447 | (unsigned long long) | ||
448 | pkt->cookies[i].cookie_size); | ||
449 | } | ||
450 | |||
451 | pkt->tag.stype = VIO_SUBTYPE_ACK; | ||
452 | pkt->dring_ident = ++dr->ident; | ||
453 | |||
454 | viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n", | ||
455 | (unsigned long long) pkt->dring_ident); | ||
456 | |||
457 | len = (sizeof(*pkt) + | ||
458 | (dr->ncookies * sizeof(struct ldc_trans_cookie))); | ||
459 | if (send_ctrl(vio, &pkt->tag, len) < 0) | ||
460 | goto send_nack; | ||
461 | |||
462 | vio->dr_state |= VIO_DR_STATE_RXREG; | ||
463 | |||
464 | return 0; | ||
465 | |||
466 | send_nack: | ||
467 | pkt->tag.stype = VIO_SUBTYPE_NACK; | ||
468 | viodbg(HS, "SEND DRING_REG NACK\n"); | ||
469 | (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt)); | ||
470 | |||
471 | return handshake_failure(vio); | ||
472 | } | ||
473 | |||
/* Peer ACKed our TX ring registration: record the ident it assigned
 * and, once every required ring is registered, send RDX.
 */
static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	/* An ACK for a registration we never sent is a violation. */
	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}
500 | |||
501 | static int process_dreg_nack(struct vio_driver_state *vio, | ||
502 | struct vio_dring_register *pkt) | ||
503 | { | ||
504 | viodbg(HS, "GOT DRING_REG NACK ident[%llx] " | ||
505 | "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", | ||
506 | (unsigned long long) pkt->dring_ident, | ||
507 | pkt->num_descr, pkt->descr_size, pkt->options, | ||
508 | pkt->num_cookies); | ||
509 | |||
510 | return handshake_failure(vio); | ||
511 | } | ||
512 | |||
513 | static int process_dreg(struct vio_driver_state *vio, | ||
514 | struct vio_dring_register *pkt) | ||
515 | { | ||
516 | if (!(vio->hs_state & VIO_HS_GOTVERS)) | ||
517 | return handshake_failure(vio); | ||
518 | |||
519 | switch (pkt->tag.stype) { | ||
520 | case VIO_SUBTYPE_INFO: | ||
521 | return process_dreg_info(vio, pkt); | ||
522 | |||
523 | case VIO_SUBTYPE_ACK: | ||
524 | return process_dreg_ack(vio, pkt); | ||
525 | |||
526 | case VIO_SUBTYPE_NACK: | ||
527 | return process_dreg_nack(vio, pkt); | ||
528 | |||
529 | default: | ||
530 | return handshake_failure(vio); | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static int process_dunreg(struct vio_driver_state *vio, | ||
535 | struct vio_dring_unregister *pkt) | ||
536 | { | ||
537 | struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING]; | ||
538 | |||
539 | viodbg(HS, "GOT DRING_UNREG\n"); | ||
540 | |||
541 | if (pkt->dring_ident != dr->ident) | ||
542 | return 0; | ||
543 | |||
544 | vio->dr_state &= ~VIO_DR_STATE_RXREG; | ||
545 | |||
546 | memset(dr, 0, sizeof(*dr)); | ||
547 | |||
548 | kfree(vio->desc_buf); | ||
549 | vio->desc_buf = NULL; | ||
550 | vio->desc_buf_len = 0; | ||
551 | |||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt) | ||
556 | { | ||
557 | viodbg(HS, "GOT RDX INFO\n"); | ||
558 | |||
559 | pkt->tag.stype = VIO_SUBTYPE_ACK; | ||
560 | viodbg(HS, "SEND RDX ACK\n"); | ||
561 | if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0) | ||
562 | return handshake_failure(vio); | ||
563 | |||
564 | vio->hs_state |= VIO_HS_SENT_RDX_ACK; | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt) | ||
569 | { | ||
570 | viodbg(HS, "GOT RDX ACK\n"); | ||
571 | |||
572 | if (!(vio->hs_state & VIO_HS_SENT_RDX)) | ||
573 | return handshake_failure(vio); | ||
574 | |||
575 | vio->hs_state |= VIO_HS_GOT_RDX_ACK; | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt) | ||
580 | { | ||
581 | viodbg(HS, "GOT RDX NACK\n"); | ||
582 | |||
583 | return handshake_failure(vio); | ||
584 | } | ||
585 | |||
586 | static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt) | ||
587 | { | ||
588 | if (!all_drings_registered(vio)) | ||
589 | handshake_failure(vio); | ||
590 | |||
591 | switch (pkt->tag.stype) { | ||
592 | case VIO_SUBTYPE_INFO: | ||
593 | return process_rdx_info(vio, pkt); | ||
594 | |||
595 | case VIO_SUBTYPE_ACK: | ||
596 | return process_rdx_ack(vio, pkt); | ||
597 | |||
598 | case VIO_SUBTYPE_NACK: | ||
599 | return process_rdx_nack(vio, pkt); | ||
600 | |||
601 | default: | ||
602 | return handshake_failure(vio); | ||
603 | } | ||
604 | } | ||
605 | |||
/* Main dispatch for incoming control packets during the handshake.
 *
 * Routes on tag->stype_env; when a handler advances hs_state across
 * the VIO_HS_COMPLETE threshold, the driver's handshake_complete()
 * callback is invoked for that transition.
 *
 * Returns 0 or a negative error (typically -ECONNRESET).
 */
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}
	/* Fire the completion callback only on the state transition,
	 * not on every packet processed while complete.
	 */
	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE))
		vio->ops->handshake_complete(vio);

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);
645 | |||
/* NOTE(review): intentionally a no-op — connection-reset handling
 * appears to live in vio_link_state_change() and the drivers; confirm
 * before adding policy here.
 */
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
650 | |||
/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
/* Check that an incoming packet carries the session id we expect for
 * this device class.  Returns 0 if acceptable, -EINVAL otherwise.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use. */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		/* Disk clients see their own SID mirrored back by the
		 * Solaris vdisk server (see comment above).
		 */
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);
689 | |||
690 | u32 vio_send_sid(struct vio_driver_state *vio) | ||
691 | { | ||
692 | switch (vio->dev_class) { | ||
693 | case VDEV_NETWORK: | ||
694 | case VDEV_NETWORK_SWITCH: | ||
695 | case VDEV_DISK: | ||
696 | default: | ||
697 | return vio->_local_sid; | ||
698 | |||
699 | case VDEV_DISK_SERVER: | ||
700 | return vio->_peer_sid; | ||
701 | } | ||
702 | } | ||
703 | EXPORT_SYMBOL(vio_send_sid); | ||
704 | |||
/* Allocate the LDC channel for this VIO device.
 *
 * The caller's base configuration is copied and augmented with the
 * per-device TX/RX interrupts before being handed to ldc_alloc().
 * Returns 0 on success or the negative error from ldc_alloc().
 */
int vio_ldc_alloc(struct vio_driver_state *vio,
		  struct ldc_channel_config *base_cfg,
		  void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);
724 | |||
725 | void vio_ldc_free(struct vio_driver_state *vio) | ||
726 | { | ||
727 | ldc_free(vio->lp); | ||
728 | vio->lp = NULL; | ||
729 | |||
730 | kfree(vio->desc_buf); | ||
731 | vio->desc_buf = NULL; | ||
732 | vio->desc_buf_len = 0; | ||
733 | } | ||
734 | EXPORT_SYMBOL(vio_ldc_free); | ||
735 | |||
/* Try to bring the port's LDC channel up: bind it on first use, then
 * connect.  On any failure, (re)arm vio->timer to retry about one
 * second from now (rounded to limit wakeups).  Takes vio->lock.
 */
void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp, vio->name);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		err = ldc_connect(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		/* Schedule a retry roughly HZ jiffies out. */
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);
771 | |||
/* Retry timer callback: periodically attempt to bring the port up. */
static void vio_port_timer(unsigned long _arg)
{
	vio_port_up((struct vio_driver_state *) _arg);
}
778 | |||
779 | int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, | ||
780 | u8 dev_class, struct vio_version *ver_table, | ||
781 | int ver_table_size, struct vio_driver_ops *ops, | ||
782 | char *name) | ||
783 | { | ||
784 | switch (dev_class) { | ||
785 | case VDEV_NETWORK: | ||
786 | case VDEV_NETWORK_SWITCH: | ||
787 | case VDEV_DISK: | ||
788 | case VDEV_DISK_SERVER: | ||
789 | break; | ||
790 | |||
791 | default: | ||
792 | return -EINVAL; | ||
793 | } | ||
794 | |||
795 | if (!ops->send_attr || | ||
796 | !ops->handle_attr || | ||
797 | !ops->handshake_complete) | ||
798 | return -EINVAL; | ||
799 | |||
800 | if (!ver_table || ver_table_size < 0) | ||
801 | return -EINVAL; | ||
802 | |||
803 | if (!name) | ||
804 | return -EINVAL; | ||
805 | |||
806 | spin_lock_init(&vio->lock); | ||
807 | |||
808 | vio->name = name; | ||
809 | |||
810 | vio->dev_class = dev_class; | ||
811 | vio->vdev = vdev; | ||
812 | |||
813 | vio->ver_table = ver_table; | ||
814 | vio->ver_table_entries = ver_table_size; | ||
815 | |||
816 | vio->ops = ops; | ||
817 | |||
818 | setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio); | ||
819 | |||
820 | return 0; | ||
821 | } | ||
822 | EXPORT_SYMBOL(vio_driver_init); | ||
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c new file mode 100644 index 000000000000..b956fd71c131 --- /dev/null +++ b/arch/sparc/kernel/visemul.c | |||
@@ -0,0 +1,890 @@ | |||
1 | /* visemul.c: Emulation of VIS instructions. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/thread_info.h> | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/pstate.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/fpumacro.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | /* OPF field of various VIS instructions. */ | ||
16 | |||
17 | /* 000111011 - four 16-bit packs */ | ||
18 | #define FPACK16_OPF 0x03b | ||
19 | |||
20 | /* 000111010 - two 32-bit packs */ | ||
21 | #define FPACK32_OPF 0x03a | ||
22 | |||
23 | /* 000111101 - four 16-bit packs */ | ||
24 | #define FPACKFIX_OPF 0x03d | ||
25 | |||
26 | /* 001001101 - four 16-bit expands */ | ||
27 | #define FEXPAND_OPF 0x04d | ||
28 | |||
29 | /* 001001011 - two 32-bit merges */ | ||
30 | #define FPMERGE_OPF 0x04b | ||
31 | |||
32 | /* 000110001 - 8-by-16-bit partitoned product */ | ||
33 | #define FMUL8x16_OPF 0x031 | ||
34 | |||
35 | /* 000110011 - 8-by-16-bit upper alpha partitioned product */ | ||
36 | #define FMUL8x16AU_OPF 0x033 | ||
37 | |||
38 | /* 000110101 - 8-by-16-bit lower alpha partitioned product */ | ||
39 | #define FMUL8x16AL_OPF 0x035 | ||
40 | |||
41 | /* 000110110 - upper 8-by-16-bit partitioned product */ | ||
42 | #define FMUL8SUx16_OPF 0x036 | ||
43 | |||
44 | /* 000110111 - lower 8-by-16-bit partitioned product */ | ||
45 | #define FMUL8ULx16_OPF 0x037 | ||
46 | |||
47 | /* 000111000 - upper 8-by-16-bit partitioned product */ | ||
48 | #define FMULD8SUx16_OPF 0x038 | ||
49 | |||
50 | /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ | ||
51 | #define FMULD8ULx16_OPF 0x039 | ||
52 | |||
53 | /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ | ||
54 | #define FCMPGT16_OPF 0x028 | ||
55 | |||
56 | /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ | ||
57 | #define FCMPGT32_OPF 0x02c | ||
58 | |||
59 | /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ | ||
60 | #define FCMPLE16_OPF 0x020 | ||
61 | |||
62 | /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ | ||
63 | #define FCMPLE32_OPF 0x024 | ||
64 | |||
65 | /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ | ||
66 | #define FCMPNE16_OPF 0x022 | ||
67 | |||
68 | /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ | ||
69 | #define FCMPNE32_OPF 0x026 | ||
70 | |||
71 | /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ | ||
72 | #define FCMPEQ16_OPF 0x02a | ||
73 | |||
74 | /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ | ||
75 | #define FCMPEQ32_OPF 0x02e | ||
76 | |||
77 | /* 000000000 - Eight 8-bit edge boundary processing */ | ||
78 | #define EDGE8_OPF 0x000 | ||
79 | |||
80 | /* 000000001 - Eight 8-bit edge boundary processing, no CC */ | ||
81 | #define EDGE8N_OPF 0x001 | ||
82 | |||
83 | /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ | ||
84 | #define EDGE8L_OPF 0x002 | ||
85 | |||
86 | /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ | ||
87 | #define EDGE8LN_OPF 0x003 | ||
88 | |||
89 | /* 000000100 - Four 16-bit edge boundary processing */ | ||
90 | #define EDGE16_OPF 0x004 | ||
91 | |||
92 | /* 000000101 - Four 16-bit edge boundary processing, no CC */ | ||
93 | #define EDGE16N_OPF 0x005 | ||
94 | |||
95 | /* 000000110 - Four 16-bit edge boundary processing, little-endian */ | ||
96 | #define EDGE16L_OPF 0x006 | ||
97 | |||
98 | /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ | ||
99 | #define EDGE16LN_OPF 0x007 | ||
100 | |||
101 | /* 000001000 - Two 32-bit edge boundary processing */ | ||
102 | #define EDGE32_OPF 0x008 | ||
103 | |||
104 | /* 000001001 - Two 32-bit edge boundary processing, no CC */ | ||
105 | #define EDGE32N_OPF 0x009 | ||
106 | |||
107 | /* 000001010 - Two 32-bit edge boundary processing, little-endian */ | ||
108 | #define EDGE32L_OPF 0x00a | ||
109 | |||
110 | /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ | ||
111 | #define EDGE32LN_OPF 0x00b | ||
112 | |||
113 | /* 000111110 - distance between 8 8-bit components */ | ||
114 | #define PDIST_OPF 0x03e | ||
115 | |||
116 | /* 000010000 - convert 8-bit 3-D address to blocked byte address */ | ||
117 | #define ARRAY8_OPF 0x010 | ||
118 | |||
119 | /* 000010010 - convert 16-bit 3-D address to blocked byte address */ | ||
120 | #define ARRAY16_OPF 0x012 | ||
121 | |||
122 | /* 000010100 - convert 32-bit 3-D address to blocked byte address */ | ||
123 | #define ARRAY32_OPF 0x014 | ||
124 | |||
125 | /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ | ||
126 | #define BMASK_OPF 0x019 | ||
127 | |||
128 | /* 001001100 - Permute bytes as specified by GSR.MASK */ | ||
129 | #define BSHUFFLE_OPF 0x04c | ||
130 | |||
131 | #define VIS_OPF_SHIFT 5 | ||
132 | #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) | ||
133 | |||
134 | #define RS1(INSN) (((INSN) >> 14) & 0x1f) | ||
135 | #define RS2(INSN) (((INSN) >> 0) & 0x1f) | ||
136 | #define RD(INSN) (((INSN) >> 25) & 0x1f) | ||
137 | |||
/* Flush register windows to the stack if the emulated instruction
 * references any window register (%l0-%i7, i.e. register number >= 16),
 * so fetch_reg()/store_reg() can access them through memory.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	/* Globals and outs (< 16) live in pt_regs; nothing to do. */
	if (rs1 < 16 && rs2 < 16 && rd < 16)
		return;

	if (from_kernel)
		__asm__ __volatile__("flushw");
	else
		flushw_user();
}
148 | |||
/* Read integer register REG of the trapped context.
 *
 * Registers 0-15 (%g0-%o7) live in pt_regs, with %g0 reading as zero.
 * Registers 16-31 (%l0-%i7) live in the register window saved on the
 * stack: read directly for kernel traps, via get_user() for user
 * traps (32-bit tasks use the narrow window layout with no stack
 * bias).
 *
 * NOTE(review): "reg - 16" indexes past locals[] into the following
 * ins[] for %i registers -- assumes ins[] immediately follows
 * locals[] in the reg_window layout; confirm against ptrace.h.
 * NOTE(review): if get_user() faults, "value" is returned
 * uninitialized -- presumably acceptable on this emulation path, but
 * verify.
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel trap: window was spilled to the kernel stack. */
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit user task: 32-bit frame pointer, no STACK_BIAS. */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		/* 64-bit user task. */
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
170 | |||
/* Return the user-space address at which window register REG (>= 16)
 * of the trapped user context is saved.  Only valid for user traps;
 * the windows must already have been flushed (maybe_flush_windows()).
 */
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
							  struct pt_regs *regs)
{
	BUG_ON(reg < 16);
	BUG_ON(regs->tstate & TSTATE_PRIV);

	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit task: narrow window at the 32-bit frame pointer. */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long __user *)&win32->locals[reg - 16];
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
187 | |||
/* Return the address of register REG (< 16, i.e. a global or out
 * register) inside pt_regs.  Only used while emulating a user-mode
 * instruction, hence the TSTATE_PRIV check.
 */
static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
						   struct pt_regs *regs)
{
	BUG_ON(reg >= 16);
	BUG_ON(regs->tstate & TSTATE_PRIV);

	return &regs->u_regs[reg];
}
196 | |||
/* Write VAL into integer register RD of the trapped user context.
 * Registers < 16 are patched directly in pt_regs; window registers
 * are written to the user's saved register window (a 32-bit store for
 * 32-bit tasks).  Windows must already be flushed for rd >= 16.
 */
static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
{
	if (rd < 16) {
		unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);

		*rd_kern = val;
	} else {
		unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);

		/* NOTE(review): __put_user result is ignored; a faulting
		 * store is silently dropped -- confirm this is intended.
		 */
		if (test_thread_flag(TIF_32BIT))
			__put_user((u32)val, (u32 __user *)rd_user);
		else
			__put_user(val, rd_user);
	}
}
212 | |||
213 | static inline unsigned long fpd_regval(struct fpustate *f, | ||
214 | unsigned int insn_regnum) | ||
215 | { | ||
216 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
217 | (insn_regnum & 0x1e)); | ||
218 | |||
219 | return *(unsigned long *) &f->regs[insn_regnum]; | ||
220 | } | ||
221 | |||
222 | static inline unsigned long *fpd_regaddr(struct fpustate *f, | ||
223 | unsigned int insn_regnum) | ||
224 | { | ||
225 | insn_regnum = (((insn_regnum & 1) << 5) | | ||
226 | (insn_regnum & 0x1e)); | ||
227 | |||
228 | return (unsigned long *) &f->regs[insn_regnum]; | ||
229 | } | ||
230 | |||
/* Read a single (32-bit) FP register; %f0-%f31 are indexed directly. */
static inline unsigned int fps_regval(struct fpustate *f,
				      unsigned int insn_regnum)
{
	return f->regs[insn_regnum];
}
236 | |||
/* Return the address of a single (32-bit) FP register. */
static inline unsigned int *fps_regaddr(struct fpustate *f,
					unsigned int insn_regnum)
{
	return &f->regs[insn_regnum];
}
242 | |||
/* Edge-mask lookup tables for the EDGE instructions, indexed by the
 * low address bits of the operand.  .left is the mask for the
 * left (starting) edge, .right for the right (ending) edge; the _l
 * variants hold the little-endian bit orderings.
 */
struct edge_tab {
	u16 left, right;
};
static struct edge_tab edge8_tab[8] = {
	{ 0xff, 0x80 },
	{ 0x7f, 0xc0 },
	{ 0x3f, 0xe0 },
	{ 0x1f, 0xf0 },
	{ 0x0f, 0xf8 },
	{ 0x07, 0xfc },
	{ 0x03, 0xfe },
	{ 0x01, 0xff },
};
static struct edge_tab edge8_tab_l[8] = {
	{ 0xff, 0x01 },
	{ 0xfe, 0x03 },
	{ 0xfc, 0x07 },
	{ 0xf8, 0x0f },
	{ 0xf0, 0x1f },
	{ 0xe0, 0x3f },
	{ 0xc0, 0x7f },
	{ 0x80, 0xff },
};
static struct edge_tab edge16_tab[4] = {
	{ 0xf, 0x8 },
	{ 0x7, 0xc },
	{ 0x3, 0xe },
	{ 0x1, 0xf },
};
static struct edge_tab edge16_tab_l[4] = {
	{ 0xf, 0x1 },
	{ 0xe, 0x3 },
	{ 0xc, 0x7 },
	{ 0x8, 0xf },
};
static struct edge_tab edge32_tab[2] = {
	{ 0x3, 0x2 },
	{ 0x1, 0x3 },
};
static struct edge_tab edge32_tab_l[2] = {
	{ 0x3, 0x1 },
	{ 0x2, 0x3 },
};
286 | |||
/* Emulate the EDGE8/16/32 boundary-processing instructions (plus the
 * little-endian 'L' and no-condition-code 'N' variants).  rs1 is the
 * address of the next pixel to render, rs2 the address of the last
 * pixel in the scanline; the result is a pixel write mask.
 */
static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
	u16 left, right;

	maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
	orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
	orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);

	/* 32-bit tasks compute with truncated addresses, but the CC
	 * update below uses the original full values.
	 */
	if (test_thread_flag(TIF_32BIT)) {
		rs1 = rs1 & 0xffffffff;
		rs2 = rs2 & 0xffffffff;
	}
	/* Look up left mask from rs1's offset and right mask from rs2's
	 * offset within an 8-byte word (1/2/4-byte element granularity).
	 */
	switch (opf) {
	default:
	case EDGE8_OPF:
	case EDGE8N_OPF:
		left = edge8_tab[rs1 & 0x7].left;
		right = edge8_tab[rs2 & 0x7].right;
		break;
	case EDGE8L_OPF:
	case EDGE8LN_OPF:
		left = edge8_tab_l[rs1 & 0x7].left;
		right = edge8_tab_l[rs2 & 0x7].right;
		break;

	case EDGE16_OPF:
	case EDGE16N_OPF:
		left = edge16_tab[(rs1 >> 1) & 0x3].left;
		right = edge16_tab[(rs2 >> 1) & 0x3].right;
		break;

	case EDGE16L_OPF:
	case EDGE16LN_OPF:
		left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
		right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
		break;

	case EDGE32_OPF:
	case EDGE32N_OPF:
		left = edge32_tab[(rs1 >> 2) & 0x1].left;
		right = edge32_tab[(rs2 >> 2) & 0x1].right;
		break;

	case EDGE32L_OPF:
	case EDGE32LN_OPF:
		left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
		right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
		break;
	};

	/* If both addresses fall in the same 8-byte word, the mask is
	 * the intersection of the left and right edges; otherwise only
	 * the left edge applies.
	 */
	if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
		rd_val = right & left;
	else
		rd_val = left;

	store_reg(regs, rd_val, RD(insn));

	/* Non-'N' variants also update icc/xcc as "subcc rs1, rs2":
	 * perform the subtract in hardware, read %ccr, and splice it
	 * into the saved TSTATE (CCR lives in bits 39:32 of tstate).
	 */
	switch (opf) {
	case EDGE8_OPF:
	case EDGE8L_OPF:
	case EDGE16_OPF:
	case EDGE16L_OPF:
	case EDGE32_OPF:
	case EDGE32L_OPF: {
		unsigned long ccr, tstate;

		__asm__ __volatile__("subcc %1, %2, %%g0\n\t"
				     "rd %%ccr, %0"
				     : "=r" (ccr)
				     : "r" (orig_rs1), "r" (orig_rs2)
				     : "cc");
		tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
		regs->tstate = tstate | (ccr << 32UL);
	}
	};
}
364 | |||
/* Emulate ARRAY8/16/32: convert a packed 3-D fixed-point address in
 * rs1 into a blocked-byte memory offset, interleaving the coordinate
 * bit fields per the VIS 3-D array addressing layout (see the VIS
 * manual for the exact field meanings).  rs2, clamped to 5, selects
 * how many upper coordinate bits participate; ARRAY16/32 scale the
 * final offset by the element size.
 */
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
	unsigned long rs1, rs2, rd_val;
	unsigned int bits, bits_mask;

	maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
	rs1 = fetch_reg(RS1(insn), regs);
	rs2 = fetch_reg(RS2(insn), regs);

	/* Number of variable upper bits per dimension, capped at 5. */
	bits = (rs2 > 5 ? 5 : rs2);
	bits_mask = (1UL << bits) - 1UL;

	/* Interleave fixed low bits of each coordinate, then the
	 * variable-width upper fields.
	 */
	rd_val = ((((rs1 >> 11) & 0x3) << 0) |
		  (((rs1 >> 33) & 0x3) << 2) |
		  (((rs1 >> 55) & 0x1) << 4) |
		  (((rs1 >> 13) & 0xf) << 5) |
		  (((rs1 >> 35) & 0xf) << 9) |
		  (((rs1 >> 56) & 0xf) << 13) |
		  (((rs1 >> 17) & bits_mask) << 17) |
		  (((rs1 >> 39) & bits_mask) << (17 + bits)) |
		  (((rs1 >> 60) & 0xf) << (17 + (2*bits))));

	/* Scale byte offset by element size (x2 / x4). */
	switch (opf) {
	case ARRAY16_OPF:
		rd_val <<= 1;
		break;

	case ARRAY32_OPF:
		rd_val <<= 2;
	};

	store_reg(regs, rd_val, RD(insn));
}
398 | |||
399 | static void bmask(struct pt_regs *regs, unsigned int insn) | ||
400 | { | ||
401 | unsigned long rs1, rs2, rd_val, gsr; | ||
402 | |||
403 | maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); | ||
404 | rs1 = fetch_reg(RS1(insn), regs); | ||
405 | rs2 = fetch_reg(RS2(insn), regs); | ||
406 | rd_val = rs1 + rs2; | ||
407 | |||
408 | store_reg(regs, rd_val, RD(insn)); | ||
409 | |||
410 | gsr = current_thread_info()->gsr[0] & 0xffffffff; | ||
411 | gsr |= rd_val << 32UL; | ||
412 | current_thread_info()->gsr[0] = gsr; | ||
413 | } | ||
414 | |||
415 | static void bshuffle(struct pt_regs *regs, unsigned int insn) | ||
416 | { | ||
417 | struct fpustate *f = FPUSTATE; | ||
418 | unsigned long rs1, rs2, rd_val; | ||
419 | unsigned long bmask, i; | ||
420 | |||
421 | bmask = current_thread_info()->gsr[0] >> 32UL; | ||
422 | |||
423 | rs1 = fpd_regval(f, RS1(insn)); | ||
424 | rs2 = fpd_regval(f, RS2(insn)); | ||
425 | |||
426 | rd_val = 0UL; | ||
427 | for (i = 0; i < 8; i++) { | ||
428 | unsigned long which = (bmask >> (i * 4)) & 0xf; | ||
429 | unsigned long byte; | ||
430 | |||
431 | if (which < 8) | ||
432 | byte = (rs1 >> (which * 8)) & 0xff; | ||
433 | else | ||
434 | byte = (rs2 >> ((which-8)*8)) & 0xff; | ||
435 | rd_val |= (byte << (i * 8)); | ||
436 | } | ||
437 | |||
438 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
439 | } | ||
440 | |||
441 | static void pdist(struct pt_regs *regs, unsigned int insn) | ||
442 | { | ||
443 | struct fpustate *f = FPUSTATE; | ||
444 | unsigned long rs1, rs2, *rd, rd_val; | ||
445 | unsigned long i; | ||
446 | |||
447 | rs1 = fpd_regval(f, RS1(insn)); | ||
448 | rs2 = fpd_regval(f, RS2(insn)); | ||
449 | rd = fpd_regaddr(f, RD(insn)); | ||
450 | |||
451 | rd_val = *rd; | ||
452 | |||
453 | for (i = 0; i < 8; i++) { | ||
454 | s16 s1, s2; | ||
455 | |||
456 | s1 = (rs1 >> (56 - (i * 8))) & 0xff; | ||
457 | s2 = (rs2 >> (56 - (i * 8))) & 0xff; | ||
458 | |||
459 | /* Absolute value of difference. */ | ||
460 | s1 -= s2; | ||
461 | if (s1 < 0) | ||
462 | s1 = ~s1 + 1; | ||
463 | |||
464 | rd_val += s1; | ||
465 | } | ||
466 | |||
467 | *rd = rd_val; | ||
468 | } | ||
469 | |||
470 | static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
471 | { | ||
472 | struct fpustate *f = FPUSTATE; | ||
473 | unsigned long rs1, rs2, gsr, scale, rd_val; | ||
474 | |||
475 | gsr = current_thread_info()->gsr[0]; | ||
476 | scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); | ||
477 | switch (opf) { | ||
478 | case FPACK16_OPF: { | ||
479 | unsigned long byte; | ||
480 | |||
481 | rs2 = fpd_regval(f, RS2(insn)); | ||
482 | rd_val = 0; | ||
483 | for (byte = 0; byte < 4; byte++) { | ||
484 | unsigned int val; | ||
485 | s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; | ||
486 | int scaled = src << scale; | ||
487 | int from_fixed = scaled >> 7; | ||
488 | |||
489 | val = ((from_fixed < 0) ? | ||
490 | 0 : | ||
491 | (from_fixed > 255) ? | ||
492 | 255 : from_fixed); | ||
493 | |||
494 | rd_val |= (val << (8 * byte)); | ||
495 | } | ||
496 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
497 | break; | ||
498 | } | ||
499 | |||
500 | case FPACK32_OPF: { | ||
501 | unsigned long word; | ||
502 | |||
503 | rs1 = fpd_regval(f, RS1(insn)); | ||
504 | rs2 = fpd_regval(f, RS2(insn)); | ||
505 | rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); | ||
506 | for (word = 0; word < 2; word++) { | ||
507 | unsigned long val; | ||
508 | s32 src = (rs2 >> (word * 32UL)); | ||
509 | s64 scaled = src << scale; | ||
510 | s64 from_fixed = scaled >> 23; | ||
511 | |||
512 | val = ((from_fixed < 0) ? | ||
513 | 0 : | ||
514 | (from_fixed > 255) ? | ||
515 | 255 : from_fixed); | ||
516 | |||
517 | rd_val |= (val << (32 * word)); | ||
518 | } | ||
519 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
520 | break; | ||
521 | } | ||
522 | |||
523 | case FPACKFIX_OPF: { | ||
524 | unsigned long word; | ||
525 | |||
526 | rs2 = fpd_regval(f, RS2(insn)); | ||
527 | |||
528 | rd_val = 0; | ||
529 | for (word = 0; word < 2; word++) { | ||
530 | long val; | ||
531 | s32 src = (rs2 >> (word * 32UL)); | ||
532 | s64 scaled = src << scale; | ||
533 | s64 from_fixed = scaled >> 16; | ||
534 | |||
535 | val = ((from_fixed < -32768) ? | ||
536 | -32768 : | ||
537 | (from_fixed > 32767) ? | ||
538 | 32767 : from_fixed); | ||
539 | |||
540 | rd_val |= ((val & 0xffff) << (word * 16)); | ||
541 | } | ||
542 | *fps_regaddr(f, RD(insn)) = rd_val; | ||
543 | break; | ||
544 | } | ||
545 | |||
546 | case FEXPAND_OPF: { | ||
547 | unsigned long byte; | ||
548 | |||
549 | rs2 = fps_regval(f, RS2(insn)); | ||
550 | |||
551 | rd_val = 0; | ||
552 | for (byte = 0; byte < 4; byte++) { | ||
553 | unsigned long val; | ||
554 | u8 src = (rs2 >> (byte * 8)) & 0xff; | ||
555 | |||
556 | val = src << 4; | ||
557 | |||
558 | rd_val |= (val << (byte * 16)); | ||
559 | } | ||
560 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
561 | break; | ||
562 | } | ||
563 | |||
564 | case FPMERGE_OPF: { | ||
565 | rs1 = fps_regval(f, RS1(insn)); | ||
566 | rs2 = fps_regval(f, RS2(insn)); | ||
567 | |||
568 | rd_val = (((rs2 & 0x000000ff) << 0) | | ||
569 | ((rs1 & 0x000000ff) << 8) | | ||
570 | ((rs2 & 0x0000ff00) << 8) | | ||
571 | ((rs1 & 0x0000ff00) << 16) | | ||
572 | ((rs2 & 0x00ff0000) << 16) | | ||
573 | ((rs1 & 0x00ff0000) << 24) | | ||
574 | ((rs2 & 0xff000000) << 24) | | ||
575 | ((rs1 & 0xff000000) << 32)); | ||
576 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
577 | break; | ||
578 | } | ||
579 | }; | ||
580 | } | ||
581 | |||
582 | static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
583 | { | ||
584 | struct fpustate *f = FPUSTATE; | ||
585 | unsigned long rs1, rs2, rd_val; | ||
586 | |||
587 | switch (opf) { | ||
588 | case FMUL8x16_OPF: { | ||
589 | unsigned long byte; | ||
590 | |||
591 | rs1 = fps_regval(f, RS1(insn)); | ||
592 | rs2 = fpd_regval(f, RS2(insn)); | ||
593 | |||
594 | rd_val = 0; | ||
595 | for (byte = 0; byte < 4; byte++) { | ||
596 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
597 | s16 src2 = (rs2 >> (byte * 16)) & 0xffff; | ||
598 | u32 prod = src1 * src2; | ||
599 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
600 | |||
601 | /* Round up. */ | ||
602 | if (prod & 0x80) | ||
603 | scaled++; | ||
604 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
605 | } | ||
606 | |||
607 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | case FMUL8x16AU_OPF: | ||
612 | case FMUL8x16AL_OPF: { | ||
613 | unsigned long byte; | ||
614 | s16 src2; | ||
615 | |||
616 | rs1 = fps_regval(f, RS1(insn)); | ||
617 | rs2 = fps_regval(f, RS2(insn)); | ||
618 | |||
619 | rd_val = 0; | ||
620 | src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0); | ||
621 | for (byte = 0; byte < 4; byte++) { | ||
622 | u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; | ||
623 | u32 prod = src1 * src2; | ||
624 | u16 scaled = ((prod & 0x00ffff00) >> 8); | ||
625 | |||
626 | /* Round up. */ | ||
627 | if (prod & 0x80) | ||
628 | scaled++; | ||
629 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
630 | } | ||
631 | |||
632 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
633 | break; | ||
634 | } | ||
635 | |||
636 | case FMUL8SUx16_OPF: | ||
637 | case FMUL8ULx16_OPF: { | ||
638 | unsigned long byte, ushift; | ||
639 | |||
640 | rs1 = fpd_regval(f, RS1(insn)); | ||
641 | rs2 = fpd_regval(f, RS2(insn)); | ||
642 | |||
643 | rd_val = 0; | ||
644 | ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; | ||
645 | for (byte = 0; byte < 4; byte++) { | ||
646 | u16 src1; | ||
647 | s16 src2; | ||
648 | u32 prod; | ||
649 | u16 scaled; | ||
650 | |||
651 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
652 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
653 | prod = src1 * src2; | ||
654 | scaled = ((prod & 0x00ffff00) >> 8); | ||
655 | |||
656 | /* Round up. */ | ||
657 | if (prod & 0x80) | ||
658 | scaled++; | ||
659 | rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); | ||
660 | } | ||
661 | |||
662 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
663 | break; | ||
664 | } | ||
665 | |||
666 | case FMULD8SUx16_OPF: | ||
667 | case FMULD8ULx16_OPF: { | ||
668 | unsigned long byte, ushift; | ||
669 | |||
670 | rs1 = fps_regval(f, RS1(insn)); | ||
671 | rs2 = fps_regval(f, RS2(insn)); | ||
672 | |||
673 | rd_val = 0; | ||
674 | ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; | ||
675 | for (byte = 0; byte < 2; byte++) { | ||
676 | u16 src1; | ||
677 | s16 src2; | ||
678 | u32 prod; | ||
679 | u16 scaled; | ||
680 | |||
681 | src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); | ||
682 | src2 = ((rs2 >> (16 * byte)) & 0xffff); | ||
683 | prod = src1 * src2; | ||
684 | scaled = ((prod & 0x00ffff00) >> 8); | ||
685 | |||
686 | /* Round up. */ | ||
687 | if (prod & 0x80) | ||
688 | scaled++; | ||
689 | rd_val |= ((scaled & 0xffffUL) << | ||
690 | ((byte * 32UL) + 7UL)); | ||
691 | } | ||
692 | *fpd_regaddr(f, RD(insn)) = rd_val; | ||
693 | break; | ||
694 | } | ||
695 | }; | ||
696 | } | ||
697 | |||
698 | static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) | ||
699 | { | ||
700 | struct fpustate *f = FPUSTATE; | ||
701 | unsigned long rs1, rs2, rd_val, i; | ||
702 | |||
703 | rs1 = fpd_regval(f, RS1(insn)); | ||
704 | rs2 = fpd_regval(f, RS2(insn)); | ||
705 | |||
706 | rd_val = 0; | ||
707 | |||
708 | switch (opf) { | ||
709 | case FCMPGT16_OPF: | ||
710 | for (i = 0; i < 4; i++) { | ||
711 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
712 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
713 | |||
714 | if (a > b) | ||
715 | rd_val |= 1 << i; | ||
716 | } | ||
717 | break; | ||
718 | |||
719 | case FCMPGT32_OPF: | ||
720 | for (i = 0; i < 2; i++) { | ||
721 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
722 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
723 | |||
724 | if (a > b) | ||
725 | rd_val |= 1 << i; | ||
726 | } | ||
727 | break; | ||
728 | |||
729 | case FCMPLE16_OPF: | ||
730 | for (i = 0; i < 4; i++) { | ||
731 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
732 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
733 | |||
734 | if (a <= b) | ||
735 | rd_val |= 1 << i; | ||
736 | } | ||
737 | break; | ||
738 | |||
739 | case FCMPLE32_OPF: | ||
740 | for (i = 0; i < 2; i++) { | ||
741 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
742 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
743 | |||
744 | if (a <= b) | ||
745 | rd_val |= 1 << i; | ||
746 | } | ||
747 | break; | ||
748 | |||
749 | case FCMPNE16_OPF: | ||
750 | for (i = 0; i < 4; i++) { | ||
751 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
752 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
753 | |||
754 | if (a != b) | ||
755 | rd_val |= 1 << i; | ||
756 | } | ||
757 | break; | ||
758 | |||
759 | case FCMPNE32_OPF: | ||
760 | for (i = 0; i < 2; i++) { | ||
761 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
762 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
763 | |||
764 | if (a != b) | ||
765 | rd_val |= 1 << i; | ||
766 | } | ||
767 | break; | ||
768 | |||
769 | case FCMPEQ16_OPF: | ||
770 | for (i = 0; i < 4; i++) { | ||
771 | s16 a = (rs1 >> (i * 16)) & 0xffff; | ||
772 | s16 b = (rs2 >> (i * 16)) & 0xffff; | ||
773 | |||
774 | if (a == b) | ||
775 | rd_val |= 1 << i; | ||
776 | } | ||
777 | break; | ||
778 | |||
779 | case FCMPEQ32_OPF: | ||
780 | for (i = 0; i < 2; i++) { | ||
781 | s32 a = (rs1 >> (i * 32)) & 0xffff; | ||
782 | s32 b = (rs2 >> (i * 32)) & 0xffff; | ||
783 | |||
784 | if (a == b) | ||
785 | rd_val |= 1 << i; | ||
786 | } | ||
787 | break; | ||
788 | }; | ||
789 | |||
790 | maybe_flush_windows(0, 0, RD(insn), 0); | ||
791 | store_reg(regs, rd_val, RD(insn)); | ||
792 | } | ||
793 | |||
/* Emulate the VIS instructions which are not implemented in
 * hardware on Niagara.
 *
 * Returns 0 on success (and advances the trap PC past the emulated
 * instruction), -EFAULT if the instruction word cannot be read from
 * user space, -EINVAL for an OPF we do not emulate.
 */
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
	unsigned long pc = regs->tpc;
	unsigned int opf;

	/* Only user-mode instructions are emulated here. */
	BUG_ON(regs->tstate & TSTATE_PRIV);

	/* 32-bit tasks have 32-bit program counters. */
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;

	/* Re-fetch the instruction word from user space; the passed-in
	 * "insn" value is overwritten here.
	 */
	if (get_user(insn, (u32 __user *) pc))
		return -EFAULT;

	/* NOTE(review): presumably dumps live FPU state to the save
	 * area the helpers access via FPUSTATE -- confirm against
	 * fpumacro.h.
	 */
	save_and_clear_fpu();

	/* Dispatch on the 9-bit OPF field. */
	opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
	switch (opf) {
	default:
		return -EINVAL;

	/* Pixel Formatting Instructions. */
	case FPACK16_OPF:
	case FPACK32_OPF:
	case FPACKFIX_OPF:
	case FEXPAND_OPF:
	case FPMERGE_OPF:
		pformat(regs, insn, opf);
		break;

	/* Partitioned Multiply Instructions */
	case FMUL8x16_OPF:
	case FMUL8x16AU_OPF:
	case FMUL8x16AL_OPF:
	case FMUL8SUx16_OPF:
	case FMUL8ULx16_OPF:
	case FMULD8SUx16_OPF:
	case FMULD8ULx16_OPF:
		pmul(regs, insn, opf);
		break;

	/* Pixel Compare Instructions */
	case FCMPGT16_OPF:
	case FCMPGT32_OPF:
	case FCMPLE16_OPF:
	case FCMPLE32_OPF:
	case FCMPNE16_OPF:
	case FCMPNE32_OPF:
	case FCMPEQ16_OPF:
	case FCMPEQ32_OPF:
		pcmp(regs, insn, opf);
		break;

	/* Edge Handling Instructions */
	case EDGE8_OPF:
	case EDGE8N_OPF:
	case EDGE8L_OPF:
	case EDGE8LN_OPF:
	case EDGE16_OPF:
	case EDGE16N_OPF:
	case EDGE16L_OPF:
	case EDGE16LN_OPF:
	case EDGE32_OPF:
	case EDGE32N_OPF:
	case EDGE32L_OPF:
	case EDGE32LN_OPF:
		edge(regs, insn, opf);
		break;

	/* Pixel Component Distance */
	case PDIST_OPF:
		pdist(regs, insn);
		break;

	/* Three-Dimensional Array Addressing Instructions */
	case ARRAY8_OPF:
	case ARRAY16_OPF:
	case ARRAY32_OPF:
		array(regs, insn, opf);
		break;

	/* Byte Mask and Shuffle Instructions */
	case BMASK_OPF:
		bmask(regs, insn);
		break;

	case BSHUFFLE_OPF:
		bshuffle(regs, insn);
		break;
	};

	/* Step the trap PC past the emulated instruction. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	return 0;
}
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S new file mode 100644 index 000000000000..a6b0863c27df --- /dev/null +++ b/arch/sparc/kernel/winfixup.S | |||
@@ -0,0 +1,156 @@ | |||
1 | /* winfixup.S: Handle cases where user stack pointer is found to be bogus. | ||
2 | * | ||
3 | * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <asm/asi.h> | ||
7 | #include <asm/head.h> | ||
8 | #include <asm/page.h> | ||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/processor.h> | ||
11 | #include <asm/spitfire.h> | ||
12 | #include <asm/thread_info.h> | ||
13 | |||
14 | .text | ||
15 | |||
16 | /* It used to be the case that these register window fault | ||
17 | * handlers could run via the save and restore instructions | ||
18 | * done by the trap entry and exit code. They now do the | ||
19 | * window spill/fill by hand, so that case no longer can occur. | ||
20 | */ | ||
21 | |||
/* fill_fixup: a window FILL trap handler found the user's stack
 * pointer to be bogus and branched here.  Record the fault in
 * thread_info, switch back to the trap-time register window, and
 * enter the kernel proper so do_sparc64_fault() can resolve it.
 *
 * NOTE(review): on entry %g4 appears to carry fault-code bits and
 * %g5 the fault address (set up by the faulting spill/fill path) --
 * confirm against the trap-table fill handlers.
 */
	.align	32
fill_fixup:
	TRAP_LOAD_THREAD_REG(%g6, %g1)	/* %g6 = current thread_info */
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP, %g1	/* extract trap-time CWP */
	or	%g4, FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]
	wrpr	%g1, %cwp		/* back to the window we trapped in */
	ba,pt	%xcc, etrap
	 rd	%pc, %g7		/* etrap's return point is taken from %g7 */
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0	/* arg0: struct pt_regs on the kernel stack */
	ba,pt	%xcc, rtrap
	 nop
37 | |||
/* Handle a SPILL fault by stashing the register window into the
 * thread_info save area (TI_REG_WINDOW / TI_RWIN_SPTRS) instead of
 * the bogus user stack, then either retrying (kernel) or taking the
 * full fault path (user).
 *
 * Be very careful about usage of the trap globals here.
 * You cannot touch %g5 as that has the fault information.
 */
spill_fixup:
spill_fixup_mna:
spill_fixup_dax:
	TRAP_LOAD_THREAD_REG(%g6, %g1)
	ldx	[%g6 + TI_FLAGS], %g1
	andcc	%g1, _TIF_32BIT, %g0	/* 32-bit task?  (branch taken below) */
	ldub	[%g6 + TI_WSAVED], %g1	/* %g1 = windows already saved */
	sll	%g1, 3, %g3		/* 8 bytes per saved-%sp slot */
	add	%g6, %g3, %g3
	stx	%sp, [%g3 + TI_RWIN_SPTRS]	/* remember %sp for this window */
	sll	%g1, 7, %g3		/* 128 bytes per saved window */
	bne,pt	%xcc, 1f
	 add	%g6, %g3, %g3
	/* 64-bit task: store the full 8-byte locals and ins. */
	stx	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	stx	%l1, [%g3 + TI_REG_WINDOW + 0x08]
	stx	%l2, [%g3 + TI_REG_WINDOW + 0x10]
	stx	%l3, [%g3 + TI_REG_WINDOW + 0x18]
	stx	%l4, [%g3 + TI_REG_WINDOW + 0x20]
	stx	%l5, [%g3 + TI_REG_WINDOW + 0x28]
	stx	%l6, [%g3 + TI_REG_WINDOW + 0x30]
	stx	%l7, [%g3 + TI_REG_WINDOW + 0x38]
	stx	%i0, [%g3 + TI_REG_WINDOW + 0x40]
	stx	%i1, [%g3 + TI_REG_WINDOW + 0x48]
	stx	%i2, [%g3 + TI_REG_WINDOW + 0x50]
	stx	%i3, [%g3 + TI_REG_WINDOW + 0x58]
	stx	%i4, [%g3 + TI_REG_WINDOW + 0x60]
	stx	%i5, [%g3 + TI_REG_WINDOW + 0x68]
	stx	%i6, [%g3 + TI_REG_WINDOW + 0x70]
	ba,pt	%xcc, 2f
	 stx	%i7, [%g3 + TI_REG_WINDOW + 0x78]
	/* 32-bit task: store only the low 32 bits, packed. */
1:	stw	%l0, [%g3 + TI_REG_WINDOW + 0x00]
	stw	%l1, [%g3 + TI_REG_WINDOW + 0x04]
	stw	%l2, [%g3 + TI_REG_WINDOW + 0x08]
	stw	%l3, [%g3 + TI_REG_WINDOW + 0x0c]
	stw	%l4, [%g3 + TI_REG_WINDOW + 0x10]
	stw	%l5, [%g3 + TI_REG_WINDOW + 0x14]
	stw	%l6, [%g3 + TI_REG_WINDOW + 0x18]
	stw	%l7, [%g3 + TI_REG_WINDOW + 0x1c]
	stw	%i0, [%g3 + TI_REG_WINDOW + 0x20]
	stw	%i1, [%g3 + TI_REG_WINDOW + 0x24]
	stw	%i2, [%g3 + TI_REG_WINDOW + 0x28]
	stw	%i3, [%g3 + TI_REG_WINDOW + 0x2c]
	stw	%i4, [%g3 + TI_REG_WINDOW + 0x30]
	stw	%i5, [%g3 + TI_REG_WINDOW + 0x34]
	stw	%i6, [%g3 + TI_REG_WINDOW + 0x38]
	stw	%i7, [%g3 + TI_REG_WINDOW + 0x3c]
2:	add	%g1, 1, %g1
	stb	%g1, [%g6 + TI_WSAVED]	/* one more window stashed */
	rdpr	%tstate, %g1
	andcc	%g1, TSTATE_PRIV, %g0	/* trap taken from kernel mode? */
	saved				/* mark this window as saved to the CPU */
	be,pn	%xcc, 1f
	 and	%g1, TSTATE_CWP, %g1	/* (delay) extract trap-time CWP */
	retry				/* privileged: window is stashed, re-execute */
	/* User mode: record the fault and take the slow path via etrap. */
1:	mov	FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]
	wrpr	%g1, %cwp
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,a,pt	%xcc, rtrap
104 | |||
/* Unaligned access during a window spill/fill: redirect the trap
 * return to offset 0x78 of the 128-byte-aligned block whose address
 * is in %g3 -- presumably the trap-table entry's branch slot leading
 * to the *_fixup_mna code; confirm against the trap table layout.
 */
winfix_mna:
	andn	%g3, 0x7f, %g3		/* round down to the 128-byte entry */
	add	%g3, 0x78, %g3
	wrpr	%g3, %tnpc
	done				/* resume at the new %tnpc */
110 | |||
/* A FILL handler hit an unaligned-address fault.  Re-enter the
 * kernel via etrap and dispatch to the unaligned-access handler
 * appropriate for this CPU's MMU type.
 */
fill_fixup_mna:
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP, %g1	/* restore trap-time window */
	wrpr	%g1, %cwp
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	sethi	%hi(tlb_type), %g1
	lduw	[%g1 + %lo(tlb_type)], %g1
	cmp	%g1, 3			/* 3: presumably hypervisor/sun4v -- matches the sun4v_* callee; confirm */
	bne,pt	%icc, 1f
	 add	%sp, PTREGS_OFF, %o0	/* (delay) arg0: pt_regs for either callee */
	/* NOTE(review): %l4/%l5 look like fault info preserved by etrap
	 * from the trap globals -- TODO confirm.  Operand order differs
	 * between the two callees below; that appears deliberate.
	 */
	mov	%l4, %o2
	call	sun4v_do_mna
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap
1:	mov	%l4, %o1
	mov	%l5, %o2
	call	mem_address_unaligned
	 nop
	ba,a,pt	%xcc, rtrap
131 | |||
/* Data access exception during a window spill/fill: redirect the
 * trap return to offset 0x74 of the 128-byte-aligned block whose
 * address is in %g3 -- presumably the trap-table entry's branch slot
 * leading to the *_fixup_dax code; confirm against the trap table.
 */
winfix_dax:
	andn	%g3, 0x7f, %g3		/* round down to the 128-byte entry */
	add	%g3, 0x74, %g3
	wrpr	%g3, %tnpc
	done				/* resume at the new %tnpc */
137 | |||
/* A FILL handler hit a data access exception.  Re-enter the kernel
 * via etrap and dispatch to the data-access-exception handler
 * appropriate for this CPU's MMU type.
 */
fill_fixup_dax:
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP, %g1	/* restore trap-time window */
	wrpr	%g1, %cwp
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	sethi	%hi(tlb_type), %g1
	/* NOTE(review): %l4/%l5 look like fault info preserved by etrap
	 * from the trap globals -- TODO confirm.
	 */
	mov	%l4, %o1
	lduw	[%g1 + %lo(tlb_type)], %g1
	mov	%l5, %o2
	cmp	%g1, 3			/* 3: presumably hypervisor/sun4v -- matches the sun4v_* callee; confirm */
	bne,pt	%icc, 1f
	 add	%sp, PTREGS_OFF, %o0	/* (delay) arg0: pt_regs for either callee */
	call	sun4v_data_access_exception
	 nop
	ba,a,pt	%xcc, rtrap
1:	call	spitfire_data_access_exception
	 nop
	ba,a,pt	%xcc, rtrap