Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    |  15
-rw-r--r--  arch/arm/mach-l7200/core.c       |  20
-rw-r--r--  arch/arm/mach-pxa/corgi_lcd.c    |  20
-rw-r--r--  arch/arm/mach-pxa/generic.c      |   5
-rw-r--r--  arch/arm/mach-pxa/spitz.c        |   4
-rw-r--r--  arch/arm/mach-s3c2410/Kconfig    |   1
-rw-r--r--  arch/m32r/kernel/smp.c           |  12
-rw-r--r--  arch/mips/pci/fixup-tb0226.c     |  33
-rw-r--r--  arch/ppc64/kernel/pmac_setup.c   |   3
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  | 363
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c |  44
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c  |  39
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c |  57
-rw-r--r--  arch/sparc64/kernel/smp.c        |   7
-rw-r--r--  arch/sparc64/mm/ultra.S          |  16
-rw-r--r--  arch/sparc64/prom/misc.c         |  12
16 files changed, 247 insertions, 404 deletions
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 08e58ecd44be..0d5db5279c5c 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -89,13 +89,6 @@ SECTIONS
 		*(.got)			/* Global offset table */
 	}

-	. = ALIGN(16);
-	__ex_table : {			/* Exception table */
-		__start___ex_table = .;
-			*(__ex_table)
-		__stop___ex_table = .;
-	}
-
 	RODATA

 	_etext = .;			/* End of text and rodata section */
@@ -138,6 +131,14 @@ SECTIONS
 		*(.data.cacheline_aligned)

 		/*
+		 * The exception fixup table (might need resorting at runtime)
+		 */
+		. = ALIGN(32);
+		__start___ex_table = .;
+			*(__ex_table)
+		__stop___ex_table = .;
+
+		/*
 		 * and the usual data section
 		 */
 		*(.data)
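
The __ex_table section moves out of the read-only region above because, as the new comment says, the exception fixup table might need resorting at runtime, so it has to live in writable .data. For orientation, a minimal sketch of how such a table is consumed at fault time (simplified; the kernel's real helpers are sort_extable() and search_extable(), and search_fixup() below is a hypothetical name):

	/* Illustrative sketch, not kernel code: each entry pairs an
	 * instruction address that may fault with the address to resume
	 * at; lookup is a binary search, which is why the table must be
	 * kept sorted. */
	struct exception_table_entry {
		unsigned long insn;	/* potentially faulting address */
		unsigned long fixup;	/* continuation address */
	};

	extern struct exception_table_entry __start___ex_table[];
	extern struct exception_table_entry __stop___ex_table[];

	static unsigned long search_fixup(unsigned long addr)
	{
		struct exception_table_entry *lo = __start___ex_table;
		struct exception_table_entry *hi = __stop___ex_table - 1;

		while (lo <= hi) {
			struct exception_table_entry *mid = lo + (hi - lo) / 2;

			if (mid->insn == addr)
				return mid->fixup;
			if (mid->insn < addr)
				lo = mid + 1;
			else
				hi = mid - 1;
		}
		return 0;	/* no fixup registered for this address */
	}
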
diff --git a/arch/arm/mach-l7200/core.c b/arch/arm/mach-l7200/core.c
index 5fd8c9f97f9a..03ed742ae2be 100644
--- a/arch/arm/mach-l7200/core.c
+++ b/arch/arm/mach-l7200/core.c
@@ -7,11 +7,17 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/device.h>

+#include <asm/types.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
 #include <asm/hardware.h>
 #include <asm/page.h>

+#include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/irq.h>

 /*
  * IRQ base register
@@ -47,6 +53,12 @@ static void l7200_unmask_irq(unsigned int irq)
 {
 	IRQ_ENABLE = 1 << irq;
 }
+
+static struct irqchip l7200_irq_chip = {
+	.ack	= l7200_mask_irq,
+	.mask	= l7200_mask_irq,
+	.unmask	= l7200_unmask_irq
+};

 static void __init l7200_init_irq(void)
 {
@@ -56,11 +68,9 @@ static void __init l7200_init_irq(void)
 	FIQ_ENABLECLEAR = 0xffffffff;	/* clear all fast interrupt enables */

 	for (irq = 0; irq < NR_IRQS; irq++) {
-		irq_desc[irq].valid = 1;
-		irq_desc[irq].probe_ok = 1;
-		irq_desc[irq].mask_ack = l7200_mask_irq;
-		irq_desc[irq].mask = l7200_mask_irq;
-		irq_desc[irq].unmask = l7200_unmask_irq;
+		set_irq_chip(irq, &l7200_irq_chip);
+		set_irq_flags(irq, IRQF_VALID);
+		set_irq_handler(irq, do_level_IRQ);
 	}

 	init_FIQ();
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
index c02ef7c0f7ef..850538fadece 100644
--- a/arch/arm/mach-pxa/corgi_lcd.c
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -467,6 +467,7 @@ void corgi_put_hsync(void)
 {
 	if (get_hsync_time)
 		symbol_put(w100fb_get_hsynclen);
+	get_hsync_time = NULL;
 }

 void corgi_wait_hsync(void)
@@ -476,20 +477,37 @@ void corgi_wait_hsync(void)
 #endif

 #ifdef CONFIG_PXA_SHARP_Cxx00
+static struct device *spitz_pxafb_dev;
+
+static int is_pxafb_device(struct device * dev, void * data)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+
+	return (strncmp(pdev->name, "pxa2xx-fb", 9) == 0);
+}
+
 unsigned long spitz_get_hsync_len(void)
 {
+	if (!spitz_pxafb_dev) {
+		spitz_pxafb_dev = bus_find_device(&platform_bus_type, NULL, NULL, is_pxafb_device);
+		if (!spitz_pxafb_dev)
+			return 0;
+	}
 	if (!get_hsync_time)
 		get_hsync_time = symbol_get(pxafb_get_hsync_time);
 	if (!get_hsync_time)
 		return 0;

-	return pxafb_get_hsync_time(&pxafb_device.dev);
+	return pxafb_get_hsync_time(spitz_pxafb_dev);
 }

 void spitz_put_hsync(void)
 {
+	put_device(spitz_pxafb_dev);
 	if (get_hsync_time)
 		symbol_put(pxafb_get_hsync_time);
+	spitz_pxafb_dev = NULL;
+	get_hsync_time = NULL;
 }

 void spitz_wait_hsync(void)
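
Two things happen in this corgi_lcd.c change: the hard reference to the pxafb_device platform_device is replaced by a runtime lookup on the platform bus, and the put path now clears the cached pointers. Worth noting: bus_find_device() returns the matched device with its reference count raised, which is what the added put_device() balances. A minimal sketch of the same lookup pattern, using the driver-model API as it existed at the time of this patch (find_platform_dev() and its name parameter are illustrative, not from the patch):

	/* Illustrative sketch: find a platform device by name; the caller
	 * owns a reference on the result and must drop it with
	 * put_device() when done. */
	static int match_by_name(struct device *dev, void *data)
	{
		struct platform_device *pdev =
			container_of(dev, struct platform_device, dev);

		return strcmp(pdev->name, (const char *) data) == 0;
	}

	static struct device *find_platform_dev(const char *name)
	{
		return bus_find_device(&platform_bus_type, NULL,
				       (void *) name, match_by_name);
	}
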
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index d0660a8c4b70..d327c127eddb 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -208,6 +208,11 @@ static struct platform_device pxafb_device = {
 	.resource	= pxafb_resources,
 };

+void __init set_pxa_fb_parent(struct device *parent_dev)
+{
+	pxafb_device.dev.parent = parent_dev;
+}
+
 static struct platform_device ffuart_device = {
 	.name	= "pxa2xx-uart",
 	.id	= 0,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 568afe3d6e1a..d0ab428c2d7d 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -36,7 +36,6 @@
 #include <asm/arch/irq.h>
 #include <asm/arch/mmc.h>
 #include <asm/arch/udc.h>
-#include <asm/arch/ohci.h>
 #include <asm/arch/pxafb.h>
 #include <asm/arch/akita.h>
 #include <asm/arch/spitz.h>
@@ -304,7 +303,6 @@ static struct platform_device *devices[] __initdata = {
 	&spitzkbd_device,
 	&spitzts_device,
 	&spitzbl_device,
-	&spitzbattery_device,
 };

 static void __init common_init(void)
@@ -328,7 +326,7 @@ static void __init common_init(void)

 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	pxa_set_mci_info(&spitz_mci_platform_data);
-	pxafb_device.dev.parent = &spitzssp_device.dev;
+	set_pxa_fb_parent(&spitzssp_device.dev);
 	set_pxa_fb_info(&spitz_pxafb_info);
 }

diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 06807c6ee68a..c796bcdd6158 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -12,6 +12,7 @@ config MACH_ANUBIS
 config ARCH_BAST
 	bool "Simtec Electronics BAST (EB2410ITX)"
 	select CPU_S3C2410
+	select ISA
 	help
 	  Say Y here if you are using the Simtec Electronics EB2410ITX
 	  development board (also known as BAST)
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index a4576ac7e870..8b1f6eb76870 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -275,12 +275,14 @@ static void flush_tlb_all_ipi(void *info)
  *==========================================================================*/
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-	int cpu_id = smp_processor_id();
+	int cpu_id;
 	cpumask_t cpu_mask;
-	unsigned long *mmc = &mm->context[cpu_id];
+	unsigned long *mmc;
 	unsigned long flags;

 	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
 	cpu_mask = mm->cpu_vm_mask;
 	cpu_clear(cpu_id, cpu_mask);

@@ -343,12 +345,14 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int cpu_id = smp_processor_id();
+	int cpu_id;
 	cpumask_t cpu_mask;
-	unsigned long *mmc = &mm->context[cpu_id];
+	unsigned long *mmc;
 	unsigned long flags;

 	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
 	cpu_mask = mm->cpu_vm_mask;
 	cpu_clear(cpu_id, cpu_mask);

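
The m32r change above is a preemption-safety fix rather than a cleanup: smp_processor_id() used to be evaluated in the variable initializers, before preempt_disable(), so the task could migrate to another CPU between reading the id and using it. The safe ordering, sketched (illustrative function, not from the patch):

	/* Illustrative sketch: the CPU id is only stable while
	 * preemption is disabled. */
	void example(struct mm_struct *mm)
	{
		int cpu_id;
		unsigned long *mmc;

		preempt_disable();
		/* From here until preempt_enable() we cannot migrate,
		 * so cpu_id stays valid. */
		cpu_id = smp_processor_id();
		mmc = &mm->context[cpu_id];
		/* ... use cpu_id and *mmc ... */
		preempt_enable();
	}
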
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c
index 61513d5d97da..b5d42b12de10 100644
--- a/arch/mips/pci/fixup-tb0226.c
+++ b/arch/mips/pci/fixup-tb0226.c
@@ -1,7 +1,7 @@
 /*
  * fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
  *
- * Copyright (C) 2002-2004  Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2005  Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>

+#include <asm/vr41xx/giu.h>
 #include <asm/vr41xx/tb0226.h>

 int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
@@ -29,42 +30,42 @@ int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 	switch (slot) {
 	case 12:
 		vr41xx_set_irq_trigger(GD82559_1_PIN,
-				       TRIGGER_LEVEL,
-				       SIGNAL_THROUGH);
-		vr41xx_set_irq_level(GD82559_1_PIN, LEVEL_LOW);
+				       IRQ_TRIGGER_LEVEL,
+				       IRQ_SIGNAL_THROUGH);
+		vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW);
 		irq = GD82559_1_IRQ;
 		break;
 	case 13:
 		vr41xx_set_irq_trigger(GD82559_2_PIN,
-				       TRIGGER_LEVEL,
-				       SIGNAL_THROUGH);
-		vr41xx_set_irq_level(GD82559_2_PIN, LEVEL_LOW);
+				       IRQ_TRIGGER_LEVEL,
+				       IRQ_SIGNAL_THROUGH);
+		vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW);
 		irq = GD82559_2_IRQ;
 		break;
 	case 14:
 		switch (pin) {
 		case 1:
 			vr41xx_set_irq_trigger(UPD720100_INTA_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTA_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTA_IRQ;
 			break;
 		case 2:
 			vr41xx_set_irq_trigger(UPD720100_INTB_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTB_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTB_IRQ;
 			break;
 		case 3:
 			vr41xx_set_irq_trigger(UPD720100_INTC_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTC_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTC_IRQ;
 			break;
 		default:
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 25755252067a..fa8121d53b89 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -115,7 +115,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m)

 	/* find motherboard type */
 	seq_printf(m, "machine\t\t: ");
-	np = find_devices("device-tree");
+	np = of_find_node_by_path("/");
 	if (np != NULL) {
 		pp = (char *) get_property(np, "model", NULL);
 		if (pp != NULL)
@@ -133,6 +133,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m)
 			}
 			seq_printf(m, "\n");
 		}
+		of_node_put(np);
 	} else
 		seq_printf(m, "PowerMac\n");

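
Besides switching from find_devices("device-tree") to of_find_node_by_path("/"), this hunk adds the matching of_node_put(): of_find_node_by_path() returns the node with an elevated reference count, which the caller must drop when finished. The pairing, sketched (simplified; not the full pmac_show_cpuinfo() body):

	/* Illustrative sketch: device-tree lookups return counted
	 * references that must be released. */
	struct device_node *np;

	np = of_find_node_by_path("/");	/* takes a reference on success */
	if (np != NULL) {
		char *model = (char *) get_property(np, "model", NULL);

		if (model != NULL)
			seq_printf(m, "%s\n", model);
		of_node_put(np);	/* drop the lookup reference */
	}
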
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 425c60cfea19..a11910be1013 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)

 	/* Ensure completion of previous PIO writes. */
 	(void) pci_iommu_read(iommu->write_complete_reg);
-
-	/* Now update everyone's flush point. */
-	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
-		iommu->alloc_info[entry].flush =
-			iommu->alloc_info[entry].next;
-	}
 }

 #define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }

-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
-{
-	int i;
-
-	tsbsize /= sizeof(iopte_t);
-
-	for (i = 0; i < tsbsize; i++)
-		iopte_make_dummy(iommu, &iommu->page_table[i]);
-}
-
-static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
-	iopte_t *iopte, *limit, *first;
-	unsigned long cnum, ent, flush_point;
-
-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
-	iopte = (iommu->page_table +
-		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	if (cnum == 0)
-		limit = (iommu->page_table +
-			 iommu->lowest_consistent_map);
-	else
-		limit = (iopte +
-			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
-	flush_point = iommu->alloc_info[cnum].flush;
-
-	first = iopte;
-	for (;;) {
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			if ((iopte + (1 << cnum)) >= limit)
-				ent = 0;
-			else
-				ent = ent + 1;
-			iommu->alloc_info[cnum].next = ent;
-			if (ent == flush_point)
-				__iommu_flushall(iommu);
-			break;
-		}
-		iopte += (1 << cnum);
-		ent++;
-		if (iopte >= limit) {
-			iopte = (iommu->page_table +
-				 (cnum <<
-				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-			ent = 0;
-		}
-		if (ent == flush_point)
-			__iommu_flushall(iommu);
-		if (iopte == first)
-			goto bad;
-	}
-
-	/* I've got your streaming cluster right here buddy boy... */
-	return iopte;
-
-bad:
-	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
-	       npages);
-	return NULL;
-}
-
-static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
-				   unsigned long npages, unsigned long ctx)
-{
-	unsigned long cnum, ent;
-
-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
-
-	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
-		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+/* Based largely upon the ppc64 iommu allocator. */
+static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+{
+	struct pci_iommu_arena *arena = &iommu->arena;
+	unsigned long n, i, start, end, limit;
+	int pass;
+
+	limit = arena->limit;
+	start = arena->hint;
+	pass = 0;
+
+again:
+	n = find_next_zero_bit(arena->map, limit, start);
+	end = n + npages;
+	if (unlikely(end >= limit)) {
+		if (likely(pass < 1)) {
+			limit = start;
+			start = 0;
+			__iommu_flushall(iommu);
+			pass++;
+			goto again;
+		} else {
+			/* Scanned the whole thing, give up. */
+			return -1;
+		}
+	}
+
+	for (i = n; i < end; i++) {
+		if (test_bit(i, arena->map)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	for (i = n; i < end; i++)
+		__set_bit(i, arena->map);
+
+	arena->hint = end;
+
+	return n;
+}
+
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = base; i < (base + npages); i++)
+		__clear_bit(i, arena->map);
+}
+
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+{
+	unsigned long i, tsbbase, order, sz, num_tsb_entries;
+
+	num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->ctx_lowest_free = 1;
+	iommu->page_table_map_base = dma_offset;
+	iommu->dma_addr_mask = dma_addr_mask;
+
+	/* Allocate and initialize the free area map. */
+	sz = num_tsb_entries / 8;
+	sz = (sz + 7UL) & ~7UL;
+	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+	if (!iommu->arena.map) {
+		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_halt();
+	}
+	memset(iommu->arena.map, 0, sz);
+	iommu->arena.limit = num_tsb_entries;

-	/* If the global flush might not have caught this entry,
-	 * adjust the flush point such that we will flush before
-	 * ever trying to reuse it.
-	 */
-#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
-	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
-		iommu->alloc_info[cnum].flush = ent;
-#undef between
+	/* Allocate and initialize the dummy page which we
+	 * set inactive IO PTEs to point to.
+	 */
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* Now allocate and setup the IOMMU page table itself. */
+	order = get_order(tsbsize);
+	tsbbase = __get_free_pages(GFP_KERNEL, order);
+	if (!tsbbase) {
+		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+
+	for (i = 0; i < num_tsb_entries; i++)
+		iopte_make_dummy(iommu, &iommu->page_table[i]);
 }

-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
 {
-	iopte_t *iopte;
+	long entry;

-	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
-	while (iopte > iommu->page_table) {
-		iopte--;
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			unsigned long tmp = npages;
+	entry = pci_arena_alloc(iommu, npages);
+	if (unlikely(entry < 0))
+		return NULL;

-			while (--tmp) {
-				iopte--;
-				if (!IOPTE_IS_DUMMY(iommu, iopte))
-					break;
-			}
-			if (tmp == 0) {
-				u32 entry = (iopte - iommu->page_table);
-
-				if (entry < iommu->lowest_consistent_map)
-					iommu->lowest_consistent_map = entry;
-				return iopte;
-			}
-		}
-	}
-	return NULL;
+	return iommu->page_table + entry;
+}
+
+static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+{
+	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }

 static int iommu_alloc_ctx(struct pci_iommu *iommu)
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, first_page, ctx;
+	unsigned long flags, order, first_page;
 	void *ret;
 	int npages;

@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	iommu = pcp->pbm->iommu;

 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
-	if (iopte == NULL) {
-		spin_unlock_irqrestore(&iommu->lock, flags);
+	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(iopte == NULL)) {
 		free_pages(first_page, order);
 		return NULL;
 	}
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 			      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
-		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
 				     IOPTE_WRITE |
 				     (first_page & IOPTE_PAGE));
 		iopte++;
 		first_page += IO_PAGE_SIZE;
 	}

-	{
-		int i;
-		u32 daddr = *dma_addrp;
-
-		npages = size >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++) {
-			pci_iommu_write(iommu->iommu_flush, daddr);
-			daddr += IO_PAGE_SIZE;
-		}
-	}
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;
 }

@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, npages, i, ctx;
+	unsigned long flags, order, npages;

 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_

 	spin_lock_irqsave(&iommu->lock, flags);

-	if ((iopte - iommu->page_table) ==
-	    iommu->lowest_consistent_map) {
-		iopte_t *walk = iopte + npages;
-		iopte_t *limit;
-
-		limit = (iommu->page_table +
-			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-		while (walk < limit) {
-			if (!IOPTE_IS_DUMMY(iommu, walk))
-				break;
-			walk++;
-		}
-		iommu->lowest_consistent_map =
-			(walk - iommu->page_table);
-	}
-
-	/* Data for consistent mappings cannot enter the streaming
-	 * buffers, so we only need to update the TSB.  We flush
-	 * the IOMMU here as well to prevent conflicts with the
-	 * streaming mapping deferred tlb flush scheme.
-	 */
-
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
-
-	for (i = 0; i < npages; i++, iopte++)
-		iopte_make_dummy(iommu, iopte);
-
-	if (iommu->iommu_ctxflush) {
-		pci_iommu_write(iommu->iommu_ctxflush, ctx);
-	} else {
-		for (i = 0; i < npages; i++) {
-			u32 daddr = dvma + (i << IO_PAGE_SHIFT);
-
-			pci_iommu_write(iommu->iommu_flush, daddr);
-		}
-	}
-
-	iommu_free_ctx(iommu, ctx);
+	free_npages(iommu, dvma, npages);

 	spin_unlock_irqrestore(&iommu->lock, flags);

@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;

 	oaddr = (unsigned long)ptr;
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;

 	spin_lock_irqsave(&iommu->lock, flags);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);

-	base = alloc_streaming_cluster(iommu, npages);
-	if (base == NULL)
+	if (unlikely(!base))
 		goto bad;
+
 	bus_addr = (iommu->page_table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
 		iopte_val(*base) = iopte_protection | base_paddr;

-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;

 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
 	return PCI_DMA_ERROR_CODE;
 }

@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, ctx;
+	unsigned long flags, npages, ctx, i;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return;
+	}

 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int

 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
+				 npages, direction);

-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);

-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

 	iommu_free_ctx(iommu, ctx);

@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 			pci_map_single(pdev,
 				       (page_address(sglist->page) + sglist->offset),
 				       sglist->length, direction);
+		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+			return 0;
 		sglist->dma_length = sglist->length;
 		return 1;
 	}
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;

 	/* Step 1: Prepare scatter list. */

 	npages = prepare_sg(sglist, nelems);

-	/* Step 2: Allocate a cluster. */
+	/* Step 2: Allocate a cluster and context, if necessary. */

 	spin_lock_irqsave(&iommu->lock, flags);

-	base = alloc_streaming_cluster(iommu, npages);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
 	if (base == NULL)
 		goto bad;
-	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+	dma_base = iommu->page_table_map_base +
+		((base - iommu->page_table) << IO_PAGE_SHIFT);

 	/* Step 3: Normalize DMA addresses. */
 	used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	}
 	used = nelems - used;

-	/* Step 4: Choose a context if necessary. */
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
-
-	/* Step 5: Create the mappings. */
+	/* Step 4: Create the mappings. */
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
 		iopte_protection = IOPTE_CONSISTENT(ctx);
 	if (direction != PCI_DMA_TODEVICE)
 		iopte_protection |= IOPTE_WRITE;
-	fill_sg (base, sglist, used, nelems, iopte_protection);
+
+	fill_sg(base, sglist, used, nelems, iopte_protection);
+
 #ifdef VERIFY_SG
 	verify_sglist(sglist, nelems, base, npages);
 #endif

-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return used;

 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
-	return PCI_DMA_ERROR_CODE;
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
+	return 0;
 }

 /* Unmap a set of streaming mode DMA translations. */
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	unsigned long flags, ctx, i, npages;
 	u32 bus_addr;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+	}

 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		if (sglist[i].dma_length == 0)
 			break;
 	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		  bus_addr) >> IO_PAGE_SHIFT;

 	base = iommu->page_table +
 		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	if (strbuf->strbuf_enabled)
 		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out the TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);

-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

 	iommu_free_ctx(iommu, ctx);

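
The pci_iommu.c rewrite above replaces the power-of-two "streaming cluster" allocator, with its per-cluster next/flush bookkeeping and the lowest_consistent_map split between consistent and streaming mappings, with a single bitmap arena modeled, as the new comment says, on the ppc64 iommu allocator: one bit per IOMMU page, a rotating hint, and at most one full IOMMU flush per wrap of the table. A self-contained sketch of the same idea in plain C outside the kernel (all names are illustrative; the flush hook is omitted):

	/* Illustrative sketch of a pci_arena_alloc()-style bitmap arena:
	 * one bit per page; the hint rotates so fresh allocations do not
	 * immediately reuse just-freed, possibly unflushed, pages. */
	struct arena {
		unsigned char *map;
		unsigned long limit;	/* total pages */
		unsigned long hint;	/* next search start */
	};

	static int arena_test(struct arena *a, unsigned long i)
	{
		return a->map[i >> 3] & (1 << (i & 7));
	}

	static void arena_set(struct arena *a, unsigned long i)
	{
		a->map[i >> 3] |= 1 << (i & 7);
	}

	static long arena_alloc(struct arena *a, unsigned long npages)
	{
		unsigned long start = a->hint, limit = a->limit, n, i;
		int pass = 0;

	again:
		for (n = start; n + npages <= limit; n++) {
			for (i = 0; i < npages; i++)
				if (arena_test(a, n + i))
					break;
			if (i == npages) {	/* found a free run */
				for (i = 0; i < npages; i++)
					arena_set(a, n + i);
				a->hint = n + npages;
				return (long) n;
			}
			n += i;		/* skip past the busy bit we hit */
		}
		if (pass++ < 1) {	/* wrap once, as pci_arena_alloc() does;
					 * the kernel flushes the IOMMU here */
			limit = start;
			start = 0;
			goto again;
		}
		return -1;		/* arena exhausted */
	}

	static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
	{
		while (npages--)
			a->map[(base + npages) >> 3] &= ~(1 << ((base + npages) & 7));
	}
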
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 6ed1ef25e0ac..c03ed5f49d31 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
 static void psycho_iommu_init(struct pci_controller_info *p)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i;
+	unsigned long i;
 	u64 control;

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
-	if (!tsbbase) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_sz_bits = 17;
-	iommu->page_table_map_base = 0xc0000000;
-	iommu->dma_addr_mask = 0xffffffff;
-	pci_iommu_table_init(iommu, IO_TSB_SIZE);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
-
-	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
+		     __pa(iommu->page_table));

 	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
 	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);

 	/* If necessary, hook us up for starfire IRQ translations. */
-	if(this_is_starfire)
+	if (this_is_starfire)
 		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
 	else
 		p->starfire_cookie = NULL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 0ee6bd5b9ac6..da8e1364194f 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 			     u32 dma_mask)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i, order;
+	unsigned long i;
 	u64 control;

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
-	if (!tsbbase) {
-		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = dvma_offset;
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
+		    __pa(iommu->page_table));

 	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
 	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	switch(tsbsize) {
 	case 64:
 		control |= SABRE_IOMMU_TSBSZ_64K;
-		iommu->page_table_sz_bits = 16;
 		break;
 	case 128:
 		control |= SABRE_IOMMU_TSBSZ_128K;
-		iommu->page_table_sz_bits = 17;
 		break;
 	default:
 		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 		break;
 	}
 	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
 }

 static void pbm_register_toplevel_resources(struct pci_controller_info *p,
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index cae5b61fe2f0..d8c4e0919b4e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct pci_iommu *iommu = pbm->iommu;
-	unsigned long tsbbase, i, tagbase, database, order;
+	unsigned long i, tagbase, database;
 	u32 vdma[2], dma_mask;
 	u64 control;
 	int err, tsbsize;
@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 		prom_halt();
 	};

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control  = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	order = get_order(tsbsize * 8 * 1024);
-	tsbbase = __get_free_pages(GFP_KERNEL, order);
-	if (!tsbbase) {
-		prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
-		prom_halt();
-	}
-
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = vdma[0];
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	switch (tsbsize) {
-	case 64:
-		iommu->page_table_sz_bits = 16;
-		break;
-
-	case 128:
-		iommu->page_table_sz_bits = 17;
-		break;
-
-	default:
-		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
-		prom_halt();
-		break;
-	};
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
-
-	schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+	schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));

 	control = schizo_read(iommu->iommu_control);
 	control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 590df5a16f5a..b137fd63f5e1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1001,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }

-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
-	smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
 #define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)	cpu_data(__cpu).counter

diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 058b8126c1a7..e4c9151fa116 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -453,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
 	 nop
 	nop

-	.globl		xcall_promstop
-xcall_promstop:
-	rdpr	%pstate, %g2
-	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
-	rdpr	%pil, %g2
-	wrpr	%g0, 15, %pil
-	sethi	%hi(109f), %g7
-	b,pt	%xcc, etrap_irq
-109:	 or	%g7, %lo(109b), %g7
-	flushw
-	call	prom_stopself
-	 nop
-	/* We should not return, just spin if we do... */
-1:	b,a,pt	%xcc, 1b
-	 nop
-
 	.data

 errata32_hwbug:
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 9b895faf077b..87f5cfce23bb 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -68,19 +68,11 @@ void prom_cmdline(void)
 	local_irq_restore(flags);
 }

-#ifdef CONFIG_SMP
-extern void smp_promstop_others(void);
-#endif
-
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
 void prom_halt(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 again:
 	p1275_cmd("exit", P1275_INOUT(0, 0));
 	goto again;	/* PROM is out to get me -DaveM */
@@ -88,10 +80,6 @@ again:

 void prom_halt_power_off(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 	p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));

 	/* if nothing else helps, we just halt */