author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 12:29:04 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 12:29:04 -0400
commit    0278ef8b484a71917bd4f03a763285cdaac10954 (patch)
tree      8f6f7bf2e2a85b4643dfe3d0475811ce858fb4fc /arch
parent    15c54033964a943de7b0763efd3bd0ede7326395 (diff)
parent    cd9ad58d4061494e7fdd70ded7bcf2418daf356a (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6: (67 commits)
  [SCSI] SUNESP: Complete driver rewrite to version 2.0
  [SPARC64]: Convert PCI over to generic struct iommu/strbuf.
  [SPARC]: device_node name constification fallout
  [SPARC64]: Convert SBUS over to generic iommu/strbuf structs.
  [SPARC64]: Add generic iommu and strbuf structs to iommu.h
  [SPARC64]: Consolidate {sbus,pci}_iommu_arena.
  [SPARC]: Make device_node name and type const
  [SPARC64]: constify some parameters of OF routines
  [TIGON3]: of_get_property() returns const.
  [SPARC64]: Fix PCI rework to adhere to of_get_property() const return.
  [SPARC64]: Document and fix calculation of pages_avail.
  [SPARC64]: Make sure pbm->prom_node is set up early enough in psycho.c
  [SPARC64]: Use bootmem_bootmap_pages() in choose_bootmap_pfn().
  [SPARC64]: Add proper header file extern for cmdline_memory_size.
  [SPARC64]: Kill sparc_ultra_dump_{i,d}tlb()
  [SPARC64]: Use DECLARE_BITMAP and BITS_TO_LONGS in mm/init.c
  [SPARC64]: Give more verbose show_mem() output just like i386.
  [SPARC64]: Mark show_mem() printk's with KERN_INFO.
  [SPARC64]: Kill kvaddr_to_phys() and friends.
  [SPARC64]: Privatize sun4u_get_pte() and fix name.
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/lib/Makefile              |    1
-rw-r--r--  arch/alpha/lib/strcasecmp.c          |   26
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c      |    2
-rw-r--r--  arch/powerpc/lib/Makefile            |    5
-rw-r--r--  arch/powerpc/lib/strcase.c           |   25
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c          |    2
-rw-r--r--  arch/ppc/lib/Makefile                |    2
-rw-r--r--  arch/ppc/lib/strcase.c               |   24
-rw-r--r--  arch/sh/lib/Makefile                 |    2
-rw-r--r--  arch/sh/lib/strcasecmp.c             |   26
-rw-r--r--  arch/sparc/kernel/ebus.c             |   14
-rw-r--r--  arch/sparc/kernel/of_device.c        |   21
-rw-r--r--  arch/sparc/kernel/pcic.c             |    4
-rw-r--r--  arch/sparc/kernel/prom.c             |   19
-rw-r--r--  arch/sparc/kernel/time.c             |    2
-rw-r--r--  arch/sparc64/Kconfig                 |   15
-rw-r--r--  arch/sparc64/kernel/central.c        |   12
-rw-r--r--  arch/sparc64/kernel/chmc.c           |    4
-rw-r--r--  arch/sparc64/kernel/ebus.c           |   14
-rw-r--r--  arch/sparc64/kernel/irq.c            |   28
-rw-r--r--  arch/sparc64/kernel/isa.c            |   36
-rw-r--r--  arch/sparc64/kernel/of_device.c      |   59
-rw-r--r--  arch/sparc64/kernel/pci.c            |  599
-rw-r--r--  arch/sparc64/kernel/pci_common.c     |  806
-rw-r--r--  arch/sparc64/kernel/pci_impl.h       |   33
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c      |  104
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c     |  154
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c      |  465
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c     |  295
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c      |  402
-rw-r--r--  arch/sparc64/kernel/process.c        |   15
-rw-r--r--  arch/sparc64/kernel/prom.c           |   39
-rw-r--r--  arch/sparc64/kernel/sbus.c           |  243
-rw-r--r--  arch/sparc64/kernel/smp.c            |  116
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c  |    1
-rw-r--r--  arch/sparc64/kernel/time.c           |  484
-rw-r--r--  arch/sparc64/kernel/ttable.S         |    6
-rw-r--r--  arch/sparc64/mm/init.c               |  280
-rw-r--r--  arch/sparc64/solaris/misc.c          |    3
-rw-r--r--  arch/xtensa/lib/Makefile             |    2
-rw-r--r--  arch/xtensa/lib/strcasecmp.c         |   32
41 files changed, 1494 insertions, 2928 deletions
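
Of the files above, five are per-architecture copies of strcasecmp()/strncasecmp() being deleted outright (alpha, powerpc, ppc, sh, xtensa). They are redundant because a generic, C-locale-only implementation lives in lib/string.c, which this arch-only diffstat does not show. A minimal sketch of such a generic version, mirroring the ppc/powerpc copies removed below rather than quoting the upstream lib/string.c file verbatim:

#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/string.h>

/* C locale only: fold both characters to lower case and stop at the
 * first difference or at the terminating NUL.
 */
int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}

int strncasecmp(const char *s1, const char *s2, size_t n)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
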
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 21cf624d7329..ea098f3b629f 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -36,7 +36,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
 	$(ev6-y)csum_ipv6_magic.o \
 	$(ev6-y)clear_page.o \
 	$(ev6-y)copy_page.o \
-	strcasecmp.o \
 	fpreg.o \
 	callback_srm.o srm_puts.o srm_printk.o
 
diff --git a/arch/alpha/lib/strcasecmp.c b/arch/alpha/lib/strcasecmp.c
deleted file mode 100644
index 4e57a216feaf..000000000000
--- a/arch/alpha/lib/strcasecmp.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * linux/arch/alpha/lib/strcasecmp.c
- */
-
-#include <linux/string.h>
-
-
-/* We handle nothing here except the C locale.  Since this is used in
-   only one place, on strings known to contain only 7 bit ASCII, this
-   is ok.  */
-
-int strcasecmp(const char *a, const char *b)
-{
-	int ca, cb;
-
-	do {
-		ca = *a++ & 0xff;
-		cb = *b++ & 0xff;
-		if (ca >= 'A' && ca <= 'Z')
-			ca += 'a' - 'A';
-		if (cb >= 'A' && cb <= 'Z')
-			cb += 'a' - 'A';
-	} while (ca == cb && ca != '\0');
-
-	return ca - cb;
-}
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ecee596d28f6..2f8e9c02c92a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcasecmp);
-EXPORT_SYMBOL(strncasecmp);
 
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 4b1ba49fbd9e..450258de7ca1 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -7,13 +7,12 @@ EXTRA_CFLAGS += -mno-minimal-toc
 endif
 
 ifeq ($(CONFIG_PPC_MERGE),y)
-obj-y			:= string.o strcase.o
+obj-y			:= string.o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
 endif
 
 obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
-			   memcpy_64.o usercopy_64.o mem_64.o string.o \
-			   strcase.o
+			   memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_QUICC_ENGINE) += rheap.o
 obj-$(CONFIG_XMON)	+= sstep.o
 obj-$(CONFIG_KPROBES)	+= sstep.o
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
deleted file mode 100644
index f8ec1eba3fdd..000000000000
--- a/arch/powerpc/lib/strcase.c
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-
-int strcasecmp(const char *s1, const char *s2)
-{
-	int c1, c2;
-
-	do {
-		c1 = tolower(*s1++);
-		c2 = tolower(*s2++);
-	} while (c1 == c2 && c1 != 0);
-	return c1 - c2;
-}
-
-int strncasecmp(const char *s1, const char *s2, size_t n)
-{
-	int c1, c2;
-
-	do {
-		c1 = tolower(*s1++);
-		c2 = tolower(*s2++);
-	} while ((--n > 0) && c1 == c2 && c1 != 0);
-	return c1 - c2;
-}
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 1318b6f4c3df..4ad499605d05 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -93,8 +93,6 @@ EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcasecmp);
-EXPORT_SYMBOL(strncasecmp);
 EXPORT_SYMBOL(__div64_32);
 
 EXPORT_SYMBOL(csum_partial);
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
index 50358e4ea159..422bef9bae7b 100644
--- a/arch/ppc/lib/Makefile
+++ b/arch/ppc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y			:= checksum.o string.o strcase.o div64.o
+obj-y			:= checksum.o string.o div64.o
 
 obj-$(CONFIG_8xx)	+= rheap.o
 obj-$(CONFIG_CPM2)	+= rheap.o
diff --git a/arch/ppc/lib/strcase.c b/arch/ppc/lib/strcase.c
deleted file mode 100644
index 3b0094cc2b52..000000000000
--- a/arch/ppc/lib/strcase.c
+++ /dev/null
@@ -1,24 +0,0 @@
-#include <linux/ctype.h>
-#include <linux/types.h>
-
-int strcasecmp(const char *s1, const char *s2)
-{
-	int c1, c2;
-
-	do {
-		c1 = tolower(*s1++);
-		c2 = tolower(*s2++);
-	} while (c1 == c2 && c1 != 0);
-	return c1 - c2;
-}
-
-int strncasecmp(const char *s1, const char *s2, size_t n)
-{
-	int c1, c2;
-
-	do {
-		c1 = tolower(*s1++);
-		c2 = tolower(*s2++);
-	} while ((--n > 0) && c1 == c2 && c1 != 0);
-	return c1 - c2;
-}
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index b5681e3f9684..0b9cca5c7cb4 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -3,7 +3,7 @@
 #
 
 lib-y	= delay.o memset.o memmove.o memchr.o \
-	  checksum.o strcasecmp.o strlen.o div64.o udivdi3.o \
+	  checksum.o strlen.o div64.o udivdi3.o \
 	  div64-generic.o
 
 memcpy-y := memcpy.o
diff --git a/arch/sh/lib/strcasecmp.c b/arch/sh/lib/strcasecmp.c
deleted file mode 100644
index 4e57a216feaf..000000000000
--- a/arch/sh/lib/strcasecmp.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * linux/arch/alpha/lib/strcasecmp.c
- */
-
-#include <linux/string.h>
-
-
-/* We handle nothing here except the C locale.  Since this is used in
-   only one place, on strings known to contain only 7 bit ASCII, this
-   is ok.  */
-
-int strcasecmp(const char *a, const char *b)
-{
-	int ca, cb;
-
-	do {
-		ca = *a++ & 0xff;
-		cb = *b++ & 0xff;
-		if (ca >= 'A' && ca <= 'Z')
-			ca += 'a' - 'A';
-		if (cb >= 'A' && cb <= 'Z')
-			cb += 'a' - 'A';
-	} while (ca == cb && ca != '\0');
-
-	return ca - cb;
-}
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index ba58c3a061fd..7bb86b9cdaa3 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -25,7 +25,7 @@
 struct linux_ebus *ebus_chain = NULL;
 
 /* We are together with pcic.c under CONFIG_PCI. */
-extern unsigned int pcic_pin_to_irq(unsigned int, char *name);
+extern unsigned int pcic_pin_to_irq(unsigned int, const char *name);
 
 /*
  * IRQ Blacklist
@@ -69,7 +69,7 @@ static inline unsigned long ebus_alloc(size_t size)
 
 /*
  */
-int __init ebus_blacklist_irq(char *name)
+int __init ebus_blacklist_irq(const char *name)
 {
 	struct ebus_device_irq *dp;
 
@@ -86,8 +86,8 @@ int __init ebus_blacklist_irq(char *name)
 void __init fill_ebus_child(struct device_node *dp,
 			    struct linux_ebus_child *dev)
 {
-	int *regs;
-	int *irqs;
+	const int *regs;
+	const int *irqs;
 	int i, len;
 
 	dev->prom_node = dp;
@@ -146,9 +146,9 @@ void __init fill_ebus_child(struct device_node *dp,
 
 void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
 {
-	struct linux_prom_registers *regs;
+	const struct linux_prom_registers *regs;
 	struct linux_ebus_child *child;
-	int *irqs;
+	const int *irqs;
 	int i, n, len;
 	unsigned long baseaddr;
 
@@ -269,7 +269,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
 
 void __init ebus_init(void)
 {
-	struct linux_prom_pci_registers *regs;
+	const struct linux_prom_pci_registers *regs;
 	struct linux_pbm_info *pbm;
 	struct linux_ebus_device *dev;
 	struct linux_ebus *ebus;
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 48c24f7518c2..fd7f8cb668a3 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -210,7 +210,7 @@ struct of_bus {
 				       int *addrc, int *sizec);
 	int		(*map)(u32 *addr, const u32 *range,
 			       int na, int ns, int pna);
-	unsigned int	(*get_flags)(u32 *addr);
+	unsigned int	(*get_flags)(const u32 *addr);
 };
 
 /*
@@ -270,7 +270,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
 	return 0;
 }
 
-static unsigned int of_bus_default_get_flags(u32 *addr)
+static unsigned int of_bus_default_get_flags(const u32 *addr)
 {
 	return IORESOURCE_MEM;
 }
@@ -334,7 +334,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
 	return 0;
 }
 
-static unsigned int of_bus_pci_get_flags(u32 *addr)
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
 {
 	unsigned int flags = 0;
 	u32 w = addr[0];
@@ -375,7 +375,7 @@ static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
 	return of_bus_default_map(addr, range, na, ns, pna);
 }
 
-static unsigned int of_bus_sbus_get_flags(u32 *addr)
+static unsigned int of_bus_sbus_get_flags(const u32 *addr)
 {
 	return IORESOURCE_MEM;
 }
@@ -432,7 +432,7 @@ static int __init build_one_resource(struct device_node *parent,
 					     u32 *addr,
 					     int na, int ns, int pna)
 {
-	u32 *ranges;
+	const u32 *ranges;
 	unsigned int rlen;
 	int rone;
 
@@ -470,7 +470,7 @@ static void __init build_device_resources(struct of_device *op,
 	struct of_bus *bus;
 	int na, ns;
 	int index, num_reg;
-	void *preg;
+	const void *preg;
 
 	if (!parent)
 		return;
@@ -492,7 +492,7 @@ static void __init build_device_resources(struct of_device *op,
 	for (index = 0; index < num_reg; index++) {
 		struct resource *r = &op->resource[index];
 		u32 addr[OF_MAX_ADDR_CELLS];
-		u32 *reg = (preg + (index * ((na + ns) * 4)));
+		const u32 *reg = (preg + (index * ((na + ns) * 4)));
 		struct device_node *dp = op->node;
 		struct device_node *pp = p_op->node;
 		struct of_bus *pbus, *dbus;
@@ -559,7 +559,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
 						 struct device *parent)
 {
 	struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
-	struct linux_prom_irqs *intr;
+	const struct linux_prom_irqs *intr;
 	int len, i;
 
 	if (!op)
@@ -579,7 +579,8 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
 		for (i = 0; i < op->num_irqs; i++)
 			op->irqs[i] = intr[i].pri;
 	} else {
-		unsigned int *irq = of_get_property(dp, "interrupts", &len);
+		const unsigned int *irq =
+			of_get_property(dp, "interrupts", &len);
 
 		if (irq) {
 			op->num_irqs = len / sizeof(unsigned int);
@@ -594,7 +595,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
 			0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
 		};
 		struct device_node *io_unit, *sbi = dp->parent;
-		struct linux_prom_registers *regs;
+		const struct linux_prom_registers *regs;
 		int board, slot;
 
 		while (sbi) {
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 1c927c538b8b..5ca7e8f42bd9 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -37,8 +37,6 @@
 #include <asm/irq_regs.h>
 
 
-unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
-
 /*
  * I studied different documents and many live PROMs both from 2.30
  * family and 3.xx versions. I came to the amazing conclusion: there is
@@ -681,7 +679,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
  * pcic_pin_to_irq() is exported to ebus.c.
  */
 unsigned int
-pcic_pin_to_irq(unsigned int pin, char *name)
+pcic_pin_to_irq(unsigned int pin, const char *name)
 {
 	struct linux_pcic *pcic = &pcic0;
 	unsigned int irq;
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 2cc302b6bec0..eed140b3c739 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -32,12 +32,13 @@ static struct device_node *allnodes;
  */
 static DEFINE_RWLOCK(devtree_lock);
 
-int of_device_is_compatible(struct device_node *device, const char *compat)
+int of_device_is_compatible(const struct device_node *device,
+			    const char *compat)
 {
 	const char* cp;
 	int cplen, l;
 
-	cp = (char *) of_get_property(device, "compatible", &cplen);
+	cp = of_get_property(device, "compatible", &cplen);
 	if (cp == NULL)
 		return 0;
 	while (cplen > 0) {
@@ -150,13 +151,14 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 }
 EXPORT_SYMBOL(of_find_compatible_node);
 
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+				  const char *name,
 				  int *lenp)
 {
 	struct property *pp;
 
 	for (pp = np->properties; pp != 0; pp = pp->next) {
-		if (strcmp(pp->name, name) == 0) {
+		if (strcasecmp(pp->name, name) == 0) {
 			if (lenp != 0)
 				*lenp = pp->length;
 			break;
@@ -170,7 +172,8 @@ EXPORT_SYMBOL(of_find_property);
  * Find a property with a given name for a given node
  * and return the value.
  */
-void *of_get_property(struct device_node *np, const char *name, int *lenp)
+const void *of_get_property(const struct device_node *np, const char *name,
+			    int *lenp)
 {
 	struct property *pp = of_find_property(np,name,lenp);
 	return pp ? pp->value : NULL;
@@ -192,7 +195,7 @@ EXPORT_SYMBOL(of_getintprop_default);
 
 int of_n_addr_cells(struct device_node *np)
 {
-	int* ip;
+	const int* ip;
 	do {
 		if (np->parent)
 			np = np->parent;
@@ -207,7 +210,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
 
 int of_n_size_cells(struct device_node *np)
 {
-	int* ip;
+	const int* ip;
 	do {
 		if (np->parent)
 			np = np->parent;
@@ -239,7 +242,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
 	while (*prevp) {
 		struct property *prop = *prevp;
 
-		if (!strcmp(prop->name, name)) {
+		if (!strcasecmp(prop->name, name)) {
 			void *old_val = prop->value;
 			int ret;
 
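
With the prom.c change above, of_get_property() takes a const struct device_node * and returns const void *, so callers keep the returned property data behind const-qualified pointers instead of casting constness away; the arch/sparc/kernel/time.c hunk that follows shows exactly this pattern. A minimal caller sketch under those prototypes (the function name and the model string compared against are illustrative, not taken from this patch):

static int example_clock_probe(struct device_node *dp)
{
	/* property data is firmware-owned and must be treated as read-only */
	const char *model = of_get_property(dp, "model", NULL);

	if (!model)
		return -ENODEV;
	return strcmp(model, "mk48t59") ? -ENODEV : 0;
}
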
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 9bb1240aaf8a..f1401b57ccc7 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -301,7 +301,7 @@ static __inline__ void sun4_clock_probe(void)
 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 {
 	struct device_node *dp = op->node;
-	char *model = of_get_property(dp, "model", NULL);
+	const char *model = of_get_property(dp, "model", NULL);
 
 	if (!model)
 		return -ENODEV;
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1a6348b565fb..590a41b864b9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -19,6 +19,14 @@ config SPARC64
 	  SPARC64 ports; its web page is available at
 	  <http://www.ultralinux.org/>.
 
+config GENERIC_TIME
+	bool
+	default y
+
+config GENERIC_CLOCKEVENTS
+	bool
+	default y
+
 config 64BIT
 	def_bool y
 
@@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT
 	bool
 	default y
 
-config TIME_INTERPOLATION
-	bool
-	default y
-
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 	default y
@@ -113,6 +117,8 @@ config GENERIC_HARDIRQS
 
 menu "General machine setup"
 
+source "kernel/time/Kconfig"
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
@@ -214,6 +220,7 @@ config ARCH_SPARSEMEM_ENABLE
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
+	select SPARSEMEM_STATIC
 
 config LARGE_ALLOCS
 	def_bool y
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
index e724c54af029..c65b2f9c98d8 100644
--- a/arch/sparc64/kernel/central.c
+++ b/arch/sparc64/kernel/central.c
@@ -32,7 +32,7 @@ static void central_probe_failure(int line)
 static void central_ranges_init(struct linux_central *central)
 {
 	struct device_node *dp = central->prom_node;
-	void *pval;
+	const void *pval;
 	int len;
 
 	central->num_central_ranges = 0;
@@ -47,7 +47,7 @@ static void central_ranges_init(struct linux_central *central)
 static void fhc_ranges_init(struct linux_fhc *fhc)
 {
 	struct device_node *dp = fhc->prom_node;
-	void *pval;
+	const void *pval;
 	int len;
 
 	fhc->num_fhc_ranges = 0;
@@ -119,7 +119,7 @@ static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
 static void probe_other_fhcs(void)
 {
 	struct device_node *dp;
-	struct linux_prom64_registers *fpregs;
+	const struct linux_prom64_registers *fpregs;
 
 	for_each_node_by_name(dp, "fhc") {
 		struct linux_fhc *fhc;
@@ -190,7 +190,8 @@ static void probe_clock_board(struct linux_central *central,
 			      struct device_node *fp)
 {
 	struct device_node *dp;
-	struct linux_prom_registers cregs[3], *pr;
+	struct linux_prom_registers cregs[3];
+	const struct linux_prom_registers *pr;
 	int nslots, tmp, nregs;
 
 	dp = fp->child;
@@ -299,7 +300,8 @@ static void init_all_fhc_hw(void)
 
 void central_probe(void)
 {
-	struct linux_prom_registers fpregs[6], *pr;
+	struct linux_prom_registers fpregs[6];
+	const struct linux_prom_registers *pr;
 	struct linux_fhc *fhc;
 	struct device_node *dp, *fp;
 	int err;
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
index 9699abeb9907..777d34577045 100644
--- a/arch/sparc64/kernel/chmc.c
+++ b/arch/sparc64/kernel/chmc.c
@@ -343,8 +343,8 @@ static int init_one_mctrl(struct device_node *dp)
 {
 	struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
 	int portid = of_getintprop_default(dp, "portid", -1);
-	struct linux_prom64_registers *regs;
-	void *pval;
+	const struct linux_prom64_registers *regs;
+	const void *pval;
 	int len;
 
 	if (!mp)
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 35bf895fdeee..0ace17bafba4 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -285,7 +285,7 @@ static void __init fill_ebus_child(struct device_node *dp,
 			    int non_standard_regs)
 {
 	struct of_device *op;
-	int *regs;
+	const int *regs;
 	int i, len;
 
 	dev->prom_node = dp;
@@ -438,11 +438,9 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
 
 void __init ebus_init(void)
 {
-	struct pci_pbm_info *pbm;
 	struct linux_ebus_device *dev;
 	struct linux_ebus *ebus;
 	struct pci_dev *pdev;
-	struct pcidev_cookie *cookie;
 	struct device_node *dp;
 	int is_rio;
 	int num_ebus = 0;
@@ -453,8 +451,7 @@ void __init ebus_init(void)
 		return;
 	}
 
-	cookie = pdev->sysdata;
-	dp = cookie->prom_node;
+	dp = pci_device_to_OF_node(pdev);
 
 	ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
 	ebus->next = NULL;
@@ -480,8 +477,7 @@ void __init ebus_init(void)
 			break;
 		}
 		ebus->is_rio = is_rio;
-		cookie = pdev->sysdata;
-		dp = cookie->prom_node;
+		dp = pci_device_to_OF_node(pdev);
 		continue;
 	}
 	printk("ebus%d:", num_ebus);
@@ -489,7 +485,6 @@ void __init ebus_init(void)
 	ebus->index = num_ebus;
 	ebus->prom_node = dp;
 	ebus->self = pdev;
-	ebus->parent = pbm = cookie->pbm;
 
 	ebus->ofdev.node = dp;
 	ebus->ofdev.dev.parent = &pdev->dev;
@@ -531,8 +526,7 @@ void __init ebus_init(void)
 		if (!pdev)
 			break;
 
-		cookie = pdev->sysdata;
-		dp = cookie->prom_node;
+		dp = pci_device_to_OF_node(pdev);
 
 		ebus->next = ebus_alloc(sizeof(struct linux_ebus));
 		ebus = ebus->next;
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c443db184371..6241e3dbbd57 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq)
 		ino, virt_irq);
 }
 
-#ifndef CONFIG_SMP
-extern irqreturn_t timer_interrupt(int, void *);
-
-void timer_irq(int irq, struct pt_regs *regs)
-{
-	unsigned long clr_mask = 1 << irq;
-	unsigned long tick_mask = tick_ops->softint_mask;
-	struct pt_regs *old_regs;
-
-	if (get_softint() & tick_mask) {
-		irq = 0;
-		clr_mask = tick_mask;
-	}
-	clear_softint(clr_mask);
-
-	old_regs = set_irq_regs(regs);
-	irq_enter();
-
-	kstat_this_cpu.irqs[0]++;
-	timer_interrupt(irq, NULL);
-
-	irq_exit();
-	set_irq_regs(old_regs);
-}
-#endif
-
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	struct ino_bucket *bucket;
@@ -653,7 +627,7 @@ static u64 prom_limit0, prom_limit1;
 static void map_prom_timers(void)
 {
 	struct device_node *dp;
-	unsigned int *addr;
+	const unsigned int *addr;
 
 	/* PROM timer node hangs out in the top level of device siblings... */
 	dp = of_find_node_by_path("/");
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 98721a8f8619..6a6882e57ff2 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -24,27 +24,9 @@ static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
 
 static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev)
 {
-	struct linux_prom_registers *pregs;
-	unsigned long base, len;
-	int prop_len;
-
-	pregs = of_get_property(isa_dev->prom_node, "reg", &prop_len);
-	if (!pregs)
-		return;
-
-	/* Only the first one is interesting. */
-	len = pregs[0].reg_size;
-	base = (((unsigned long)pregs[0].which_io << 32) |
-		(unsigned long)pregs[0].phys_addr);
-	base += isa_dev->bus->parent->io_space.start;
-
-	isa_dev->resource.start = base;
-	isa_dev->resource.end = (base + len - 1UL);
-	isa_dev->resource.flags = IORESOURCE_IO;
-	isa_dev->resource.name = isa_dev->prom_node->name;
+	struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
 
-	request_resource(&isa_dev->bus->parent->io_space,
-			 &isa_dev->resource);
+	memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource));
 }
 
 static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev)
@@ -158,19 +140,10 @@
 
 	pdev = NULL;
 	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
-		struct pcidev_cookie *pdev_cookie;
-		struct pci_pbm_info *pbm;
 		struct sparc_isa_bridge *isa_br;
 		struct device_node *dp;
 
-		pdev_cookie = pdev->sysdata;
-		if (!pdev_cookie) {
-			printk("ISA: Warning, ISA bridge ignored due to "
-			       "lack of OBP data.\n");
-			continue;
-		}
-		pbm = pdev_cookie->pbm;
-		dp = pdev_cookie->prom_node;
+		dp = pci_device_to_OF_node(pdev);
 
 		isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
 		if (!isa_br) {
@@ -195,10 +168,9 @@ void __init isa_init(void)
 		isa_br->next = isa_chain;
 		isa_chain = isa_br;
 
-		isa_br->parent = pbm;
 		isa_br->self = pdev;
 		isa_br->index = index++;
-		isa_br->prom_node = pdev_cookie->prom_node;
+		isa_br->prom_node = dp;
 
 		printk("isa%d:", isa_br->index);
 
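
The sparc64 ebus.c and isa.c hunks above replace the old pdev->sysdata / struct pcidev_cookie lookups with pci_device_to_OF_node(pdev). Going by the dev.archdata fields initialized in the pci.c hunk further down (sd->prom_node = node in of_create_pci_dev()), such a helper can be little more than an accessor; a hedged sketch, not necessarily the exact upstream definition:

static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
	/* archdata.prom_node is filled in when the pci_dev is created
	 * from the OF device tree (see of_create_pci_dev() below)
	 */
	return pdev->dev.archdata.prom_node;
}
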
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index fb9bf1e4d036..9ac9a307999a 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -245,7 +245,7 @@ struct of_bus {
245 int *addrc, int *sizec); 245 int *addrc, int *sizec);
246 int (*map)(u32 *addr, const u32 *range, 246 int (*map)(u32 *addr, const u32 *range,
247 int na, int ns, int pna); 247 int na, int ns, int pna);
248 unsigned int (*get_flags)(u32 *addr); 248 unsigned int (*get_flags)(const u32 *addr);
249}; 249};
250 250
251/* 251/*
@@ -305,7 +305,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
305 return 0; 305 return 0;
306} 306}
307 307
308static unsigned int of_bus_default_get_flags(u32 *addr) 308static unsigned int of_bus_default_get_flags(const u32 *addr)
309{ 309{
310 return IORESOURCE_MEM; 310 return IORESOURCE_MEM;
311} 311}
@@ -317,6 +317,11 @@ static unsigned int of_bus_default_get_flags(u32 *addr)
317static int of_bus_pci_match(struct device_node *np) 317static int of_bus_pci_match(struct device_node *np)
318{ 318{
319 if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { 319 if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
320 const char *model = of_get_property(np, "model", NULL);
321
322 if (model && !strcmp(model, "SUNW,simba"))
323 return 0;
324
320 /* Do not do PCI specific frobbing if the 325 /* Do not do PCI specific frobbing if the
321 * PCI bridge lacks a ranges property. We 326 * PCI bridge lacks a ranges property. We
322 * want to pass it through up to the next 327 * want to pass it through up to the next
@@ -332,6 +337,21 @@ static int of_bus_pci_match(struct device_node *np)
332 return 0; 337 return 0;
333} 338}
334 339
340static int of_bus_simba_match(struct device_node *np)
341{
342 const char *model = of_get_property(np, "model", NULL);
343
344 if (model && !strcmp(model, "SUNW,simba"))
345 return 1;
346 return 0;
347}
348
349static int of_bus_simba_map(u32 *addr, const u32 *range,
350 int na, int ns, int pna)
351{
352 return 0;
353}
354
335static void of_bus_pci_count_cells(struct device_node *np, 355static void of_bus_pci_count_cells(struct device_node *np,
336 int *addrc, int *sizec) 356 int *addrc, int *sizec)
337{ 357{
@@ -369,7 +389,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
369 return 0; 389 return 0;
370} 390}
371 391
372static unsigned int of_bus_pci_get_flags(u32 *addr) 392static unsigned int of_bus_pci_get_flags(const u32 *addr)
373{ 393{
374 unsigned int flags = 0; 394 unsigned int flags = 0;
375 u32 w = addr[0]; 395 u32 w = addr[0];
@@ -436,6 +456,15 @@ static struct of_bus of_busses[] = {
436 .map = of_bus_pci_map, 456 .map = of_bus_pci_map,
437 .get_flags = of_bus_pci_get_flags, 457 .get_flags = of_bus_pci_get_flags,
438 }, 458 },
459 /* SIMBA */
460 {
461 .name = "simba",
462 .addr_prop_name = "assigned-addresses",
463 .match = of_bus_simba_match,
464 .count_cells = of_bus_pci_count_cells,
465 .map = of_bus_simba_map,
466 .get_flags = of_bus_pci_get_flags,
467 },
439 /* SBUS */ 468 /* SBUS */
440 { 469 {
441 .name = "sbus", 470 .name = "sbus",
@@ -482,7 +511,7 @@ static int __init build_one_resource(struct device_node *parent,
482 u32 *addr, 511 u32 *addr,
483 int na, int ns, int pna) 512 int na, int ns, int pna)
484{ 513{
485 u32 *ranges; 514 const u32 *ranges;
486 unsigned int rlen; 515 unsigned int rlen;
487 int rone; 516 int rone;
488 517
@@ -513,7 +542,7 @@ static int __init build_one_resource(struct device_node *parent,
513 542
514static int __init use_1to1_mapping(struct device_node *pp) 543static int __init use_1to1_mapping(struct device_node *pp)
515{ 544{
516 char *model; 545 const char *model;
517 546
518 /* If this is on the PMU bus, don't try to translate it even 547 /* If this is on the PMU bus, don't try to translate it even
519 * if a ranges property exists. 548 * if a ranges property exists.
@@ -548,7 +577,7 @@ static void __init build_device_resources(struct of_device *op,
548 struct of_bus *bus; 577 struct of_bus *bus;
549 int na, ns; 578 int na, ns;
550 int index, num_reg; 579 int index, num_reg;
551 void *preg; 580 const void *preg;
552 581
553 if (!parent) 582 if (!parent)
554 return; 583 return;
@@ -578,7 +607,7 @@ static void __init build_device_resources(struct of_device *op,
578 for (index = 0; index < num_reg; index++) { 607 for (index = 0; index < num_reg; index++) {
579 struct resource *r = &op->resource[index]; 608 struct resource *r = &op->resource[index];
580 u32 addr[OF_MAX_ADDR_CELLS]; 609 u32 addr[OF_MAX_ADDR_CELLS];
581 u32 *reg = (preg + (index * ((na + ns) * 4))); 610 const u32 *reg = (preg + (index * ((na + ns) * 4)));
582 struct device_node *dp = op->node; 611 struct device_node *dp = op->node;
583 struct device_node *pp = p_op->node; 612 struct device_node *pp = p_op->node;
584 struct of_bus *pbus, *dbus; 613 struct of_bus *pbus, *dbus;
@@ -643,14 +672,14 @@ static void __init build_device_resources(struct of_device *op,
643 672
644static struct device_node * __init 673static struct device_node * __init
645apply_interrupt_map(struct device_node *dp, struct device_node *pp, 674apply_interrupt_map(struct device_node *dp, struct device_node *pp,
646 u32 *imap, int imlen, u32 *imask, 675 const u32 *imap, int imlen, const u32 *imask,
647 unsigned int *irq_p) 676 unsigned int *irq_p)
648{ 677{
649 struct device_node *cp; 678 struct device_node *cp;
650 unsigned int irq = *irq_p; 679 unsigned int irq = *irq_p;
651 struct of_bus *bus; 680 struct of_bus *bus;
652 phandle handle; 681 phandle handle;
653 u32 *reg; 682 const u32 *reg;
654 int na, num_reg, i; 683 int na, num_reg, i;
655 684
656 bus = of_match_bus(pp); 685 bus = of_match_bus(pp);
@@ -705,7 +734,7 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
705 struct device_node *pp, 734 struct device_node *pp,
706 unsigned int irq) 735 unsigned int irq)
707{ 736{
708 struct linux_prom_pci_registers *regs; 737 const struct linux_prom_pci_registers *regs;
709 unsigned int bus, devfn, slot, ret; 738 unsigned int bus, devfn, slot, ret;
710 739
711 if (irq < 1 || irq > 4) 740 if (irq < 1 || irq > 4)
@@ -730,12 +759,6 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
730 * D: 2-bit slot number, derived from PCI device number as 759 * D: 2-bit slot number, derived from PCI device number as
731 * (dev - 1) for bus A, or (dev - 2) for bus B 760 * (dev - 1) for bus A, or (dev - 2) for bus B
732 * L: 2-bit line number 761 * L: 2-bit line number
733 *
734 * Actually, more "portable" way to calculate the funky
735 * slot number is to subtract pbm->pci_first_slot from the
736 * device number, and that's exactly what the pre-OF
737 * sparc64 code did, but we're building this stuff generically
738 * using the OBP tree, not in the PCI controller layer.
739 */ 762 */
740 if (bus & 0x80) { 763 if (bus & 0x80) {
741 /* PBM-A */ 764 /* PBM-A */
@@ -794,7 +817,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
794 pp = dp->parent; 817 pp = dp->parent;
795 ip = NULL; 818 ip = NULL;
796 while (pp) { 819 while (pp) {
797 void *imap, *imsk; 820 const void *imap, *imsk;
798 int imlen; 821 int imlen;
799 822
800 imap = of_get_property(pp, "interrupt-map", &imlen); 823 imap = of_get_property(pp, "interrupt-map", &imlen);
@@ -859,7 +882,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
859 struct device *parent) 882 struct device *parent)
860{ 883{
861 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); 884 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
862 unsigned int *irq; 885 const unsigned int *irq;
863 int len, i; 886 int len, i;
864 887
865 if (!op) 888 if (!op)
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 12109886bb1e..023af41ad68d 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -1,9 +1,11 @@
1/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $ 1/* pci.c: UltraSparc PCI controller support.
2 * pci.c: UltraSparc PCI controller support.
3 * 2 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) 5 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 *
7 * OF tree based PCI bus probing taken from the PowerPC port
8 * with minor modifications, see there for credits.
7 */ 9 */
8 10
9#include <linux/module.h> 11#include <linux/module.h>
@@ -24,6 +26,9 @@
24#include <asm/ebus.h> 26#include <asm/ebus.h>
25#include <asm/isa.h> 27#include <asm/isa.h>
26#include <asm/prom.h> 28#include <asm/prom.h>
29#include <asm/apb.h>
30
31#include "pci_impl.h"
27 32
28unsigned long pci_memspace_mask = 0xffffffffUL; 33unsigned long pci_memspace_mask = 0xffffffffUL;
29 34
@@ -277,10 +282,10 @@ int __init pcic_present(void)
277 return pci_controller_scan(pci_is_controller); 282 return pci_controller_scan(pci_is_controller);
278} 283}
279 284
280struct pci_iommu_ops *pci_iommu_ops; 285const struct pci_iommu_ops *pci_iommu_ops;
281EXPORT_SYMBOL(pci_iommu_ops); 286EXPORT_SYMBOL(pci_iommu_ops);
282 287
283extern struct pci_iommu_ops pci_sun4u_iommu_ops, 288extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
284 pci_sun4v_iommu_ops; 289 pci_sun4v_iommu_ops;
285 290
286/* Find each controller in the system, attach and initialize 291/* Find each controller in the system, attach and initialize
@@ -300,6 +305,467 @@ static void __init pci_controller_probe(void)
300 pci_controller_scan(pci_controller_init); 305 pci_controller_scan(pci_controller_init);
301} 306}
302 307
308static unsigned long pci_parse_of_flags(u32 addr0)
309{
310 unsigned long flags = 0;
311
312 if (addr0 & 0x02000000) {
313 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
314 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
315 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
316 if (addr0 & 0x40000000)
317 flags |= IORESOURCE_PREFETCH
318 | PCI_BASE_ADDRESS_MEM_PREFETCH;
319 } else if (addr0 & 0x01000000)
320 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
321 return flags;
322}
323
324/* The of_device layer has translated all of the assigned-address properties
325 * into physical address resources, we only have to figure out the register
326 * mapping.
327 */
328static void pci_parse_of_addrs(struct of_device *op,
329 struct device_node *node,
330 struct pci_dev *dev)
331{
332 struct resource *op_res;
333 const u32 *addrs;
334 int proplen;
335
336 addrs = of_get_property(node, "assigned-addresses", &proplen);
337 if (!addrs)
338 return;
339 printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
340 op_res = &op->resource[0];
341 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
342 struct resource *res;
343 unsigned long flags;
344 int i;
345
346 flags = pci_parse_of_flags(addrs[0]);
347 if (!flags)
348 continue;
349 i = addrs[0] & 0xff;
350 printk(" start: %lx, end: %lx, i: %x\n",
351 op_res->start, op_res->end, i);
352
353 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
354 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
355 } else if (i == dev->rom_base_reg) {
356 res = &dev->resource[PCI_ROM_RESOURCE];
357 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
358 } else {
359 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
360 continue;
361 }
362 res->start = op_res->start;
363 res->end = op_res->end;
364 res->flags = flags;
365 res->name = pci_name(dev);
366 }
367}
368
369struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
370 struct device_node *node,
371 struct pci_bus *bus, int devfn,
372 int host_controller)
373{
374 struct dev_archdata *sd;
375 struct pci_dev *dev;
376 const char *type;
377 u32 class;
378
379 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
380 if (!dev)
381 return NULL;
382
383 sd = &dev->dev.archdata;
384 sd->iommu = pbm->iommu;
385 sd->stc = &pbm->stc;
386 sd->host_controller = pbm;
387 sd->prom_node = node;
388 sd->op = of_find_device_by_node(node);
389 sd->msi_num = 0xffffffff;
390
391 type = of_get_property(node, "device_type", NULL);
392 if (type == NULL)
393 type = "";
394
395 printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n",
396 devfn, type, host_controller);
397
398 dev->bus = bus;
399 dev->sysdata = node;
400 dev->dev.parent = bus->bridge;
401 dev->dev.bus = &pci_bus_type;
402 dev->devfn = devfn;
403 dev->multifunction = 0; /* maybe a lie? */
404
405 if (host_controller) {
406 dev->vendor = 0x108e;
407 dev->device = 0x8000;
408 dev->subsystem_vendor = 0x0000;
409 dev->subsystem_device = 0x0000;
410 dev->cfg_size = 256;
411 dev->class = PCI_CLASS_BRIDGE_HOST << 8;
412 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
413 0x00, PCI_SLOT(devfn), PCI_FUNC(devfn));
414 } else {
415 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
416 dev->device = of_getintprop_default(node, "device-id", 0xffff);
417 dev->subsystem_vendor =
418 of_getintprop_default(node, "subsystem-vendor-id", 0);
419 dev->subsystem_device =
420 of_getintprop_default(node, "subsystem-id", 0);
421
422 dev->cfg_size = pci_cfg_space_size(dev);
423
424 /* We can't actually use the firmware value, we have
425 * to read what is in the register right now. One
426 * reason is that in the case of IDE interfaces the
427 * firmware can sample the value before the the IDE
428 * interface is programmed into native mode.
429 */
430 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
431 dev->class = class >> 8;
432
433 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
434 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
435 }
436 printk(" class: 0x%x device name: %s\n",
437 dev->class, pci_name(dev));
438
439 dev->current_state = 4; /* unknown power state */
440 dev->error_state = pci_channel_io_normal;
441
442 if (host_controller) {
443 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
444 dev->rom_base_reg = PCI_ROM_ADDRESS1;
445 dev->irq = PCI_IRQ_NONE;
446 } else {
447 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
448 /* a PCI-PCI bridge */
449 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
450 dev->rom_base_reg = PCI_ROM_ADDRESS1;
451 } else if (!strcmp(type, "cardbus")) {
452 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
453 } else {
454 dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
455 dev->rom_base_reg = PCI_ROM_ADDRESS;
456
457 dev->irq = sd->op->irqs[0];
458 if (dev->irq == 0xffffffff)
459 dev->irq = PCI_IRQ_NONE;
460 }
461 }
462 pci_parse_of_addrs(sd->op, node, dev);
463
464 printk(" adding to system ...\n");
465
466 pci_device_add(dev, bus);
467
468 return dev;
469}
470
471static void __init apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
472{
473 u32 idx, first, last;
474
475 first = 8;
476 last = 0;
477 for (idx = 0; idx < 8; idx++) {
478 if ((map & (1 << idx)) != 0) {
479 if (first > idx)
480 first = idx;
481 if (last < idx)
482 last = idx;
483 }
484 }
485
486 *first_p = first;
487 *last_p = last;
488}
489
490static void __init pci_resource_adjust(struct resource *res,
491 struct resource *root)
492{
493 res->start += root->start;
494 res->end += root->start;
495}
496
497/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
498 * a proper 'ranges' property.
499 */
500static void __init apb_fake_ranges(struct pci_dev *dev,
501 struct pci_bus *bus,
502 struct pci_pbm_info *pbm)
503{
504 struct resource *res;
505 u32 first, last;
506 u8 map;
507
508 pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
509 apb_calc_first_last(map, &first, &last);
510 res = bus->resource[0];
511 res->start = (first << 21);
512 res->end = (last << 21) + ((1 << 21) - 1);
513 res->flags = IORESOURCE_IO;
514 pci_resource_adjust(res, &pbm->io_space);
515
516 pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
517 apb_calc_first_last(map, &first, &last);
518 res = bus->resource[1];
519 res->start = (first << 21);
520 res->end = (last << 21) + ((1 << 21) - 1);
521 res->flags = IORESOURCE_MEM;
522 pci_resource_adjust(res, &pbm->mem_space);
523}
524
525static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
526 struct device_node *node,
527 struct pci_bus *bus);
528
529#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
530
531void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
532 struct device_node *node,
533 struct pci_dev *dev)
534{
535 struct pci_bus *bus;
536 const u32 *busrange, *ranges;
537 int len, i, simba;
538 struct resource *res;
539 unsigned int flags;
540 u64 size;
541
542 printk("of_scan_pci_bridge(%s)\n", node->full_name);
543
544 /* parse bus-range property */
545 busrange = of_get_property(node, "bus-range", &len);
546 if (busrange == NULL || len != 8) {
547 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
548 node->full_name);
549 return;
550 }
551 ranges = of_get_property(node, "ranges", &len);
552 simba = 0;
553 if (ranges == NULL) {
554 const char *model = of_get_property(node, "model", NULL);
555 if (model && !strcmp(model, "SUNW,simba")) {
556 simba = 1;
557 } else {
558 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
559 node->full_name);
560 return;
561 }
562 }
563
564 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
565 if (!bus) {
566 printk(KERN_ERR "Failed to create pci bus for %s\n",
567 node->full_name);
568 return;
569 }
570
571 bus->primary = dev->bus->number;
572 bus->subordinate = busrange[1];
573 bus->bridge_ctl = 0;
574
575 /* parse ranges property, or cook one up by hand for Simba */
576 /* PCI #address-cells == 3 and #size-cells == 2 always */
577 res = &dev->resource[PCI_BRIDGE_RESOURCES];
578 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
579 res->flags = 0;
580 bus->resource[i] = res;
581 ++res;
582 }
583 if (simba) {
584 apb_fake_ranges(dev, bus, pbm);
585 goto simba_cont;
586 }
587 i = 1;
588 for (; len >= 32; len -= 32, ranges += 8) {
589 struct resource *root;
590
591 flags = pci_parse_of_flags(ranges[0]);
592 size = GET_64BIT(ranges, 6);
593 if (flags == 0 || size == 0)
594 continue;
595 if (flags & IORESOURCE_IO) {
596 res = bus->resource[0];
597 if (res->flags) {
598 printk(KERN_ERR "PCI: ignoring extra I/O range"
599 " for bridge %s\n", node->full_name);
600 continue;
601 }
602 root = &pbm->io_space;
603 } else {
604 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
605 printk(KERN_ERR "PCI: too many memory ranges"
606 " for bridge %s\n", node->full_name);
607 continue;
608 }
609 res = bus->resource[i];
610 ++i;
611 root = &pbm->mem_space;
612 }
613
614 res->start = GET_64BIT(ranges, 1);
615 res->end = res->start + size - 1;
616 res->flags = flags;
617
618 /* Another way to implement this would be to add an of_device
619 * layer routine that can calculate a resource for a given
620 * range property value in a PCI device.
621 */
622 pci_resource_adjust(res, root);
623 }
624simba_cont:
625 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
626 bus->number);
627 printk(" bus name: %s\n", bus->name);
628
629 pci_of_scan_bus(pbm, node, bus);
630}
631
632static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
633 struct device_node *node,
634 struct pci_bus *bus)
635{
636 struct device_node *child;
637 const u32 *reg;
638 int reglen, devfn;
639 struct pci_dev *dev;
640
641 printk("PCI: scan_bus[%s] bus no %d\n",
642 node->full_name, bus->number);
643
644 child = NULL;
645 while ((child = of_get_next_child(node, child)) != NULL) {
646 printk(" * %s\n", child->full_name);
647 reg = of_get_property(child, "reg", &reglen);
648 if (reg == NULL || reglen < 20)
649 continue;
650 devfn = (reg[0] >> 8) & 0xff;
651
652 /* create a new pci_dev for this device */
653 dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
654 if (!dev)
655 continue;
656 printk("PCI: dev header type: %x\n", dev->hdr_type);
657
658 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
659 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
660 of_scan_pci_bridge(pbm, child, dev);
661 }
662}
663
664static ssize_t
665show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
666{
667 struct pci_dev *pdev;
668 struct device_node *dp;
669
670 pdev = to_pci_dev(dev);
671 dp = pdev->dev.archdata.prom_node;
672
673 return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
674}
675
676static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
677
678static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
679{
680 struct pci_dev *dev;
681 struct pci_bus *child_bus;
682 int err;
683
684 list_for_each_entry(dev, &bus->devices, bus_list) {
685 /* we don't really care if we can create this file or
686 * not, but we need to assign the result of the call
687 * or the world will fall under alien invasion and
688 * everybody will be frozen on a spaceship ready to be
689 * eaten on alpha centauri by some green and jelly
690 * humanoid.
691 */
692 err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
693 }
694 list_for_each_entry(child_bus, &bus->children, node)
695 pci_bus_register_of_sysfs(child_bus);
696}
697
698int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
699 unsigned int devfn,
700 int where, int size,
701 u32 *value)
702{
703 static u8 fake_pci_config[] = {
704 0x8e, 0x10, /* Vendor: 0x108e (Sun) */
705 0x00, 0x80, /* Device: 0x8000 (PBM) */
706 0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */
707 0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */
708 0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */
709 0x00, /* Cacheline: 0x00 */
710 0x40, /* Latency: 0x40 */
711 0x00, /* Header-Type: 0x00 normal */
712 };
713
714 *value = 0;
715 if (where >= 0 && where < sizeof(fake_pci_config) &&
716 (where + size) >= 0 &&
717 (where + size) < sizeof(fake_pci_config) &&
718 size <= sizeof(u32)) {
719 while (size--) {
720 *value <<= 8;
721 *value |= fake_pci_config[where + size];
722 }
723 }
724
725 return PCIBIOS_SUCCESSFUL;
726}
727
728int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
729 unsigned int devfn,
730 int where, int size,
731 u32 value)
732{
733 return PCIBIOS_SUCCESSFUL;
734}
735
736struct pci_bus * __init pci_scan_one_pbm(struct pci_pbm_info *pbm)
737{
738 struct pci_controller_info *p = pbm->parent;
739 struct device_node *node = pbm->prom_node;
740 struct pci_dev *host_pdev;
741 struct pci_bus *bus;
742
743 printk("PCI: Scanning PBM %s\n", node->full_name);
744
745 /* XXX parent device? XXX */
746 bus = pci_create_bus(NULL, pbm->pci_first_busno, p->pci_ops, pbm);
747 if (!bus) {
748 printk(KERN_ERR "Failed to create bus for %s\n",
749 node->full_name);
750 return NULL;
751 }
752 bus->secondary = pbm->pci_first_busno;
753 bus->subordinate = pbm->pci_last_busno;
754
755 bus->resource[0] = &pbm->io_space;
756 bus->resource[1] = &pbm->mem_space;
757
758 /* Create the dummy host bridge and link it in. */
759 host_pdev = of_create_pci_dev(pbm, node, bus, 0x00, 1);
760 bus->self = host_pdev;
761
762 pci_of_scan_bus(pbm, node, bus);
763 pci_bus_add_devices(bus);
764 pci_bus_register_of_sysfs(bus);
765
766 return bus;
767}
768
303static void __init pci_scan_each_controller_bus(void) 769static void __init pci_scan_each_controller_bus(void)
304{ 770{
305 struct pci_controller_info *p; 771 struct pci_controller_info *p;
@@ -360,8 +826,33 @@ void pcibios_align_resource(void *data, struct resource *res,
360{ 826{
361} 827}
362 828
363int pcibios_enable_device(struct pci_dev *pdev, int mask) 829int pcibios_enable_device(struct pci_dev *dev, int mask)
364{ 830{
831 u16 cmd, oldcmd;
832 int i;
833
834 pci_read_config_word(dev, PCI_COMMAND, &cmd);
835 oldcmd = cmd;
836
837 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
838 struct resource *res = &dev->resource[i];
839
840 /* Only set up the requested stuff */
841 if (!(mask & (1<<i)))
842 continue;
843
844 if (res->flags & IORESOURCE_IO)
845 cmd |= PCI_COMMAND_IO;
846 if (res->flags & IORESOURCE_MEM)
847 cmd |= PCI_COMMAND_MEMORY;
848 }
849
850 if (cmd != oldcmd) {
851 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
852 pci_name(dev), cmd);
853 /* Enable the appropriate bits in the PCI command register. */
854 pci_write_config_word(dev, PCI_COMMAND, cmd);
855 }
365 return 0; 856 return 0;
366} 857}
367 858
@@ -380,7 +871,7 @@ void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region
380 else 871 else
381 root = &pbm->mem_space; 872 root = &pbm->mem_space;
382 873
383 pbm->parent->resource_adjust(pdev, &zero_res, root); 874 pci_resource_adjust(&zero_res, root);
384 875
385 region->start = res->start - zero_res.start; 876 region->start = res->start - zero_res.start;
386 region->end = res->end - zero_res.start; 877 region->end = res->end - zero_res.start;
@@ -401,7 +892,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
401 else 892 else
402 root = &pbm->mem_space; 893 root = &pbm->mem_space;
403 894
404 pbm->parent->resource_adjust(pdev, res, root); 895 pci_resource_adjust(res, root);
405} 896}
406EXPORT_SYMBOL(pcibios_bus_to_resource); 897EXPORT_SYMBOL(pcibios_bus_to_resource);
407 898
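The per-controller resource_adjust hook is replaced by a generic pci_resource_adjust(); going by the psycho_resource_adjust() removed later in this diff, the adjustment is an offset by the PBM root's start, so the bus/resource translation reduces to the arithmetic below (standalone sketch with made-up addresses):

#include <stdio.h>

struct rng { unsigned long start, end; };

/* Assumed behaviour, mirroring the removed psycho_resource_adjust(). */
static void resource_adjust(struct rng *r, const struct rng *root)
{
	r->start += root->start;
	r->end += root->start;
}

int main(void)
{
	struct rng mem_root = { 0xe0000000UL, 0xffffffffUL };	/* PBM MEM space */
	struct rng zero = { 0, 0 };
	unsigned long cpu_addr = 0xe0100000UL;	/* resource (CPU) address */

	resource_adjust(&zero, &mem_root);
	/* pcibios_resource_to_bus(): bus address = resource - adjusted zero. */
	printf("bus address: 0x%lx\n", cpu_addr - zero.start);	/* 0x100000 */
	return 0;
}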
@@ -422,55 +913,17 @@ char * __devinit pcibios_setup(char *str)
422static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, 913static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
423 enum pci_mmap_state mmap_state) 914 enum pci_mmap_state mmap_state)
424{ 915{
425 struct pcidev_cookie *pcp = pdev->sysdata; 916 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
426 struct pci_pbm_info *pbm;
427 struct pci_controller_info *p; 917 struct pci_controller_info *p;
428 unsigned long space_size, user_offset, user_size; 918 unsigned long space_size, user_offset, user_size;
429 919
430 if (!pcp)
431 return -ENXIO;
432 pbm = pcp->pbm;
433 if (!pbm)
434 return -ENXIO;
435
436 p = pbm->parent; 920 p = pbm->parent;
437 if (p->pbms_same_domain) { 921 if (mmap_state == pci_mmap_io) {
438 unsigned long lowest, highest; 922 space_size = (pbm->io_space.end -
439 923 pbm->io_space.start) + 1;
440 lowest = ~0UL; highest = 0UL;
441 if (mmap_state == pci_mmap_io) {
442 if (p->pbm_A.io_space.flags) {
443 lowest = p->pbm_A.io_space.start;
444 highest = p->pbm_A.io_space.end + 1;
445 }
446 if (p->pbm_B.io_space.flags) {
447 if (lowest > p->pbm_B.io_space.start)
448 lowest = p->pbm_B.io_space.start;
449 if (highest < p->pbm_B.io_space.end + 1)
450 highest = p->pbm_B.io_space.end + 1;
451 }
452 space_size = highest - lowest;
453 } else {
454 if (p->pbm_A.mem_space.flags) {
455 lowest = p->pbm_A.mem_space.start;
456 highest = p->pbm_A.mem_space.end + 1;
457 }
458 if (p->pbm_B.mem_space.flags) {
459 if (lowest > p->pbm_B.mem_space.start)
460 lowest = p->pbm_B.mem_space.start;
461 if (highest < p->pbm_B.mem_space.end + 1)
462 highest = p->pbm_B.mem_space.end + 1;
463 }
464 space_size = highest - lowest;
465 }
466 } else { 924 } else {
467 if (mmap_state == pci_mmap_io) { 925 space_size = (pbm->mem_space.end -
468 space_size = (pbm->io_space.end - 926 pbm->mem_space.start) + 1;
469 pbm->io_space.start) + 1;
470 } else {
471 space_size = (pbm->mem_space.end -
472 pbm->mem_space.start) + 1;
473 }
474 } 927 }
475 928
476 /* Make sure the request is in range. */ 929 /* Make sure the request is in range. */
@@ -481,31 +934,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
481 (user_offset + user_size) > space_size) 934 (user_offset + user_size) > space_size)
482 return -EINVAL; 935 return -EINVAL;
483 936
484 if (p->pbms_same_domain) { 937 if (mmap_state == pci_mmap_io) {
485 unsigned long lowest = ~0UL; 938 vma->vm_pgoff = (pbm->io_space.start +
486 939 user_offset) >> PAGE_SHIFT;
487 if (mmap_state == pci_mmap_io) {
488 if (p->pbm_A.io_space.flags)
489 lowest = p->pbm_A.io_space.start;
490 if (p->pbm_B.io_space.flags &&
491 lowest > p->pbm_B.io_space.start)
492 lowest = p->pbm_B.io_space.start;
493 } else {
494 if (p->pbm_A.mem_space.flags)
495 lowest = p->pbm_A.mem_space.start;
496 if (p->pbm_B.mem_space.flags &&
497 lowest > p->pbm_B.mem_space.start)
498 lowest = p->pbm_B.mem_space.start;
499 }
500 vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
501 } else { 940 } else {
502 if (mmap_state == pci_mmap_io) { 941 vma->vm_pgoff = (pbm->mem_space.start +
503 vma->vm_pgoff = (pbm->io_space.start + 942 user_offset) >> PAGE_SHIFT;
504 user_offset) >> PAGE_SHIFT;
505 } else {
506 vma->vm_pgoff = (pbm->mem_space.start +
507 user_offset) >> PAGE_SHIFT;
508 }
509 } 943 }
510 944
511 return 0; 945 return 0;
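With one aperture per PBM, the mmap offset no longer needs the union of both PBMs' ranges: the page offset is just the PBM aperture base plus the user offset. A small arithmetic sketch with a hypothetical IO base (PAGE_SHIFT of 13, as on sparc64):

#include <stdio.h>

#define PAGE_SHIFT 13	/* 8K pages on sparc64 */

int main(void)
{
	unsigned long long io_start = 0x1fe02000000ULL;	/* hypothetical PBM IO base */
	unsigned long long user_offset = 0x4000ULL;	/* offset requested via mmap */

	/* Same computation as the pci_mmap_io path above; prints 0xff01002. */
	printf("vm_pgoff = 0x%llx\n", (io_start + user_offset) >> PAGE_SHIFT);
	return 0;
}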
@@ -639,9 +1073,8 @@ int pci_domain_nr(struct pci_bus *pbus)
639 struct pci_controller_info *p = pbm->parent; 1073 struct pci_controller_info *p = pbm->parent;
640 1074
641 ret = p->index; 1075 ret = p->index;
642 if (p->pbms_same_domain == 0) 1076 ret = ((ret << 1) +
643 ret = ((ret << 1) + 1077 ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
644 ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
645 } 1078 }
646 1079
647 return ret; 1080 return ret;
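Since the pbms_same_domain special case is gone, every PBM gets its own PCI domain: twice the controller index, plus one for PBM B. A minimal sketch of the numbering:

#include <stdio.h>

/* Sketch of the domain numbering after this change, not the kernel function. */
static int domain_nr(int controller_index, int is_pbm_b)
{
	return (controller_index << 1) + (is_pbm_b ? 1 : 0);
}

int main(void)
{
	/* Controller 2: PBM A -> domain 4, PBM B -> domain 5. */
	printf("%d %d\n", domain_nr(2, 0), domain_nr(2, 1));
	return 0;
}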
@@ -651,8 +1084,7 @@ EXPORT_SYMBOL(pci_domain_nr);
651#ifdef CONFIG_PCI_MSI 1084#ifdef CONFIG_PCI_MSI
652int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) 1085int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
653{ 1086{
654 struct pcidev_cookie *pcp = pdev->sysdata; 1087 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
655 struct pci_pbm_info *pbm = pcp->pbm;
656 struct pci_controller_info *p = pbm->parent; 1088 struct pci_controller_info *p = pbm->parent;
657 int virt_irq, err; 1089 int virt_irq, err;
658 1090
@@ -670,8 +1102,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
670{ 1102{
671 struct msi_desc *entry = get_irq_msi(virt_irq); 1103 struct msi_desc *entry = get_irq_msi(virt_irq);
672 struct pci_dev *pdev = entry->dev; 1104 struct pci_dev *pdev = entry->dev;
673 struct pcidev_cookie *pcp = pdev->sysdata; 1105 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
674 struct pci_pbm_info *pbm = pcp->pbm;
675 struct pci_controller_info *p = pbm->parent; 1106 struct pci_controller_info *p = pbm->parent;
676 1107
677 if (!pbm->msi_num || !p->setup_msi_irq) 1108 if (!pbm->msi_num || !p->setup_msi_irq)
@@ -683,9 +1114,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
683 1114
684struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) 1115struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
685{ 1116{
686 struct pcidev_cookie *pc = pdev->sysdata; 1117 return pdev->dev.archdata.prom_node;
687
688 return pc->op->node;
689} 1118}
690EXPORT_SYMBOL(pci_device_to_OF_node); 1119EXPORT_SYMBOL(pci_device_to_OF_node);
691 1120
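The driver-facing API is unchanged; only the lookup now goes through dev.archdata instead of the old pcidev_cookie. A hedged sketch of a hypothetical caller:

#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/prom.h>

/* Hypothetical driver helper; pci_device_to_OF_node() is the real API. */
static void report_of_node(struct pci_dev *pdev)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);

	if (dp)
		printk(KERN_INFO "%s: OF node %s\n",
		       pci_name(pdev), dp->full_name);
}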
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index 5a92cb90ebe0..1e6aeedf43c4 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -1,7 +1,6 @@
1/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $ 1/* pci_common.c: PCI controller common support.
2 * pci_common.c: PCI controller common support.
3 * 2 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7#include <linux/string.h> 6#include <linux/string.h>
@@ -16,748 +15,137 @@
16 15
17#include "pci_impl.h" 16#include "pci_impl.h"
18 17
19/* Fix self device of BUS and hook it into BUS->self. 18static void pci_register_legacy_regions(struct resource *io_res,
20 * The pci_scan_bus does not do this for the host bridge. 19 struct resource *mem_res)
21 */
22void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
23{
24 struct pci_dev *pdev;
25
26 list_for_each_entry(pdev, &pbus->devices, bus_list) {
27 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
28 pbus->self = pdev;
29 return;
30 }
31 }
32
33 prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
34 prom_halt();
35}
36
37/* Find the OBP PROM device tree node for a PCI device. */
38static struct device_node * __init
39find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev,
40 struct device_node *bus_node,
41 struct linux_prom_pci_registers **pregs,
42 int *nregs)
43{ 20{
44 struct device_node *dp; 21 struct resource *p;
45
46 *nregs = 0;
47
48 /*
49	 * Return the PBM's PROM node in case we are its PCI device,
50 * as the PBM's reg property is different to standard PCI reg
51 * properties. We would delete this device entry otherwise,
52 * which confuses XFree86's device probing...
53 */
54 if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
55 (pdev->vendor == PCI_VENDOR_ID_SUN) &&
56 (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
57 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
58 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
59 pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
60 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD))
61 return bus_node;
62
63 dp = bus_node->child;
64 while (dp) {
65 struct linux_prom_pci_registers *regs;
66 struct property *prop;
67 int len;
68
69 prop = of_find_property(dp, "reg", &len);
70 if (!prop)
71 goto do_next_sibling;
72
73 regs = prop->value;
74 if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
75 *pregs = regs;
76 *nregs = len / sizeof(struct linux_prom_pci_registers);
77 return dp;
78 }
79
80 do_next_sibling:
81 dp = dp->sibling;
82 }
83
84 return NULL;
85}
86 22
87/* Older versions of OBP on PCI systems encode 64-bit MEM 23 /* VGA Video RAM. */
88 * space assignments incorrectly, this fixes them up. We also 24 p = kzalloc(sizeof(*p), GFP_KERNEL);
89 * take the opportunity here to hide other kinds of bogus 25 if (!p)
90 * assignments.
91 */
92static void __init fixup_obp_assignments(struct pci_dev *pdev,
93 struct pcidev_cookie *pcp)
94{
95 int i;
96
97 if (pdev->vendor == PCI_VENDOR_ID_AL &&
98 (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
99 pdev->device == PCI_DEVICE_ID_AL_M1533)) {
100 int i;
101
102 /* Zap all of the normal resources, they are
103 * meaningless and generate bogus resource collision
104 * messages. This is OpenBoot's ill-fated attempt to
105 * represent the implicit resources that these devices
106 * have.
107 */
108 pcp->num_prom_assignments = 0;
109 for (i = 0; i < 6; i++) {
110 pdev->resource[i].start =
111 pdev->resource[i].end =
112 pdev->resource[i].flags = 0;
113 }
114 pdev->resource[PCI_ROM_RESOURCE].start =
115 pdev->resource[PCI_ROM_RESOURCE].end =
116 pdev->resource[PCI_ROM_RESOURCE].flags = 0;
117 return; 26 return;
118 }
119
120 for (i = 0; i < pcp->num_prom_assignments; i++) {
121 struct linux_prom_pci_registers *ap;
122 int space;
123 27
124 ap = &pcp->prom_assignments[i]; 28 p->name = "Video RAM area";
125 space = ap->phys_hi >> 24; 29 p->start = mem_res->start + 0xa0000UL;
126 if ((space & 0x3) == 2 && 30 p->end = p->start + 0x1ffffUL;
127 (space & 0x4) != 0) { 31 p->flags = IORESOURCE_BUSY;
128 ap->phys_hi &= ~(0x7 << 24); 32 request_resource(mem_res, p);
129 ap->phys_hi |= 0x3 << 24;
130 }
131 }
132}
133
134static ssize_t
135show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
136{
137 struct pci_dev *pdev;
138 struct pcidev_cookie *sysdata;
139
140 pdev = to_pci_dev(dev);
141 sysdata = pdev->sysdata;
142
143 return snprintf (buf, PAGE_SIZE, "%s\n", sysdata->prom_node->full_name);
144}
145
146static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
147 33
148/* Fill in the PCI device cookie sysdata for the given 34 p = kzalloc(sizeof(*p), GFP_KERNEL);
149 * PCI device. This cookie is the means by which one 35 if (!p)
150 * can get to OBP and PCI controller specific information
151 * for a PCI device.
152 */
153static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
154 struct pci_dev *pdev,
155 struct device_node *bus_node)
156{
157 struct linux_prom_pci_registers *pregs = NULL;
158 struct pcidev_cookie *pcp;
159 struct device_node *dp;
160 struct property *prop;
161 int nregs, len, err;
162
163 dp = find_device_prom_node(pbm, pdev, bus_node,
164 &pregs, &nregs);
165 if (!dp) {
166 /* If it is not in the OBP device tree then
167 * there must be a damn good reason for it.
168 *
169 * So what we do is delete the device from the
170 * PCI device tree completely. This scenario
171 * is seen, for example, on CP1500 for the
172 * second EBUS/HappyMeal pair if the external
173 * connector for it is not present.
174 */
175 pci_remove_bus_device(pdev);
176 return; 36 return;
177 }
178
179 pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC);
180 if (pcp == NULL) {
181 prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
182 prom_halt();
183 }
184 pcp->pbm = pbm;
185 pcp->prom_node = dp;
186 pcp->op = of_find_device_by_node(dp);
187 memcpy(pcp->prom_regs, pregs,
188 nregs * sizeof(struct linux_prom_pci_registers));
189 pcp->num_prom_regs = nregs;
190
191 /* We can't have the pcidev_cookie assignments be just
192 * direct pointers into the property value, since they
193 * are potentially modified by the probing process.
194 */
195 prop = of_find_property(dp, "assigned-addresses", &len);
196 if (!prop) {
197 pcp->num_prom_assignments = 0;
198 } else {
199 memcpy(pcp->prom_assignments, prop->value, len);
200 pcp->num_prom_assignments =
201 (len / sizeof(pcp->prom_assignments[0]));
202 }
203
204 if (strcmp(dp->name, "ebus") == 0) {
205 struct linux_prom_ebus_ranges *erng;
206 int iter;
207
208 /* EBUS is special... */
209 prop = of_find_property(dp, "ranges", &len);
210 if (!prop) {
211 prom_printf("EBUS: Fatal error, no range property\n");
212 prom_halt();
213 }
214 erng = prop->value;
215 len = (len / sizeof(erng[0]));
216 for (iter = 0; iter < len; iter++) {
217 struct linux_prom_ebus_ranges *ep = &erng[iter];
218 struct linux_prom_pci_registers *ap;
219
220 ap = &pcp->prom_assignments[iter];
221
222 ap->phys_hi = ep->parent_phys_hi;
223 ap->phys_mid = ep->parent_phys_mid;
224 ap->phys_lo = ep->parent_phys_lo;
225 ap->size_hi = 0;
226 ap->size_lo = ep->size;
227 }
228 pcp->num_prom_assignments = len;
229 }
230
231 fixup_obp_assignments(pdev, pcp);
232
233 pdev->sysdata = pcp;
234
235 /* we don't really care if we can create this file or not,
236 * but we need to assign the result of the call or the world will fall
237 * under alien invasion and everybody will be frozen on a spaceship
238 * ready to be eaten on alpha centauri by some green and jelly humanoid.
239 */
240 err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_obppath.attr);
241}
242
243void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
244 struct pci_pbm_info *pbm,
245 struct device_node *dp)
246{
247 struct pci_dev *pdev, *pdev_next;
248 struct pci_bus *this_pbus, *pbus_next;
249
250 /* This must be _safe because the cookie fillin
251 routine can delete devices from the tree. */
252 list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
253 pdev_cookie_fillin(pbm, pdev, dp);
254
255 list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
256 struct pcidev_cookie *pcp = this_pbus->self->sysdata;
257
258 pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
259 }
260}
261 37
262static void __init bad_assignment(struct pci_dev *pdev, 38 p->name = "System ROM";
263 struct linux_prom_pci_registers *ap, 39 p->start = mem_res->start + 0xf0000UL;
264 struct resource *res, 40 p->end = p->start + 0xffffUL;
265 int do_prom_halt) 41 p->flags = IORESOURCE_BUSY;
266{ 42 request_resource(mem_res, p);
267 prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
268 pdev->bus->number, pdev->devfn);
269 if (ap)
270 prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
271 ap->phys_hi, ap->phys_mid, ap->phys_lo,
272 ap->size_hi, ap->size_lo);
273 if (res)
274 prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
275 res->start, res->end, res->flags);
276 if (do_prom_halt)
277 prom_halt();
278}
279
280static struct resource *
281__init get_root_resource(struct linux_prom_pci_registers *ap,
282 struct pci_pbm_info *pbm)
283{
284 int space = (ap->phys_hi >> 24) & 3;
285
286 switch (space) {
287 case 0:
288 /* Configuration space, silently ignore it. */
289 return NULL;
290
291 case 1:
292 /* 16-bit IO space */
293 return &pbm->io_space;
294
295 case 2:
296 /* 32-bit MEM space */
297 return &pbm->mem_space;
298
299 case 3:
300 /* 64-bit MEM space, these are allocated out of
301 * the 32-bit mem_space range for the PBM, ie.
302 * we just zero out the upper 32-bits.
303 */
304 return &pbm->mem_space;
305
306 default:
307 printk("PCI: What is resource space %x?\n", space);
308 return NULL;
309 };
310}
311
312static struct resource *
313__init get_device_resource(struct linux_prom_pci_registers *ap,
314 struct pci_dev *pdev)
315{
316 struct resource *res;
317 int breg = (ap->phys_hi & 0xff);
318
319 switch (breg) {
320 case PCI_ROM_ADDRESS:
321 /* Unfortunately I have seen several cases where
322 * buggy FCODE uses a space value of '1' (I/O space)
323 * in the register property for the ROM address
324 * so disable this sanity check for now.
325 */
326#if 0
327 {
328 int space = (ap->phys_hi >> 24) & 3;
329
330 /* It had better be MEM space. */
331 if (space != 2)
332 bad_assignment(pdev, ap, NULL, 0);
333 }
334#endif
335 res = &pdev->resource[PCI_ROM_RESOURCE];
336 break;
337
338 case PCI_BASE_ADDRESS_0:
339 case PCI_BASE_ADDRESS_1:
340 case PCI_BASE_ADDRESS_2:
341 case PCI_BASE_ADDRESS_3:
342 case PCI_BASE_ADDRESS_4:
343 case PCI_BASE_ADDRESS_5:
344 res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
345 break;
346
347 default:
348 bad_assignment(pdev, ap, NULL, 0);
349 res = NULL;
350 break;
351 };
352
353 return res;
354}
355
356static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
357 struct pci_dev *pdev)
358{
359 struct pcidev_cookie *pcp = pdev->sysdata;
360 int i;
361
362 for (i = 0; i < pcp->num_prom_assignments; i++) {
363 struct linux_prom_pci_registers *ap;
364 struct resource *root, *res;
365
366 /* The format of this property is specified in
367 * the PCI Bus Binding to IEEE1275-1994.
368 */
369 ap = &pcp->prom_assignments[i];
370 root = get_root_resource(ap, pbm);
371 res = get_device_resource(ap, pdev);
372 if (root == NULL || res == NULL ||
373 res->flags == 0)
374 continue;
375
376 /* Ok we know which resource this PROM assignment is
377 * for, sanity check it.
378 */
379 if ((res->start & 0xffffffffUL) != ap->phys_lo)
380 bad_assignment(pdev, ap, res, 1);
381
382 /* If it is a 64-bit MEM space assignment, verify that
383 * the resource is too and that the upper 32-bits match.
384 */
385 if (((ap->phys_hi >> 24) & 3) == 3) {
386 if (((res->flags & IORESOURCE_MEM) == 0) ||
387 ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
388 != PCI_BASE_ADDRESS_MEM_TYPE_64))
389 bad_assignment(pdev, ap, res, 1);
390 if ((res->start >> 32) != ap->phys_mid)
391 bad_assignment(pdev, ap, res, 1);
392
393 /* PBM cannot generate cpu initiated PIOs
394 * to the full 64-bit space. Therefore the
395 * upper 32-bits better be zero. If it is
396 * not, just skip it and we will assign it
397 * properly ourselves.
398 */
399 if ((res->start >> 32) != 0UL) {
400 printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
401 "%016lx for region %ld on device %s\n",
402 res->start, (res - &pdev->resource[0]), pci_name(pdev));
403 continue;
404 }
405 }
406
407 /* Adjust the resource into the physical address space
408 * of this PBM.
409 */
410 pbm->parent->resource_adjust(pdev, res, root);
411
412 if (request_resource(root, res) < 0) {
413 int rnum;
414
415 /* OK, there is some conflict. But this is fine
416 * since we'll reassign it in the fixup pass.
417 *
418 * Do not print the warning for ROM resources
419 * as such a conflict is quite common and
420 * harmless as the ROM bar is disabled.
421 */
422 rnum = (res - &pdev->resource[0]);
423 if (rnum != PCI_ROM_RESOURCE)
424 printk(KERN_ERR "PCI: Resource collision, "
425 "region %d "
426 "[%016lx:%016lx] of device %s\n",
427 rnum,
428 res->start, res->end,
429 pci_name(pdev));
430 }
431 }
432}
433
434void __init pci_record_assignments(struct pci_pbm_info *pbm,
435 struct pci_bus *pbus)
436{
437 struct pci_dev *dev;
438 struct pci_bus *bus;
439 43
440 list_for_each_entry(dev, &pbus->devices, bus_list) 44 p = kzalloc(sizeof(*p), GFP_KERNEL);
441 pdev_record_assignments(pbm, dev); 45 if (!p)
46 return;
442 47
443 list_for_each_entry(bus, &pbus->children, node) 48 p->name = "Video ROM";
444 pci_record_assignments(pbm, bus); 49 p->start = mem_res->start + 0xc0000UL;
50 p->end = p->start + 0x7fffUL;
51 p->flags = IORESOURCE_BUSY;
52 request_resource(mem_res, p);
445} 53}
446 54
447/* Return non-zero if PDEV has implicit I/O resources even 55static void pci_register_iommu_region(struct pci_pbm_info *pbm)
448 * though it may not have an I/O base address register
449 * active.
450 */
451static int __init has_implicit_io(struct pci_dev *pdev)
452{ 56{
453 int class = pdev->class >> 8; 57 const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL);
454 58
455 if (class == PCI_CLASS_NOT_DEFINED || 59 if (vdma) {
456 class == PCI_CLASS_NOT_DEFINED_VGA || 60 struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
457 class == PCI_CLASS_STORAGE_IDE ||
458 (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
459 return 1;
460 61
461 return 0; 62 if (!rp) {
462} 63 prom_printf("Cannot allocate IOMMU resource.\n");
463
464static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
465 struct pci_dev *pdev)
466{
467 u32 reg;
468 u16 cmd;
469 int i, io_seen, mem_seen;
470
471 io_seen = mem_seen = 0;
472 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
473 struct resource *root, *res;
474 unsigned long size, min, max, align;
475
476 res = &pdev->resource[i];
477
478 if (res->flags & IORESOURCE_IO)
479 io_seen++;
480 else if (res->flags & IORESOURCE_MEM)
481 mem_seen++;
482
483 /* If it is already assigned or the resource does
484 * not exist, there is nothing to do.
485 */
486 if (res->parent != NULL || res->flags == 0UL)
487 continue;
488
489 /* Determine the root we allocate from. */
490 if (res->flags & IORESOURCE_IO) {
491 root = &pbm->io_space;
492 min = root->start + 0x400UL;
493 max = root->end;
494 } else {
495 root = &pbm->mem_space;
496 min = root->start;
497 max = min + 0x80000000UL;
498 }
499
500 size = res->end - res->start;
501 align = size + 1;
502 if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
503 /* uh oh */
504 prom_printf("PCI: Failed to allocate resource %d for %s\n",
505 i, pci_name(pdev));
506 prom_halt(); 64 prom_halt();
507 } 65 }
508 66 rp->name = "IOMMU";
509 /* Update PCI config space. */ 67 rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
510 pbm->parent->base_address_update(pdev, i); 68 rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
511 } 69 rp->flags = IORESOURCE_BUSY;
512 70 request_resource(&pbm->mem_space, rp);
513 /* Special case, disable the ROM. Several devices
514	 * act funny (i.e. do not respond to memory space writes)
515	 * when it is left enabled. Good examples are Qlogic,ISP
516 * adapters.
517 */
518 pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
519 reg &= ~PCI_ROM_ADDRESS_ENABLE;
520 pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
521
522 /* If we saw I/O or MEM resources, enable appropriate
523 * bits in PCI command register.
524 */
525 if (io_seen || mem_seen) {
526 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
527 if (io_seen || has_implicit_io(pdev))
528 cmd |= PCI_COMMAND_IO;
529 if (mem_seen)
530 cmd |= PCI_COMMAND_MEMORY;
531 pci_write_config_word(pdev, PCI_COMMAND, cmd);
532 }
533
534 /* If this is a PCI bridge or an IDE controller,
535 * enable bus mastering. In the former case also
536 * set the cache line size correctly.
537 */
538 if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
539 (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
540 ((pdev->class & 0x80) != 0))) {
541 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
542 cmd |= PCI_COMMAND_MASTER;
543 pci_write_config_word(pdev, PCI_COMMAND, cmd);
544
545 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
546 pci_write_config_byte(pdev,
547 PCI_CACHE_LINE_SIZE,
548 (64 / sizeof(u32)));
549 } 71 }
550} 72}
551 73
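The reserved IOMMU window comes straight from the PBM's "virtual-dma" property, interpreted as (base, size) relative to the PBM MEM aperture. A standalone sketch of the arithmetic with hypothetical property values:

#include <stdio.h>

int main(void)
{
	unsigned long long mem_space_start = 0x1ff00000000ULL;	/* PBM MEM base */
	unsigned int vdma[2] = { 0xc0000000U, 0x40000000U };	/* base, size */

	unsigned long long start = mem_space_start + vdma[0];
	unsigned long long end = start + vdma[1] - 1ULL;

	/* Matches the rp->start / rp->end computation above. */
	printf("IOMMU resource: [0x%llx - 0x%llx]\n", start, end);
	return 0;
}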
552void __init pci_assign_unassigned(struct pci_pbm_info *pbm, 74void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
553 struct pci_bus *pbus)
554{ 75{
555 struct pci_dev *dev; 76 const struct linux_prom_pci_ranges *pbm_ranges;
556 struct pci_bus *bus; 77 int i, saw_mem, saw_io;
557 78 int num_pbm_ranges;
558 list_for_each_entry(dev, &pbus->devices, bus_list)
559 pdev_assign_unassigned(pbm, dev);
560 79
561 list_for_each_entry(bus, &pbus->children, node) 80 saw_mem = saw_io = 0;
562 pci_assign_unassigned(pbm, bus); 81 pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i);
563} 82 num_pbm_ranges = i / sizeof(*pbm_ranges);
564 83
565static void __init pdev_fixup_irq(struct pci_dev *pdev) 84 for (i = 0; i < num_pbm_ranges; i++) {
566{ 85 const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
567 struct pcidev_cookie *pcp = pdev->sysdata; 86 unsigned long a;
568 struct of_device *op = pcp->op; 87 u32 parent_phys_hi, parent_phys_lo;
88 int type;
569 89
570 if (op->irqs[0] == 0xffffffff) { 90 parent_phys_hi = pr->parent_phys_hi;
571 pdev->irq = PCI_IRQ_NONE; 91 parent_phys_lo = pr->parent_phys_lo;
572 return; 92 if (tlb_type == hypervisor)
573 } 93 parent_phys_hi &= 0x0fffffff;
574 94
575 pdev->irq = op->irqs[0]; 95 type = (pr->child_phys_hi >> 24) & 0x3;
96 a = (((unsigned long)parent_phys_hi << 32UL) |
97 ((unsigned long)parent_phys_lo << 0UL));
576 98
577 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 99 switch (type) {
578 pdev->irq & PCI_IRQ_INO); 100 case 0:
579} 101 /* PCI config space, 16MB */
580 102 pbm->config_space = a;
581void __init pci_fixup_irq(struct pci_pbm_info *pbm, 103 break;
582 struct pci_bus *pbus)
583{
584 struct pci_dev *dev;
585 struct pci_bus *bus;
586
587 list_for_each_entry(dev, &pbus->devices, bus_list)
588 pdev_fixup_irq(dev);
589
590 list_for_each_entry(bus, &pbus->children, node)
591 pci_fixup_irq(pbm, bus);
592}
593
594static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
595{
596 u16 cmd;
597 u8 hdr_type, min_gnt, ltimer;
598
599 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
600 cmd |= PCI_COMMAND_MASTER;
601 pci_write_config_word(pdev, PCI_COMMAND, cmd);
602
603	 /* Read it back; if the mastering bit did not
604 * get set, the device does not support bus
605 * mastering so we have nothing to do here.
606 */
607 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
608 if ((cmd & PCI_COMMAND_MASTER) == 0)
609 return;
610
611 /* Set correct cache line size, 64-byte on all
612 * Sparc64 PCI systems. Note that the value is
613 * measured in 32-bit words.
614 */
615 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
616 64 / sizeof(u32));
617
618 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
619 hdr_type &= ~0x80;
620 if (hdr_type != PCI_HEADER_TYPE_NORMAL)
621 return;
622
623 /* If the latency timer is already programmed with a non-zero
624 * value, assume whoever set it (OBP or whoever) knows what
625 * they are doing.
626 */
627 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
628 if (ltimer != 0)
629 return;
630
631 /* XXX Since I'm tipping off the min grant value to
632 * XXX choose a suitable latency timer value, I also
633 * XXX considered making use of the max latency value
634 * XXX as well. Unfortunately I've seen too many bogusly
635 * XXX low settings for it to the point where it lacks
636 * XXX any usefulness. In one case, an ethernet card
637 * XXX claimed a min grant of 10 and a max latency of 5.
638 * XXX Now, if I had two such cards on the same bus I
639 * XXX could not set the desired burst period (calculated
640 * XXX from min grant) without violating the max latency
641 * XXX bound. Duh...
642 * XXX
643 * XXX I blame dumb PC bios implementors for stuff like
644 * XXX this, most of them don't even try to do something
645 * XXX sensible with latency timer values and just set some
646 * XXX default value (usually 32) into every device.
647 */
648
649 pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
650
651 if (min_gnt == 0) {
652 /* If no min_gnt setting then use a default
653 * value.
654 */
655 if (is_66mhz)
656 ltimer = 16;
657 else
658 ltimer = 32;
659 } else {
660 int shift_factor;
661
662 if (is_66mhz)
663 shift_factor = 2;
664 else
665 shift_factor = 3;
666
667 /* Use a default value when the min_gnt value
668 * is erroneously high.
669 */
670 if (((unsigned int) min_gnt << shift_factor) > 512 ||
671 ((min_gnt << shift_factor) & 0xff) == 0) {
672 ltimer = 8 << shift_factor;
673 } else {
674 ltimer = min_gnt << shift_factor;
675 }
676 }
677 104
678 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer); 105 case 1:
679} 106 /* 16-bit IO space, 16MB */
107 pbm->io_space.start = a;
108 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
109 pbm->io_space.flags = IORESOURCE_IO;
110 saw_io = 1;
111 break;
680 112
681void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, 113 case 2:
682 struct pci_bus *pbus) 114 /* 32-bit MEM space, 2GB */
683{ 115 pbm->mem_space.start = a;
684 struct pci_dev *pdev; 116 pbm->mem_space.end = a + (0x80000000UL - 1UL);
685 int all_are_66mhz; 117 pbm->mem_space.flags = IORESOURCE_MEM;
686 u16 status; 118 saw_mem = 1;
119 break;
687 120
688 if (pbm->is_66mhz_capable == 0) { 121 case 3:
689 all_are_66mhz = 0; 122 /* XXX 64-bit MEM handling XXX */
690 goto out;
691 }
692 123
693 all_are_66mhz = 1; 124 default:
694 list_for_each_entry(pdev, &pbus->devices, bus_list) {
695 pci_read_config_word(pdev, PCI_STATUS, &status);
696 if (!(status & PCI_STATUS_66MHZ)) {
697 all_are_66mhz = 0;
698 break; 125 break;
699 } 126 };
700 } 127 }
701out:
702 pbm->all_devs_66mhz = all_are_66mhz;
703
704 printk("PCI%d(PBM%c): Bus running at %dMHz\n",
705 pbm->parent->index,
706 (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
707 (all_are_66mhz ? 66 : 33));
708}
709
710void pci_setup_busmastering(struct pci_pbm_info *pbm,
711 struct pci_bus *pbus)
712{
713 struct pci_dev *dev;
714 struct pci_bus *bus;
715 int is_66mhz;
716
717 is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
718
719 list_for_each_entry(dev, &pbus->devices, bus_list)
720 pdev_setup_busmastering(dev, is_66mhz);
721
722 list_for_each_entry(bus, &pbus->children, node)
723 pci_setup_busmastering(pbm, bus);
724}
725
726void pci_register_legacy_regions(struct resource *io_res,
727 struct resource *mem_res)
728{
729 struct resource *p;
730
731 /* VGA Video RAM. */
732 p = kzalloc(sizeof(*p), GFP_KERNEL);
733 if (!p)
734 return;
735 128
736 p->name = "Video RAM area"; 129 if (!saw_io || !saw_mem) {
737 p->start = mem_res->start + 0xa0000UL; 130 prom_printf("%s: Fatal error, missing %s PBM range.\n",
738 p->end = p->start + 0x1ffffUL; 131 pbm->name,
739 p->flags = IORESOURCE_BUSY; 132 (!saw_io ? "IO" : "MEM"));
740 request_resource(mem_res, p); 133 prom_halt();
134 }
741 135
742 p = kzalloc(sizeof(*p), GFP_KERNEL); 136 printk("%s: PCI IO[%lx] MEM[%lx]\n",
743 if (!p) 137 pbm->name,
744 return; 138 pbm->io_space.start,
139 pbm->mem_space.start);
745 140
746 p->name = "System ROM"; 141 pbm->io_space.name = pbm->mem_space.name = pbm->name;
747 p->start = mem_res->start + 0xf0000UL;
748 p->end = p->start + 0xffffUL;
749 p->flags = IORESOURCE_BUSY;
750 request_resource(mem_res, p);
751 142
752 p = kzalloc(sizeof(*p), GFP_KERNEL); 143 request_resource(&ioport_resource, &pbm->io_space);
753 if (!p) 144 request_resource(&iomem_resource, &pbm->mem_space);
754 return;
755 145
756 p->name = "Video ROM"; 146 pci_register_legacy_regions(&pbm->io_space,
757 p->start = mem_res->start + 0xc0000UL; 147 &pbm->mem_space);
758 p->end = p->start + 0x7fffUL; 148 pci_register_iommu_region(pbm);
759 p->flags = IORESOURCE_BUSY;
760 request_resource(mem_res, p);
761} 149}
762 150
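For reference, the "ranges" decode used by pci_determine_mem_io_space() above: bits 24-25 of child_phys_hi select the space type, the parent address is the 64-bit concatenation of parent_phys_hi/lo (top nibble masked on hypervisor platforms), and the IO and 32-bit MEM windows are fixed at 16MB and 2GB. A standalone sketch with a made-up MEM entry; the struct simply mirrors the fields used above:

#include <stdio.h>

struct prom_pci_range {
	unsigned int child_phys_hi;
	unsigned int child_phys_mid;
	unsigned int child_phys_lo;
	unsigned int parent_phys_hi;
	unsigned int parent_phys_lo;
	unsigned int size_hi;
	unsigned int size_lo;
};

int main(void)
{
	/* Hypothetical 32-bit MEM range entry. */
	struct prom_pci_range r = {
		.child_phys_hi  = 0x02000000,	/* type 2: 32-bit MEM */
		.parent_phys_hi = 0x000001ff,
		.parent_phys_lo = 0x00000000,
	};
	int type = (r.child_phys_hi >> 24) & 0x3;
	unsigned long long a = ((unsigned long long)r.parent_phys_hi << 32) |
			       r.parent_phys_lo;

	/* Prints: type 2, MEM space [0x1ff00000000 - 0x1ff7fffffff] */
	printf("type %d, MEM space [0x%llx - 0x%llx]\n",
	       type, a, a + 0x80000000ULL - 1ULL);
	return 0;
}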
763/* Generic helper routines for PCI error reporting. */ 151/* Generic helper routines for PCI error reporting. */
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index 971e2bea30b4..1208583fcb83 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -1,7 +1,6 @@
1/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $ 1/* pci_impl.h: Helper definitions for PCI controller support.
2 * pci_impl.h: Helper definitions for PCI controller support.
3 * 2 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7#ifndef PCI_IMPL_H 6#ifndef PCI_IMPL_H
@@ -13,26 +12,22 @@
13#include <asm/prom.h> 12#include <asm/prom.h>
14 13
15extern struct pci_controller_info *pci_controller_root; 14extern struct pci_controller_info *pci_controller_root;
15extern unsigned long pci_memspace_mask;
16 16
17extern int pci_num_controllers; 17extern int pci_num_controllers;
18 18
19/* PCI bus scanning and fixup support. */ 19/* PCI bus scanning and fixup support. */
20extern void pci_fixup_host_bridge_self(struct pci_bus *pbus); 20extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm);
21extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus, 21extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
22 struct pci_pbm_info *pbm, 22
23 struct device_node *prom_node); 23extern int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
24extern void pci_record_assignments(struct pci_pbm_info *pbm, 24 unsigned int devfn,
25 struct pci_bus *pbus); 25 int where, int size,
26extern void pci_assign_unassigned(struct pci_pbm_info *pbm, 26 u32 *value);
27 struct pci_bus *pbus); 27extern int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
28extern void pci_fixup_irq(struct pci_pbm_info *pbm, 28 unsigned int devfn,
29 struct pci_bus *pbus); 29 int where, int size,
30extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, 30 u32 value);
31 struct pci_bus *pbus);
32extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
33 struct pci_bus *pbus);
34extern void pci_register_legacy_regions(struct resource *io_res,
35 struct resource *mem_res);
36 31
37/* Error reporting support. */ 32/* Error reporting support. */
38extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); 33extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 7aca0f33f885..66712772f494 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,7 +1,6 @@
1/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $ 1/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
2 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
3 * 2 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) 4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
6 */ 5 */
7 6
@@ -36,7 +35,7 @@
36 "i" (ASI_PHYS_BYPASS_EC_E)) 35 "i" (ASI_PHYS_BYPASS_EC_E))
37 36
38/* Must be invoked under the IOMMU lock. */ 37/* Must be invoked under the IOMMU lock. */
39static void __iommu_flushall(struct pci_iommu *iommu) 38static void __iommu_flushall(struct iommu *iommu)
40{ 39{
41 unsigned long tag; 40 unsigned long tag;
42 int entry; 41 int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
64#define IOPTE_IS_DUMMY(iommu, iopte) \ 63#define IOPTE_IS_DUMMY(iommu, iopte) \
65 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) 64 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
66 65
67static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) 66static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
68{ 67{
69 unsigned long val = iopte_val(*iopte); 68 unsigned long val = iopte_val(*iopte);
70 69
@@ -75,9 +74,9 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
75} 74}
76 75
77/* Based largely upon the ppc64 iommu allocator. */ 76/* Based largely upon the ppc64 iommu allocator. */
78static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages) 77static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
79{ 78{
80 struct pci_iommu_arena *arena = &iommu->arena; 79 struct iommu_arena *arena = &iommu->arena;
81 unsigned long n, i, start, end, limit; 80 unsigned long n, i, start, end, limit;
82 int pass; 81 int pass;
83 82
@@ -116,7 +115,7 @@ again:
116 return n; 115 return n;
117} 116}
118 117
119static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) 118static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
120{ 119{
121 unsigned long i; 120 unsigned long i;
122 121
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
124 __clear_bit(i, arena->map); 123 __clear_bit(i, arena->map);
125} 124}
126 125
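The arena behind these helpers is a bitmap of IOMMU page slots: freeing clears a run of bits, and allocation (its body falls outside the hunks above) scans for a free run. A userspace sketch of that scheme, not the kernel implementation:

#include <stdio.h>
#include <string.h>

#define ARENA_SLOTS 64

static unsigned char map[ARENA_SLOTS];	/* one byte per slot for simplicity */

static long arena_alloc(unsigned long npages)
{
	unsigned long start, i;

	for (start = 0; start + npages <= ARENA_SLOTS; start++) {
		for (i = 0; i < npages; i++)
			if (map[start + i])
				break;
		if (i == npages) {	/* found a free run */
			memset(&map[start], 1, npages);
			return (long)start;
		}
	}
	return -1;
}

static void arena_free(unsigned long base, unsigned long npages)
{
	memset(&map[base], 0, npages);	/* clear the run, like pci_arena_free() */
}

int main(void)
{
	long a = arena_alloc(4);
	long b = arena_alloc(2);

	arena_free(a, 4);
	printf("a=%ld b=%ld next=%ld\n", a, b, arena_alloc(3));	/* 0 4 0 */
	return 0;
}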
127void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) 126void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
128{ 127{
129 unsigned long i, tsbbase, order, sz, num_tsb_entries; 128 unsigned long i, tsbbase, order, sz, num_tsb_entries;
130 129
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
170 iopte_make_dummy(iommu, &iommu->page_table[i]); 169 iopte_make_dummy(iommu, &iommu->page_table[i]);
171} 170}
172 171
173static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages) 172static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
174{ 173{
175 long entry; 174 long entry;
176 175
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
181 return iommu->page_table + entry; 180 return iommu->page_table + entry;
182} 181}
183 182
184static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages) 183static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
185{ 184{
186 pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); 185 pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
187} 186}
188 187
189static int iommu_alloc_ctx(struct pci_iommu *iommu) 188static int iommu_alloc_ctx(struct iommu *iommu)
190{ 189{
191 int lowest = iommu->ctx_lowest_free; 190 int lowest = iommu->ctx_lowest_free;
192 int sz = IOMMU_NUM_CTXS - lowest; 191 int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
205 return n; 204 return n;
206} 205}
207 206
208static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) 207static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
209{ 208{
210 if (likely(ctx)) { 209 if (likely(ctx)) {
211 __clear_bit(ctx, iommu->ctx_bitmap); 210 __clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
220 */ 219 */
221static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 220static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
222{ 221{
223 struct pcidev_cookie *pcp; 222 struct iommu *iommu;
224 struct pci_iommu *iommu;
225 iopte_t *iopte; 223 iopte_t *iopte;
226 unsigned long flags, order, first_page; 224 unsigned long flags, order, first_page;
227 void *ret; 225 void *ret;
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
237 return NULL; 235 return NULL;
238 memset((char *)first_page, 0, PAGE_SIZE << order); 236 memset((char *)first_page, 0, PAGE_SIZE << order);
239 237
240 pcp = pdev->sysdata; 238 iommu = pdev->dev.archdata.iommu;
241 iommu = pcp->pbm->iommu;
242 239
243 spin_lock_irqsave(&iommu->lock, flags); 240 spin_lock_irqsave(&iommu->lock, flags);
244 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); 241 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
268/* Free and unmap a consistent DMA translation. */ 265/* Free and unmap a consistent DMA translation. */
269static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) 266static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
270{ 267{
271 struct pcidev_cookie *pcp; 268 struct iommu *iommu;
272 struct pci_iommu *iommu;
273 iopte_t *iopte; 269 iopte_t *iopte;
274 unsigned long flags, order, npages; 270 unsigned long flags, order, npages;
275 271
276 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 272 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
277 pcp = pdev->sysdata; 273 iommu = pdev->dev.archdata.iommu;
278 iommu = pcp->pbm->iommu;
279 iopte = iommu->page_table + 274 iopte = iommu->page_table +
280 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 275 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
281 276
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
295 */ 290 */
296static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) 291static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
297{ 292{
298 struct pcidev_cookie *pcp; 293 struct iommu *iommu;
299 struct pci_iommu *iommu; 294 struct strbuf *strbuf;
300 struct pci_strbuf *strbuf;
301 iopte_t *base; 295 iopte_t *base;
302 unsigned long flags, npages, oaddr; 296 unsigned long flags, npages, oaddr;
303 unsigned long i, base_paddr, ctx; 297 unsigned long i, base_paddr, ctx;
304 u32 bus_addr, ret; 298 u32 bus_addr, ret;
305 unsigned long iopte_protection; 299 unsigned long iopte_protection;
306 300
307 pcp = pdev->sysdata; 301 iommu = pdev->dev.archdata.iommu;
308 iommu = pcp->pbm->iommu; 302 strbuf = pdev->dev.archdata.stc;
309 strbuf = &pcp->pbm->stc;
310 303
311 if (unlikely(direction == PCI_DMA_NONE)) 304 if (unlikely(direction == PCI_DMA_NONE))
312 goto bad_no_ctx; 305 goto bad_no_ctx;
@@ -349,7 +342,7 @@ bad_no_ctx:
349 return PCI_DMA_ERROR_CODE; 342 return PCI_DMA_ERROR_CODE;
350} 343}
351 344
352static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction) 345static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
353{ 346{
354 int limit; 347 int limit;
355 348
@@ -416,9 +409,8 @@ do_flush_sync:
416/* Unmap a single streaming mode DMA translation. */ 409/* Unmap a single streaming mode DMA translation. */
417static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 410static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
418{ 411{
419 struct pcidev_cookie *pcp; 412 struct iommu *iommu;
420 struct pci_iommu *iommu; 413 struct strbuf *strbuf;
421 struct pci_strbuf *strbuf;
422 iopte_t *base; 414 iopte_t *base;
423 unsigned long flags, npages, ctx, i; 415 unsigned long flags, npages, ctx, i;
424 416
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
428 return; 420 return;
429 } 421 }
430 422
431 pcp = pdev->sysdata; 423 iommu = pdev->dev.archdata.iommu;
432 iommu = pcp->pbm->iommu; 424 strbuf = pdev->dev.archdata.stc;
433 strbuf = &pcp->pbm->stc;
434 425
435 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 426 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
436 npages >>= IO_PAGE_SHIFT; 427 npages >>= IO_PAGE_SHIFT;
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
549 */ 540 */
550static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 541static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
551{ 542{
552 struct pcidev_cookie *pcp; 543 struct iommu *iommu;
553 struct pci_iommu *iommu; 544 struct strbuf *strbuf;
554 struct pci_strbuf *strbuf;
555 unsigned long flags, ctx, npages, iopte_protection; 545 unsigned long flags, ctx, npages, iopte_protection;
556 iopte_t *base; 546 iopte_t *base;
557 u32 dma_base; 547 u32 dma_base;
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
570 return 1; 560 return 1;
571 } 561 }
572 562
573 pcp = pdev->sysdata; 563 iommu = pdev->dev.archdata.iommu;
574 iommu = pcp->pbm->iommu; 564 strbuf = pdev->dev.archdata.stc;
575 strbuf = &pcp->pbm->stc;
576 565
577 if (unlikely(direction == PCI_DMA_NONE)) 566 if (unlikely(direction == PCI_DMA_NONE))
578 goto bad_no_ctx; 567 goto bad_no_ctx;
@@ -636,9 +625,8 @@ bad_no_ctx:
636/* Unmap a set of streaming mode DMA translations. */ 625/* Unmap a set of streaming mode DMA translations. */
637static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 626static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
638{ 627{
639 struct pcidev_cookie *pcp; 628 struct iommu *iommu;
640 struct pci_iommu *iommu; 629 struct strbuf *strbuf;
641 struct pci_strbuf *strbuf;
642 iopte_t *base; 630 iopte_t *base;
643 unsigned long flags, ctx, i, npages; 631 unsigned long flags, ctx, i, npages;
644 u32 bus_addr; 632 u32 bus_addr;
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
648 WARN_ON(1); 636 WARN_ON(1);
649 } 637 }
650 638
651 pcp = pdev->sysdata; 639 iommu = pdev->dev.archdata.iommu;
652 iommu = pcp->pbm->iommu; 640 strbuf = pdev->dev.archdata.stc;
653 strbuf = &pcp->pbm->stc;
654 641
655 bus_addr = sglist->dma_address & IO_PAGE_MASK; 642 bus_addr = sglist->dma_address & IO_PAGE_MASK;
656 643
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
696 */ 683 */
697static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 684static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
698{ 685{
699 struct pcidev_cookie *pcp; 686 struct iommu *iommu;
700 struct pci_iommu *iommu; 687 struct strbuf *strbuf;
701 struct pci_strbuf *strbuf;
702 unsigned long flags, ctx, npages; 688 unsigned long flags, ctx, npages;
703 689
704 pcp = pdev->sysdata; 690 iommu = pdev->dev.archdata.iommu;
705 iommu = pcp->pbm->iommu; 691 strbuf = pdev->dev.archdata.stc;
706 strbuf = &pcp->pbm->stc;
707 692
708 if (!strbuf->strbuf_enabled) 693 if (!strbuf->strbuf_enabled)
709 return; 694 return;
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
736 */ 721 */
737static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 722static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
738{ 723{
739 struct pcidev_cookie *pcp; 724 struct iommu *iommu;
740 struct pci_iommu *iommu; 725 struct strbuf *strbuf;
741 struct pci_strbuf *strbuf;
742 unsigned long flags, ctx, npages, i; 726 unsigned long flags, ctx, npages, i;
743 u32 bus_addr; 727 u32 bus_addr;
744 728
745 pcp = pdev->sysdata; 729 iommu = pdev->dev.archdata.iommu;
746 iommu = pcp->pbm->iommu; 730 strbuf = pdev->dev.archdata.stc;
747 strbuf = &pcp->pbm->stc;
748 731
749 if (!strbuf->strbuf_enabled) 732 if (!strbuf->strbuf_enabled)
750 return; 733 return;
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
775 spin_unlock_irqrestore(&iommu->lock, flags); 758 spin_unlock_irqrestore(&iommu->lock, flags);
776} 759}
777 760
778struct pci_iommu_ops pci_sun4u_iommu_ops = { 761const struct pci_iommu_ops pci_sun4u_iommu_ops = {
779 .alloc_consistent = pci_4u_alloc_consistent, 762 .alloc_consistent = pci_4u_alloc_consistent,
780 .free_consistent = pci_4u_free_consistent, 763 .free_consistent = pci_4u_free_consistent,
781 .map_single = pci_4u_map_single, 764 .map_single = pci_4u_map_single,
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
809 792
810int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) 793int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
811{ 794{
812 struct pcidev_cookie *pcp = pdev->sysdata;
813 u64 dma_addr_mask; 795 u64 dma_addr_mask;
814 796
815 if (pdev == NULL) { 797 if (pdev == NULL) {
816 dma_addr_mask = 0xffffffff; 798 dma_addr_mask = 0xffffffff;
817 } else { 799 } else {
818 struct pci_iommu *iommu = pcp->pbm->iommu; 800 struct iommu *iommu = pdev->dev.archdata.iommu;
819 801
820 dma_addr_mask = iommu->dma_addr_mask; 802 dma_addr_mask = iommu->dma_addr_mask;
821 803
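The DMA-mask check now fetches the IOMMU from dev.archdata as well. The comparison itself sits outside this hunk, so the sketch below only assumes its usual shape: the device mask has to cover the IOMMU's DMA address mask.

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the final check; the real comparison is not shown here. */
static int dma_supported_sketch(uint64_t device_mask, uint64_t dma_addr_mask)
{
	return (device_mask & dma_addr_mask) == dma_addr_mask;
}

int main(void)
{
	uint64_t iommu_mask = 0xffffffffULL;	/* a typical 32-bit DMA window */

	/* Prints: 1 0 */
	printf("%d %d\n",
	       dma_supported_sketch(0xffffffffffffffffULL, iommu_mask),
	       dma_supported_sketch(0x00ffffffULL, iommu_mask));
	return 0;
}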
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index fda5db223d96..253d40ec2245 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1,7 +1,6 @@
1/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $ 1/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
2 * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
3 * 2 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) 3 * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) 5 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */ 6 */
@@ -119,6 +118,10 @@ static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
119 u16 tmp16; 118 u16 tmp16;
120 u8 tmp8; 119 u8 tmp8;
121 120
121 if (bus_dev == pbm->pci_bus && devfn == 0x00)
122 return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
123 size, value);
124
122 switch (size) { 125 switch (size) {
123 case 1: 126 case 1:
124 *value = 0xff; 127 *value = 0xff;
@@ -172,6 +175,9 @@ static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
172 unsigned char bus = bus_dev->number; 175 unsigned char bus = bus_dev->number;
173 u32 *addr; 176 u32 *addr;
174 177
178 if (bus_dev == pbm->pci_bus && devfn == 0x00)
179 return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
180 size, value);
175 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where); 181 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
176 if (!addr) 182 if (!addr)
177 return PCIBIOS_SUCCESSFUL; 183 return PCIBIOS_SUCCESSFUL;
@@ -263,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p,
263 struct pci_pbm_info *pbm, 269 struct pci_pbm_info *pbm,
264 int is_pbm_a) 270 int is_pbm_a)
265{ 271{
266 struct pci_strbuf *strbuf = &pbm->stc; 272 struct strbuf *strbuf = &pbm->stc;
267 unsigned long regbase = p->pbm_A.controller_regs; 273 unsigned long regbase = p->pbm_A.controller_regs;
268 unsigned long err_base, tag_base, line_base; 274 unsigned long err_base, tag_base, line_base;
269 u64 control; 275 u64 control;
@@ -412,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
412 unsigned long afar, 418 unsigned long afar,
413 enum psycho_error_type type) 419 enum psycho_error_type type)
414{ 420{
415 struct pci_iommu *iommu = p->pbm_A.iommu; 421 struct iommu *iommu = p->pbm_A.iommu;
416 unsigned long iommu_tag[16]; 422 unsigned long iommu_tag[16];
417 unsigned long iommu_data[16]; 423 unsigned long iommu_data[16];
418 unsigned long flags; 424 unsigned long flags;
@@ -895,59 +901,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p)
895} 901}
896 902
897/* PSYCHO boot time probing and initialization. */ 903/* PSYCHO boot time probing and initialization. */
898static void psycho_resource_adjust(struct pci_dev *pdev,
899 struct resource *res,
900 struct resource *root)
901{
902 res->start += root->start;
903 res->end += root->start;
904}
905
906static void psycho_base_address_update(struct pci_dev *pdev, int resource)
907{
908 struct pcidev_cookie *pcp = pdev->sysdata;
909 struct pci_pbm_info *pbm = pcp->pbm;
910 struct resource *res, *root;
911 u32 reg;
912 int where, size, is_64bit;
913
914 res = &pdev->resource[resource];
915 if (resource < 6) {
916 where = PCI_BASE_ADDRESS_0 + (resource * 4);
917 } else if (resource == PCI_ROM_RESOURCE) {
918 where = pdev->rom_base_reg;
919 } else {
920 /* Somebody might have asked allocation of a non-standard resource */
921 return;
922 }
923
924 is_64bit = 0;
925 if (res->flags & IORESOURCE_IO)
926 root = &pbm->io_space;
927 else {
928 root = &pbm->mem_space;
929 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
930 == PCI_BASE_ADDRESS_MEM_TYPE_64)
931 is_64bit = 1;
932 }
933
934 size = res->end - res->start;
935 pci_read_config_dword(pdev, where, &reg);
936 reg = ((reg & size) |
937 (((u32)(res->start - root->start)) & ~size));
938 if (resource == PCI_ROM_RESOURCE) {
939 reg |= PCI_ROM_ADDRESS_ENABLE;
940 res->flags |= IORESOURCE_ROM_ENABLE;
941 }
942 pci_write_config_dword(pdev, where, reg);
943
944 /* This knows that the upper 32-bits of the address
945 * must be zero. Our PCI common layer enforces this.
946 */
947 if (is_64bit)
948 pci_write_config_dword(pdev, where + 4, 0);
949}
950
951static void pbm_config_busmastering(struct pci_pbm_info *pbm) 904static void pbm_config_busmastering(struct pci_pbm_info *pbm)
952{ 905{
953 u8 *addr; 906 u8 *addr;
@@ -968,28 +921,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
968static void pbm_scan_bus(struct pci_controller_info *p, 921static void pbm_scan_bus(struct pci_controller_info *p,
969 struct pci_pbm_info *pbm) 922 struct pci_pbm_info *pbm)
970{ 923{
971 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 924 pbm->pci_bus = pci_scan_one_pbm(pbm);
972
973 if (!cookie) {
974 prom_printf("PSYCHO: Critical allocation failure.\n");
975 prom_halt();
976 }
977
978 /* All we care about is the PBM. */
979 cookie->pbm = pbm;
980
981 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
982 p->pci_ops,
983 pbm);
984 pci_fixup_host_bridge_self(pbm->pci_bus);
985 pbm->pci_bus->self->sysdata = cookie;
986
987 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
988 pci_record_assignments(pbm, pbm->pci_bus);
989 pci_assign_unassigned(pbm, pbm->pci_bus);
990 pci_fixup_irq(pbm, pbm->pci_bus);
991 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
992 pci_setup_busmastering(pbm, pbm->pci_bus);
993} 925}
994 926
995static void psycho_scan_bus(struct pci_controller_info *p) 927static void psycho_scan_bus(struct pci_controller_info *p)
@@ -1009,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p)
1009 941
1010static void psycho_iommu_init(struct pci_controller_info *p) 942static void psycho_iommu_init(struct pci_controller_info *p)
1011{ 943{
1012 struct pci_iommu *iommu = p->pbm_A.iommu; 944 struct iommu *iommu = p->pbm_A.iommu;
1013 unsigned long i; 945 unsigned long i;
1014 u64 control; 946 u64 control;
1015 947
@@ -1094,19 +1026,6 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
1094 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp); 1026 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
1095} 1027}
1096 1028
1097static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1098 struct pci_pbm_info *pbm)
1099{
1100 char *name = pbm->name;
1101
1102 pbm->io_space.name = pbm->mem_space.name = name;
1103
1104 request_resource(&ioport_resource, &pbm->io_space);
1105 request_resource(&iomem_resource, &pbm->mem_space);
1106 pci_register_legacy_regions(&pbm->io_space,
1107 &pbm->mem_space);
1108}
1109
1110static void psycho_pbm_strbuf_init(struct pci_controller_info *p, 1029static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
1111 struct pci_pbm_info *pbm, 1030 struct pci_pbm_info *pbm,
1112 int is_pbm_a) 1031 int is_pbm_a)
@@ -1172,19 +1091,11 @@ static void psycho_pbm_init(struct pci_controller_info *p,
1172 unsigned int *busrange; 1091 unsigned int *busrange;
1173 struct property *prop; 1092 struct property *prop;
1174 struct pci_pbm_info *pbm; 1093 struct pci_pbm_info *pbm;
1175 int len;
1176 1094
1177 if (is_pbm_a) { 1095 if (is_pbm_a)
1178 pbm = &p->pbm_A; 1096 pbm = &p->pbm_A;
1179 pbm->pci_first_slot = 1; 1097 else
1180 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
1181 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
1182 } else {
1183 pbm = &p->pbm_B; 1098 pbm = &p->pbm_B;
1184 pbm->pci_first_slot = 2;
1185 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
1186 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
1187 }
1188 1099
1189 pbm->chip_type = PBM_CHIP_TYPE_PSYCHO; 1100 pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
1190 pbm->chip_version = 0; 1101 pbm->chip_version = 0;
@@ -1196,41 +1107,15 @@ static void psycho_pbm_init(struct pci_controller_info *p,
1196 if (prop) 1107 if (prop)
1197 pbm->chip_revision = *(int *) prop->value; 1108 pbm->chip_revision = *(int *) prop->value;
1198 1109
1199 pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
1200 pbm->io_space.flags = IORESOURCE_IO;
1201 pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
1202 pbm->mem_space.flags = IORESOURCE_MEM;
1203
1204 pbm->parent = p; 1110 pbm->parent = p;
1205 pbm->prom_node = dp; 1111 pbm->prom_node = dp;
1206 pbm->name = dp->full_name; 1112 pbm->name = dp->full_name;
1207 1113
1208 pbm_register_toplevel_resources(p, pbm);
1209
1210 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", 1114 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n",
1211 pbm->name, 1115 pbm->name,
1212 pbm->chip_version, pbm->chip_revision); 1116 pbm->chip_version, pbm->chip_revision);
1213 1117
1214 prop = of_find_property(dp, "ranges", &len); 1118 pci_determine_mem_io_space(pbm);
1215 if (prop) {
1216 pbm->pbm_ranges = prop->value;
1217 pbm->num_pbm_ranges =
1218 (len / sizeof(struct linux_prom_pci_ranges));
1219 } else {
1220 pbm->num_pbm_ranges = 0;
1221 }
1222
1223 prop = of_find_property(dp, "interrupt-map", &len);
1224 if (prop) {
1225 pbm->pbm_intmap = prop->value;
1226 pbm->num_pbm_intmap =
1227 (len / sizeof(struct linux_prom_pci_intmap));
1228
1229 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1230 pbm->pbm_intmask = prop->value;
1231 } else {
1232 pbm->num_pbm_intmap = 0;
1233 }
1234 1119
1235 prop = of_find_property(dp, "bus-range", NULL); 1120 prop = of_find_property(dp, "bus-range", NULL);
1236 busrange = prop->value; 1121 busrange = prop->value;
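The hunk above drops psycho's private parsing of the "ranges", "interrupt-map" and "interrupt-map-mask" properties in favour of the shared pci_determine_mem_io_space() helper. Below is a minimal sketch (not the upstream body) of what a PBM setup reduces to once that helper owns the OBP parsing; it only uses names already visible in this diff, and example_pbm_setup is an illustrative name.

    /* Sketch: PBM init reduced to identity fields plus one call into
     * the common layer, which reads "ranges" and fills in the PBM's
     * IO/MEM resources.
     */
    static void example_pbm_setup(struct pci_controller_info *p,
                                  struct pci_pbm_info *pbm,
                                  struct device_node *dp)
    {
            pbm->parent = p;
            pbm->prom_node = dp;
            pbm->name = dp->full_name;

            pci_determine_mem_io_space(pbm);
    }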
@@ -1246,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name)
1246{ 1131{
1247 struct linux_prom64_registers *pr_regs; 1132 struct linux_prom64_registers *pr_regs;
1248 struct pci_controller_info *p; 1133 struct pci_controller_info *p;
1249 struct pci_iommu *iommu; 1134 struct iommu *iommu;
1250 struct property *prop; 1135 struct property *prop;
1251 u32 upa_portid; 1136 u32 upa_portid;
1252 int is_pbm_a; 1137 int is_pbm_a;
@@ -1269,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name)
1269 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1154 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1270 prom_halt(); 1155 prom_halt();
1271 } 1156 }
1272 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1157 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1273 if (!iommu) { 1158 if (!iommu) {
1274 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1159 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1275 prom_halt(); 1160 prom_halt();
@@ -1282,10 +1167,7 @@ void psycho_init(struct device_node *dp, char *model_name)
1282 p->pbm_A.portid = upa_portid; 1167 p->pbm_A.portid = upa_portid;
1283 p->pbm_B.portid = upa_portid; 1168 p->pbm_B.portid = upa_portid;
1284 p->index = pci_num_controllers++; 1169 p->index = pci_num_controllers++;
1285 p->pbms_same_domain = 0;
1286 p->scan_bus = psycho_scan_bus; 1170 p->scan_bus = psycho_scan_bus;
1287 p->base_address_update = psycho_base_address_update;
1288 p->resource_adjust = psycho_resource_adjust;
1289 p->pci_ops = &psycho_ops; 1171 p->pci_ops = &psycho_ops;
1290 1172
1291 prop = of_find_property(dp, "reg", NULL); 1173 prop = of_find_property(dp, "reg", NULL);
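With base_address_update, resource_adjust and pbms_same_domain removed, a controller now hands the generic PCI code little more than its scan hook and config-space accessors. A hedged sketch of that registration step, using only fields visible in this diff (the function name is illustrative):

    static void example_register_controller(struct pci_controller_info *p,
                                            struct pci_ops *ops,
                                            void (*scan)(struct pci_controller_info *))
    {
            p->next = pci_controller_root;          /* singly linked controller list */
            pci_controller_root = p;

            p->index = pci_num_controllers++;
            p->scan_bus = scan;                     /* e.g. psycho_scan_bus */
            p->pci_ops = ops;                       /* e.g. &psycho_ops */
    }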
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 94bb681f2323..397862fbd9e1 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1,7 +1,6 @@
1/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $ 1/* pci_sabre.c: Sabre specific PCI controller support.
2 * pci_sabre.c: Sabre specific PCI controller support.
3 * 2 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) 3 * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) 5 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */ 6 */
@@ -254,9 +253,6 @@ static int __sabre_out_of_range(struct pci_pbm_info *pbm,
254 return 0; 253 return 0;
255 254
256 return ((pbm->parent == 0) || 255 return ((pbm->parent == 0) ||
257 ((pbm == &pbm->parent->pbm_B) &&
258 (bus == pbm->pci_first_busno) &&
259 PCI_SLOT(devfn) > 8) ||
260 ((pbm == &pbm->parent->pbm_A) && 256 ((pbm == &pbm->parent->pbm_A) &&
261 (bus == pbm->pci_first_busno) && 257 (bus == pbm->pci_first_busno) &&
262 PCI_SLOT(devfn) > 8)); 258 PCI_SLOT(devfn) > 8));
@@ -322,6 +318,12 @@ static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
322static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn, 318static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
323 int where, int size, u32 *value) 319 int where, int size, u32 *value)
324{ 320{
321 struct pci_pbm_info *pbm = bus->sysdata;
322
323 if (bus == pbm->pci_bus && devfn == 0x00)
324 return pci_host_bridge_read_pci_cfg(bus, devfn, where,
325 size, value);
326
325 if (!bus->number && sabre_out_of_range(devfn)) { 327 if (!bus->number && sabre_out_of_range(devfn)) {
326 switch (size) { 328 switch (size) {
327 case 1: 329 case 1:
@@ -438,6 +440,12 @@ static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
438static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn, 440static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
439 int where, int size, u32 value) 441 int where, int size, u32 value)
440{ 442{
443 struct pci_pbm_info *pbm = bus->sysdata;
444
445 if (bus == pbm->pci_bus && devfn == 0x00)
446 return pci_host_bridge_write_pci_cfg(bus, devfn, where,
447 size, value);
448
441 if (bus->number) 449 if (bus->number)
442 return __sabre_write_pci_cfg(bus, devfn, where, size, value); 450 return __sabre_write_pci_cfg(bus, devfn, where, size, value);
443 451
@@ -490,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
490 unsigned long afsr, 498 unsigned long afsr,
491 unsigned long afar) 499 unsigned long afar)
492{ 500{
493 struct pci_iommu *iommu = p->pbm_A.iommu; 501 struct iommu *iommu = p->pbm_A.iommu;
494 unsigned long iommu_tag[16]; 502 unsigned long iommu_tag[16];
495 unsigned long iommu_data[16]; 503 unsigned long iommu_data[16];
496 unsigned long flags; 504 unsigned long flags;
@@ -710,8 +718,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
710 p->index); 718 p->index);
711 ret = IRQ_HANDLED; 719 ret = IRQ_HANDLED;
712 } 720 }
713 pci_read_config_word(sabre_root_bus->self, 721 pci_bus_read_config_word(sabre_root_bus, 0,
714 PCI_STATUS, &stat); 722 PCI_STATUS, &stat);
715 if (stat & (PCI_STATUS_PARITY | 723 if (stat & (PCI_STATUS_PARITY |
716 PCI_STATUS_SIG_TARGET_ABORT | 724 PCI_STATUS_SIG_TARGET_ABORT |
717 PCI_STATUS_REC_TARGET_ABORT | 725 PCI_STATUS_REC_TARGET_ABORT |
@@ -719,8 +727,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
719 PCI_STATUS_SIG_SYSTEM_ERROR)) { 727 PCI_STATUS_SIG_SYSTEM_ERROR)) {
720 printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n", 728 printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
721 p->index, stat); 729 p->index, stat);
722 pci_write_config_word(sabre_root_bus->self, 730 pci_bus_write_config_word(sabre_root_bus, 0,
723 PCI_STATUS, 0xffff); 731 PCI_STATUS, 0xffff);
724 ret = IRQ_HANDLED; 732 ret = IRQ_HANDLED;
725 } 733 }
726 return ret; 734 return ret;
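Since the fabricated host-bridge pci_dev (and the cookie hung off it) is gone, the error path above now goes through the generic bus-level accessors at devfn 0. A small sketch of that write-one-to-clear sequence; the root bus pointer is assumed to have been set by the scan code, as sabre_root_bus is in this file:

    static void example_clear_bridge_status(struct pci_bus *root)
    {
            u16 stat;

            pci_bus_read_config_word(root, 0, PCI_STATUS, &stat);
            if (stat) {
                    /* PCI status bits are write-one-to-clear. */
                    pci_bus_write_config_word(root, 0, PCI_STATUS, 0xffff);
            }
    }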
@@ -800,12 +808,10 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
800 if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) { 808 if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
801 sabre_check_iommu_error(p, afsr, afar); 809 sabre_check_iommu_error(p, afsr, afar);
802 pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus); 810 pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
803 pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
804 } 811 }
805 if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) { 812 if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA))
806 pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus); 813 pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
807 pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus); 814
808 }
809 /* For excessive retries, SABRE/PBM will abort the device 815 /* For excessive retries, SABRE/PBM will abort the device
810 * and there is no way to specifically check for excessive 816 * and there is no way to specifically check for excessive
811 * retries in the config space status registers. So what 817 * retries in the config space status registers. So what
@@ -813,10 +819,8 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
813 * abort events. 819 * abort events.
814 */ 820 */
815 821
816 if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) { 822 if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR))
817 pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus); 823 pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
818 pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
819 }
820 824
821 return IRQ_HANDLED; 825 return IRQ_HANDLED;
822} 826}
@@ -869,144 +873,52 @@ static void sabre_register_error_handlers(struct pci_controller_info *p)
869 sabre_write(base + SABRE_PCICTRL, tmp); 873 sabre_write(base + SABRE_PCICTRL, tmp);
870} 874}
871 875
872static void sabre_resource_adjust(struct pci_dev *pdev,
873 struct resource *res,
874 struct resource *root)
875{
876 struct pci_pbm_info *pbm = pdev->bus->sysdata;
877 unsigned long base;
878
879 if (res->flags & IORESOURCE_IO)
880 base = pbm->controller_regs + SABRE_IOSPACE;
881 else
882 base = pbm->controller_regs + SABRE_MEMSPACE;
883
884 res->start += base;
885 res->end += base;
886}
887
888static void sabre_base_address_update(struct pci_dev *pdev, int resource)
889{
890 struct pcidev_cookie *pcp = pdev->sysdata;
891 struct pci_pbm_info *pbm = pcp->pbm;
892 struct resource *res;
893 unsigned long base;
894 u32 reg;
895 int where, size, is_64bit;
896
897 res = &pdev->resource[resource];
898 if (resource < 6) {
899 where = PCI_BASE_ADDRESS_0 + (resource * 4);
900 } else if (resource == PCI_ROM_RESOURCE) {
901 where = pdev->rom_base_reg;
902 } else {
903		/* Somebody might have requested allocation of a non-standard resource */
904 return;
905 }
906
907 is_64bit = 0;
908 if (res->flags & IORESOURCE_IO)
909 base = pbm->controller_regs + SABRE_IOSPACE;
910 else {
911 base = pbm->controller_regs + SABRE_MEMSPACE;
912 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
913 == PCI_BASE_ADDRESS_MEM_TYPE_64)
914 is_64bit = 1;
915 }
916
917 size = res->end - res->start;
918 pci_read_config_dword(pdev, where, &reg);
919 reg = ((reg & size) |
920 (((u32)(res->start - base)) & ~size));
921 if (resource == PCI_ROM_RESOURCE) {
922 reg |= PCI_ROM_ADDRESS_ENABLE;
923 res->flags |= IORESOURCE_ROM_ENABLE;
924 }
925 pci_write_config_dword(pdev, where, reg);
926
927 /* This knows that the upper 32-bits of the address
928 * must be zero. Our PCI common layer enforces this.
929 */
930 if (is_64bit)
931 pci_write_config_dword(pdev, where + 4, 0);
932}
933
934static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) 876static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
935{ 877{
936 struct pci_dev *pdev; 878 struct pci_dev *pdev;
937 879
938 list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { 880 list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
939
940 if (pdev->vendor == PCI_VENDOR_ID_SUN && 881 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
941 pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { 882 pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
942 u32 word32;
943 u16 word16; 883 u16 word16;
944 884
945 sabre_read_pci_cfg(pdev->bus, pdev->devfn, 885 pci_read_config_word(pdev, PCI_COMMAND, &word16);
946 PCI_COMMAND, 2, &word32);
947 word16 = (u16) word32;
948 word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | 886 word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
949 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | 887 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
950 PCI_COMMAND_IO; 888 PCI_COMMAND_IO;
951 word32 = (u32) word16; 889 pci_write_config_word(pdev, PCI_COMMAND, word16);
952 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
953 PCI_COMMAND, 2, word32);
954 890
955 /* Status register bits are "write 1 to clear". */ 891 /* Status register bits are "write 1 to clear". */
956 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 892 pci_write_config_word(pdev, PCI_STATUS, 0xffff);
957 PCI_STATUS, 2, 0xffff); 893 pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff);
958 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
959 PCI_SEC_STATUS, 2, 0xffff);
960 894
961			/* Use a primary/secondary latency timer value 895			/* Use a primary/secondary latency timer value
962 * of 64. 896 * of 64.
963 */ 897 */
964 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 898 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
965 PCI_LATENCY_TIMER, 1, 64); 899 pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
966 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
967 PCI_SEC_LATENCY_TIMER, 1, 64);
968 900
969 /* Enable reporting/forwarding of master aborts, 901 /* Enable reporting/forwarding of master aborts,
970 * parity, and SERR. 902 * parity, and SERR.
971 */ 903 */
972 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 904 pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL,
973 PCI_BRIDGE_CONTROL, 1, 905 (PCI_BRIDGE_CTL_PARITY |
974 (PCI_BRIDGE_CTL_PARITY | 906 PCI_BRIDGE_CTL_SERR |
975 PCI_BRIDGE_CTL_SERR | 907 PCI_BRIDGE_CTL_MASTER_ABORT));
976 PCI_BRIDGE_CTL_MASTER_ABORT));
977 } 908 }
978 } 909 }
979} 910}
980 911
981static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
982{
983 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
984
985 if (!cookie) {
986 prom_printf("SABRE: Critical allocation failure.\n");
987 prom_halt();
988 }
989
990 /* All we care about is the PBM. */
991 cookie->pbm = pbm;
992
993 return cookie;
994}
995
996static void sabre_scan_bus(struct pci_controller_info *p) 912static void sabre_scan_bus(struct pci_controller_info *p)
997{ 913{
998 static int once; 914 static int once;
999 struct pci_bus *sabre_bus, *pbus; 915 struct pci_bus *pbus;
1000 struct pci_pbm_info *pbm;
1001 struct pcidev_cookie *cookie;
1002 int sabres_scanned;
1003 916
1004 /* The APB bridge speaks to the Sabre host PCI bridge 917 /* The APB bridge speaks to the Sabre host PCI bridge
1005 * at 66Mhz, but the front side of APB runs at 33Mhz 918 * at 66Mhz, but the front side of APB runs at 33Mhz
1006 * for both segments. 919 * for both segments.
1007 */ 920 */
1008 p->pbm_A.is_66mhz_capable = 0; 921 p->pbm_A.is_66mhz_capable = 0;
1009 p->pbm_B.is_66mhz_capable = 0;
1010 922
1011 /* This driver has not been verified to handle 923 /* This driver has not been verified to handle
1012 * multiple SABREs yet, so trap this. 924 * multiple SABREs yet, so trap this.
@@ -1020,56 +932,13 @@ static void sabre_scan_bus(struct pci_controller_info *p)
1020 } 932 }
1021 once++; 933 once++;
1022 934
1023 cookie = alloc_bridge_cookie(&p->pbm_A); 935 pbus = pci_scan_one_pbm(&p->pbm_A);
1024 936 if (!pbus)
1025 sabre_bus = pci_scan_bus(p->pci_first_busno, 937 return;
1026 p->pci_ops,
1027 &p->pbm_A);
1028 pci_fixup_host_bridge_self(sabre_bus);
1029 sabre_bus->self->sysdata = cookie;
1030
1031 sabre_root_bus = sabre_bus;
1032
1033 apb_init(p, sabre_bus);
1034
1035 sabres_scanned = 0;
1036
1037 list_for_each_entry(pbus, &sabre_bus->children, node) {
1038
1039 if (pbus->number == p->pbm_A.pci_first_busno) {
1040 pbm = &p->pbm_A;
1041 } else if (pbus->number == p->pbm_B.pci_first_busno) {
1042 pbm = &p->pbm_B;
1043 } else
1044 continue;
1045
1046 cookie = alloc_bridge_cookie(pbm);
1047 pbus->self->sysdata = cookie;
1048
1049 sabres_scanned++;
1050 938
1051 pbus->sysdata = pbm; 939 sabre_root_bus = pbus;
1052 pbm->pci_bus = pbus;
1053 pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
1054 pci_record_assignments(pbm, pbus);
1055 pci_assign_unassigned(pbm, pbus);
1056 pci_fixup_irq(pbm, pbus);
1057 pci_determine_66mhz_disposition(pbm, pbus);
1058 pci_setup_busmastering(pbm, pbus);
1059 }
1060 940
1061 if (!sabres_scanned) { 941 apb_init(p, pbus);
1062 /* Hummingbird, no APBs. */
1063 pbm = &p->pbm_A;
1064 sabre_bus->sysdata = pbm;
1065 pbm->pci_bus = sabre_bus;
1066 pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
1067 pci_record_assignments(pbm, sabre_bus);
1068 pci_assign_unassigned(pbm, sabre_bus);
1069 pci_fixup_irq(pbm, sabre_bus);
1070 pci_determine_66mhz_disposition(pbm, sabre_bus);
1071 pci_setup_busmastering(pbm, sabre_bus);
1072 }
1073 942
1074 sabre_register_error_handlers(p); 943 sabre_register_error_handlers(p);
1075} 944}
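sabre_scan_bus now leans on the new pci_scan_one_pbm() helper rather than hand-rolling bridge cookies and per-bus fixups. The sketch below shows the general shape of a scan routine written against that helper; the helper's internals live in the common PCI code and are not reproduced here, and example_scan_bus is an illustrative name.

    static void example_scan_bus(struct pci_controller_info *p)
    {
            struct pci_bus *pbus;

            /* One call per PBM: the common code scans the bus, records
             * resource assignments and fixes up IRQs.
             */
            pbus = pci_scan_one_pbm(&p->pbm_A);
            if (!pbus)
                    return;

            /* Controller-specific post-scan work goes here, e.g.
             * apb_init() or registering error interrupt handlers.
             */
    }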
@@ -1078,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1078 int tsbsize, unsigned long dvma_offset, 947 int tsbsize, unsigned long dvma_offset,
1079 u32 dma_mask) 948 u32 dma_mask)
1080{ 949{
1081 struct pci_iommu *iommu = p->pbm_A.iommu; 950 struct iommu *iommu = p->pbm_A.iommu;
1082 unsigned long i; 951 unsigned long i;
1083 u64 control; 952 u64 control;
1084 953
@@ -1126,224 +995,31 @@ static void sabre_iommu_init(struct pci_controller_info *p,
1126 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); 995 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1127} 996}
1128 997
1129static void pbm_register_toplevel_resources(struct pci_controller_info *p, 998static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp)
1130 struct pci_pbm_info *pbm)
1131{
1132 char *name = pbm->name;
1133 unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
1134 unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1135 unsigned int devfn;
1136 unsigned long first, last, i;
1137 u8 *addr, map;
1138
1139 sprintf(name, "SABRE%d PBM%c",
1140 p->index,
1141 (pbm == &p->pbm_A ? 'A' : 'B'));
1142 pbm->io_space.name = pbm->mem_space.name = name;
1143
1144 devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
1145 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
1146 map = 0;
1147 pci_config_read8(addr, &map);
1148
1149 first = 8;
1150 last = 0;
1151 for (i = 0; i < 8; i++) {
1152 if ((map & (1 << i)) != 0) {
1153 if (first > i)
1154 first = i;
1155 if (last < i)
1156 last = i;
1157 }
1158 }
1159 pbm->io_space.start = ibase + (first << 21UL);
1160 pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1);
1161 pbm->io_space.flags = IORESOURCE_IO;
1162
1163 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
1164 map = 0;
1165 pci_config_read8(addr, &map);
1166
1167 first = 8;
1168 last = 0;
1169 for (i = 0; i < 8; i++) {
1170 if ((map & (1 << i)) != 0) {
1171 if (first > i)
1172 first = i;
1173 if (last < i)
1174 last = i;
1175 }
1176 }
1177 pbm->mem_space.start = mbase + (first << 29UL);
1178 pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1);
1179 pbm->mem_space.flags = IORESOURCE_MEM;
1180
1181 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1182 prom_printf("Cannot register PBM-%c's IO space.\n",
1183 (pbm == &p->pbm_A ? 'A' : 'B'));
1184 prom_halt();
1185 }
1186 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1187 prom_printf("Cannot register PBM-%c's MEM space.\n",
1188 (pbm == &p->pbm_A ? 'A' : 'B'));
1189 prom_halt();
1190 }
1191
1192 /* Register legacy regions if this PBM covers that area. */
1193 if (pbm->io_space.start == ibase &&
1194 pbm->mem_space.start == mbase)
1195 pci_register_legacy_regions(&pbm->io_space,
1196 &pbm->mem_space);
1197}
1198
1199static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_start, u32 dma_end)
1200{ 999{
1201 struct pci_pbm_info *pbm; 1000 struct pci_pbm_info *pbm;
1202 struct device_node *node;
1203 struct property *prop;
1204 u32 *busrange;
1205 int len, simbas_found;
1206
1207 simbas_found = 0;
1208 node = dp->child;
1209 while (node != NULL) {
1210 if (strcmp(node->name, "pci"))
1211 goto next_pci;
1212
1213 prop = of_find_property(node, "model", NULL);
1214 if (!prop || strncmp(prop->value, "SUNW,simba", prop->length))
1215 goto next_pci;
1216
1217 simbas_found++;
1218
1219 prop = of_find_property(node, "bus-range", NULL);
1220 busrange = prop->value;
1221 if (busrange[0] == 1)
1222 pbm = &p->pbm_B;
1223 else
1224 pbm = &p->pbm_A;
1225
1226 pbm->name = node->full_name;
1227 printk("%s: SABRE PCI Bus Module\n", pbm->name);
1228
1229 pbm->chip_type = PBM_CHIP_TYPE_SABRE;
1230 pbm->parent = p;
1231 pbm->prom_node = node;
1232 pbm->pci_first_slot = 1;
1233 pbm->pci_first_busno = busrange[0];
1234 pbm->pci_last_busno = busrange[1];
1235
1236 prop = of_find_property(node, "ranges", &len);
1237 if (prop) {
1238 pbm->pbm_ranges = prop->value;
1239 pbm->num_pbm_ranges =
1240 (len / sizeof(struct linux_prom_pci_ranges));
1241 } else {
1242 pbm->num_pbm_ranges = 0;
1243 }
1244 1001
1245 prop = of_find_property(node, "interrupt-map", &len); 1002 pbm = &p->pbm_A;
1246 if (prop) { 1003 pbm->name = dp->full_name;
1247 pbm->pbm_intmap = prop->value; 1004 printk("%s: SABRE PCI Bus Module\n", pbm->name);
1248 pbm->num_pbm_intmap =
1249 (len / sizeof(struct linux_prom_pci_intmap));
1250
1251 prop = of_find_property(node, "interrupt-map-mask",
1252 NULL);
1253 pbm->pbm_intmask = prop->value;
1254 } else {
1255 pbm->num_pbm_intmap = 0;
1256 }
1257 1005
1258 pbm_register_toplevel_resources(p, pbm); 1006 pbm->chip_type = PBM_CHIP_TYPE_SABRE;
1259 1007 pbm->parent = p;
1260 next_pci: 1008 pbm->prom_node = dp;
1261 node = node->sibling; 1009 pbm->pci_first_busno = p->pci_first_busno;
1262 } 1010 pbm->pci_last_busno = p->pci_last_busno;
1263 if (simbas_found == 0) {
1264 struct resource *rp;
1265 1011
1266 /* No APBs underneath, probably this is a hummingbird 1012 pci_determine_mem_io_space(pbm);
1267 * system.
1268 */
1269 pbm = &p->pbm_A;
1270 pbm->parent = p;
1271 pbm->prom_node = dp;
1272 pbm->pci_first_busno = p->pci_first_busno;
1273 pbm->pci_last_busno = p->pci_last_busno;
1274
1275 prop = of_find_property(dp, "ranges", &len);
1276 if (prop) {
1277 pbm->pbm_ranges = prop->value;
1278 pbm->num_pbm_ranges =
1279 (len / sizeof(struct linux_prom_pci_ranges));
1280 } else {
1281 pbm->num_pbm_ranges = 0;
1282 }
1283
1284 prop = of_find_property(dp, "interrupt-map", &len);
1285 if (prop) {
1286 pbm->pbm_intmap = prop->value;
1287 pbm->num_pbm_intmap =
1288 (len / sizeof(struct linux_prom_pci_intmap));
1289
1290 prop = of_find_property(dp, "interrupt-map-mask",
1291 NULL);
1292 pbm->pbm_intmask = prop->value;
1293 } else {
1294 pbm->num_pbm_intmap = 0;
1295 }
1296
1297 pbm->name = dp->full_name;
1298 printk("%s: SABRE PCI Bus Module\n", pbm->name);
1299
1300 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1301
1302 /* Hack up top-level resources. */
1303 pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
1304 pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL;
1305 pbm->io_space.flags = IORESOURCE_IO;
1306
1307 pbm->mem_space.start =
1308 (p->pbm_A.controller_regs + SABRE_MEMSPACE);
1309 pbm->mem_space.end =
1310 (pbm->mem_space.start + ((1UL << 32UL) - 1UL));
1311 pbm->mem_space.flags = IORESOURCE_MEM;
1312
1313 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1314 prom_printf("Cannot register Hummingbird's IO space.\n");
1315 prom_halt();
1316 }
1317 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1318 prom_printf("Cannot register Hummingbird's MEM space.\n");
1319 prom_halt();
1320 }
1321
1322 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1323 if (!rp) {
1324 prom_printf("Cannot allocate IOMMU resource.\n");
1325 prom_halt();
1326 }
1327 rp->name = "IOMMU";
1328 rp->start = pbm->mem_space.start + (unsigned long) dma_start;
1329 rp->end = pbm->mem_space.start + (unsigned long) dma_end - 1UL;
1330 rp->flags = IORESOURCE_BUSY;
1331 request_resource(&pbm->mem_space, rp);
1332
1333 pci_register_legacy_regions(&pbm->io_space,
1334 &pbm->mem_space);
1335 }
1336} 1013}
1337 1014
1338void sabre_init(struct device_node *dp, char *model_name) 1015void sabre_init(struct device_node *dp, char *model_name)
1339{ 1016{
1340 struct linux_prom64_registers *pr_regs; 1017 const struct linux_prom64_registers *pr_regs;
1341 struct pci_controller_info *p; 1018 struct pci_controller_info *p;
1342 struct pci_iommu *iommu; 1019 struct iommu *iommu;
1343 struct property *prop;
1344 int tsbsize; 1020 int tsbsize;
1345 u32 *busrange; 1021 const u32 *busrange;
1346 u32 *vdma; 1022 const u32 *vdma;
1347 u32 upa_portid, dma_mask; 1023 u32 upa_portid, dma_mask;
1348 u64 clear_irq; 1024 u64 clear_irq;
1349 1025
@@ -1351,13 +1027,9 @@ void sabre_init(struct device_node *dp, char *model_name)
1351 if (!strcmp(model_name, "pci108e,a001")) 1027 if (!strcmp(model_name, "pci108e,a001"))
1352 hummingbird_p = 1; 1028 hummingbird_p = 1;
1353 else if (!strcmp(model_name, "SUNW,sabre")) { 1029 else if (!strcmp(model_name, "SUNW,sabre")) {
1354 prop = of_find_property(dp, "compatible", NULL); 1030 const char *compat = of_get_property(dp, "compatible", NULL);
1355 if (prop) { 1031 if (compat && !strcmp(compat, "pci108e,a001"))
1356 const char *compat = prop->value; 1032 hummingbird_p = 1;
1357
1358 if (!strcmp(compat, "pci108e,a001"))
1359 hummingbird_p = 1;
1360 }
1361 if (!hummingbird_p) { 1033 if (!hummingbird_p) {
1362 struct device_node *dp; 1034 struct device_node *dp;
1363 1035
@@ -1381,37 +1053,28 @@ void sabre_init(struct device_node *dp, char *model_name)
1381 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); 1053 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
1382 prom_halt(); 1054 prom_halt();
1383 } 1055 }
1384 p->pbm_A.iommu = p->pbm_B.iommu = iommu; 1056 p->pbm_A.iommu = iommu;
1385 1057
1386 upa_portid = 0xff; 1058 upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
1387 prop = of_find_property(dp, "upa-portid", NULL);
1388 if (prop)
1389 upa_portid = *(u32 *) prop->value;
1390 1059
1391 p->next = pci_controller_root; 1060 p->next = pci_controller_root;
1392 pci_controller_root = p; 1061 pci_controller_root = p;
1393 1062
1394 p->pbm_A.portid = upa_portid; 1063 p->pbm_A.portid = upa_portid;
1395 p->pbm_B.portid = upa_portid;
1396 p->index = pci_num_controllers++; 1064 p->index = pci_num_controllers++;
1397 p->pbms_same_domain = 1;
1398 p->scan_bus = sabre_scan_bus; 1065 p->scan_bus = sabre_scan_bus;
1399 p->base_address_update = sabre_base_address_update;
1400 p->resource_adjust = sabre_resource_adjust;
1401 p->pci_ops = &sabre_ops; 1066 p->pci_ops = &sabre_ops;
1402 1067
1403 /* 1068 /*
1404 * Map in SABRE register set and report the presence of this SABRE. 1069 * Map in SABRE register set and report the presence of this SABRE.
1405 */ 1070 */
1406 1071
1407 prop = of_find_property(dp, "reg", NULL); 1072 pr_regs = of_get_property(dp, "reg", NULL);
1408 pr_regs = prop->value;
1409 1073
1410 /* 1074 /*
1411 * First REG in property is base of entire SABRE register space. 1075 * First REG in property is base of entire SABRE register space.
1412 */ 1076 */
1413 p->pbm_A.controller_regs = pr_regs[0].phys_addr; 1077 p->pbm_A.controller_regs = pr_regs[0].phys_addr;
1414 p->pbm_B.controller_regs = pr_regs[0].phys_addr;
1415 1078
1416 /* Clear interrupts */ 1079 /* Clear interrupts */
1417 1080
@@ -1429,11 +1092,10 @@ void sabre_init(struct device_node *dp, char *model_name)
1429 SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN)); 1092 SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
1430 1093
1431 /* Now map in PCI config space for entire SABRE. */ 1094 /* Now map in PCI config space for entire SABRE. */
1432 p->pbm_A.config_space = p->pbm_B.config_space = 1095 p->pbm_A.config_space =
1433 (p->pbm_A.controller_regs + SABRE_CONFIGSPACE); 1096 (p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
1434 1097
1435 prop = of_find_property(dp, "virtual-dma", NULL); 1098 vdma = of_get_property(dp, "virtual-dma", NULL);
1436 vdma = prop->value;
1437 1099
1438 dma_mask = vdma[0]; 1100 dma_mask = vdma[0];
1439 switch(vdma[1]) { 1101 switch(vdma[1]) {
@@ -1457,13 +1119,12 @@ void sabre_init(struct device_node *dp, char *model_name)
1457 1119
1458 sabre_iommu_init(p, tsbsize, vdma[0], dma_mask); 1120 sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
1459 1121
1460 prop = of_find_property(dp, "bus-range", NULL); 1122 busrange = of_get_property(dp, "bus-range", NULL);
1461 busrange = prop->value;
1462 p->pci_first_busno = busrange[0]; 1123 p->pci_first_busno = busrange[0];
1463 p->pci_last_busno = busrange[1]; 1124 p->pci_last_busno = busrange[1];
1464 1125
1465 /* 1126 /*
1466 * Look for APB underneath. 1127 * Look for APB underneath.
1467 */ 1128 */
1468 sabre_pbm_init(p, dp, vdma[0], vdma[0] + vdma[1]); 1129 sabre_pbm_init(p, dp);
1469} 1130}
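A conversion that repeats through this file (and the two below) is the switch from of_find_property() plus a prop->value dereference to of_get_property() and of_getintprop_default(), which hand back const data directly. A short sketch of the new idiom; the property names are ones the driver already reads, and the wrapper function is illustrative:

    static u32 example_read_props(struct device_node *dp, const u32 **vdma)
    {
            /* Single integer property, with a default when it is absent. */
            u32 portid = of_getintprop_default(dp, "upa-portid", 0xff);

            /* Multi-cell property, returned as a const pointer. */
            *vdma = of_get_property(dp, "virtual-dma", NULL);

            return portid;
    }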
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 66911b126aed..91a7385e5d32 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1,7 +1,6 @@
1/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $ 1/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
2 * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
3 * 2 *
4 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2001, 2002, 2003, 2007 David S. Miller (davem@davemloft.net)
5 */ 4 */
6 5
7#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -126,6 +125,9 @@ static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
126 u16 tmp16; 125 u16 tmp16;
127 u8 tmp8; 126 u8 tmp8;
128 127
128 if (bus_dev == pbm->pci_bus && devfn == 0x00)
129 return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
130 size, value);
129 switch (size) { 131 switch (size) {
130 case 1: 132 case 1:
131 *value = 0xff; 133 *value = 0xff;
@@ -179,6 +181,9 @@ static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
179 unsigned char bus = bus_dev->number; 181 unsigned char bus = bus_dev->number;
180 u32 *addr; 182 u32 *addr;
181 183
184 if (bus_dev == pbm->pci_bus && devfn == 0x00)
185 return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
186 size, value);
182 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where); 187 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
183 if (!addr) 188 if (!addr)
184 return PCIBIOS_SUCCESSFUL; 189 return PCIBIOS_SUCCESSFUL;
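The sabre and schizo accessors gain the same guard: config cycles aimed at devfn 0 on the PBM's root bus are redirected to pci_host_bridge_read_pci_cfg()/pci_host_bridge_write_pci_cfg(), which presumably service the host-bridge slot from common code instead of touching hardware. A sketch of the read side, with the hardware fallback elided:

    static int example_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                    int where, int size, u32 *value)
    {
            struct pci_pbm_info *pbm = bus_dev->sysdata;

            /* devfn 0 on the root bus is the host bridge itself. */
            if (bus_dev == pbm->pci_bus && devfn == 0x00)
                    return pci_host_bridge_read_pci_cfg(bus_dev, devfn,
                                                        where, size, value);

            /* ...otherwise fall through to the real config accessor. */
            return PCIBIOS_SUCCESSFUL;
    }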
@@ -274,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
274static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, 279static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
275 enum schizo_error_type type) 280 enum schizo_error_type type)
276{ 281{
277 struct pci_strbuf *strbuf = &pbm->stc; 282 struct strbuf *strbuf = &pbm->stc;
278 unsigned long regbase = pbm->pbm_regs; 283 unsigned long regbase = pbm->pbm_regs;
279 unsigned long err_base, tag_base, line_base; 284 unsigned long err_base, tag_base, line_base;
280 u64 control; 285 u64 control;
@@ -382,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
382static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, 387static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
383 enum schizo_error_type type) 388 enum schizo_error_type type)
384{ 389{
385 struct pci_iommu *iommu = pbm->iommu; 390 struct iommu *iommu = pbm->iommu;
386 unsigned long iommu_tag[16]; 391 unsigned long iommu_tag[16];
387 unsigned long iommu_data[16]; 392 unsigned long iommu_data[16];
388 unsigned long flags; 393 unsigned long flags;
@@ -1229,42 +1234,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
1229 pci_config_write8(addr, 64); 1234 pci_config_write8(addr, 64);
1230} 1235}
1231 1236
1232static void pbm_scan_bus(struct pci_controller_info *p, 1237static void schizo_scan_bus(struct pci_controller_info *p)
1233 struct pci_pbm_info *pbm)
1234{
1235 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1236
1237 if (!cookie) {
1238 prom_printf("%s: Critical allocation failure.\n", pbm->name);
1239 prom_halt();
1240 }
1241
1242 /* All we care about is the PBM. */
1243 cookie->pbm = pbm;
1244
1245 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1246 p->pci_ops,
1247 pbm);
1248 pci_fixup_host_bridge_self(pbm->pci_bus);
1249 pbm->pci_bus->self->sysdata = cookie;
1250
1251 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1252 pci_record_assignments(pbm, pbm->pci_bus);
1253 pci_assign_unassigned(pbm, pbm->pci_bus);
1254 pci_fixup_irq(pbm, pbm->pci_bus);
1255 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1256 pci_setup_busmastering(pbm, pbm->pci_bus);
1257}
1258
1259static void __schizo_scan_bus(struct pci_controller_info *p,
1260 int chip_type)
1261{ 1238{
1262 if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
1263 printk("PCI: Only one PCI bus module of controller found.\n");
1264 printk("PCI: Ignoring entire controller.\n");
1265 return;
1266 }
1267
1268 pbm_config_busmastering(&p->pbm_B); 1239 pbm_config_busmastering(&p->pbm_B);
1269 p->pbm_B.is_66mhz_capable = 1240 p->pbm_B.is_66mhz_capable =
1270 (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL) 1241 (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL)
@@ -1273,154 +1244,19 @@ static void __schizo_scan_bus(struct pci_controller_info *p,
1273 p->pbm_A.is_66mhz_capable = 1244 p->pbm_A.is_66mhz_capable =
1274 (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL) 1245 (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL)
1275 != NULL); 1246 != NULL);
1276 pbm_scan_bus(p, &p->pbm_B); 1247
1277 pbm_scan_bus(p, &p->pbm_A); 1248 p->pbm_B.pci_bus = pci_scan_one_pbm(&p->pbm_B);
1249 p->pbm_A.pci_bus = pci_scan_one_pbm(&p->pbm_A);
1278 1250
1279 /* After the PCI bus scan is complete, we can register 1251 /* After the PCI bus scan is complete, we can register
1280 * the error interrupt handlers. 1252 * the error interrupt handlers.
1281 */ 1253 */
1282 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) 1254 if (p->pbm_B.chip_type == PBM_CHIP_TYPE_TOMATILLO)
1283 tomatillo_register_error_handlers(p); 1255 tomatillo_register_error_handlers(p);
1284 else 1256 else
1285 schizo_register_error_handlers(p); 1257 schizo_register_error_handlers(p);
1286} 1258}
1287 1259
1288static void schizo_scan_bus(struct pci_controller_info *p)
1289{
1290 __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
1291}
1292
1293static void tomatillo_scan_bus(struct pci_controller_info *p)
1294{
1295 __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
1296}
1297
1298static void schizo_base_address_update(struct pci_dev *pdev, int resource)
1299{
1300 struct pcidev_cookie *pcp = pdev->sysdata;
1301 struct pci_pbm_info *pbm = pcp->pbm;
1302 struct resource *res, *root;
1303 u32 reg;
1304 int where, size, is_64bit;
1305
1306 res = &pdev->resource[resource];
1307 if (resource < 6) {
1308 where = PCI_BASE_ADDRESS_0 + (resource * 4);
1309 } else if (resource == PCI_ROM_RESOURCE) {
1310 where = pdev->rom_base_reg;
1311 } else {
1312		/* Somebody might have requested allocation of a non-standard resource */
1313 return;
1314 }
1315
1316 is_64bit = 0;
1317 if (res->flags & IORESOURCE_IO)
1318 root = &pbm->io_space;
1319 else {
1320 root = &pbm->mem_space;
1321 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1322 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1323 is_64bit = 1;
1324 }
1325
1326 size = res->end - res->start;
1327 pci_read_config_dword(pdev, where, &reg);
1328 reg = ((reg & size) |
1329 (((u32)(res->start - root->start)) & ~size));
1330 if (resource == PCI_ROM_RESOURCE) {
1331 reg |= PCI_ROM_ADDRESS_ENABLE;
1332 res->flags |= IORESOURCE_ROM_ENABLE;
1333 }
1334 pci_write_config_dword(pdev, where, reg);
1335
1336 /* This knows that the upper 32-bits of the address
1337 * must be zero. Our PCI common layer enforces this.
1338 */
1339 if (is_64bit)
1340 pci_write_config_dword(pdev, where + 4, 0);
1341}
1342
1343static void schizo_resource_adjust(struct pci_dev *pdev,
1344 struct resource *res,
1345 struct resource *root)
1346{
1347 res->start += root->start;
1348 res->end += root->start;
1349}
1350
1351/* Use ranges property to determine where PCI MEM, I/O, and Config
1352 * space are for this PCI bus module.
1353 */
1354static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
1355{
1356 int i, saw_cfg, saw_mem, saw_io;
1357
1358 saw_cfg = saw_mem = saw_io = 0;
1359 for (i = 0; i < pbm->num_pbm_ranges; i++) {
1360 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
1361 unsigned long a;
1362 int type;
1363
1364 type = (pr->child_phys_hi >> 24) & 0x3;
1365 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
1366 ((unsigned long)pr->parent_phys_lo << 0UL));
1367
1368 switch (type) {
1369 case 0:
1370 /* PCI config space, 16MB */
1371 pbm->config_space = a;
1372 saw_cfg = 1;
1373 break;
1374
1375 case 1:
1376 /* 16-bit IO space, 16MB */
1377 pbm->io_space.start = a;
1378 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
1379 pbm->io_space.flags = IORESOURCE_IO;
1380 saw_io = 1;
1381 break;
1382
1383 case 2:
1384 /* 32-bit MEM space, 2GB */
1385 pbm->mem_space.start = a;
1386 pbm->mem_space.end = a + (0x80000000UL - 1UL);
1387 pbm->mem_space.flags = IORESOURCE_MEM;
1388 saw_mem = 1;
1389 break;
1390
1391 default:
1392 break;
1393 };
1394 }
1395
1396 if (!saw_cfg || !saw_io || !saw_mem) {
1397 prom_printf("%s: Fatal error, missing %s PBM range.\n",
1398 pbm->name,
1399 ((!saw_cfg ?
1400 "CFG" :
1401 (!saw_io ?
1402 "IO" : "MEM"))));
1403 prom_halt();
1404 }
1405
1406 printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
1407 pbm->name,
1408 pbm->config_space,
1409 pbm->io_space.start,
1410 pbm->mem_space.start);
1411}
1412
1413static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1414 struct pci_pbm_info *pbm)
1415{
1416 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1417
1418 request_resource(&ioport_resource, &pbm->io_space);
1419 request_resource(&iomem_resource, &pbm->mem_space);
1420 pci_register_legacy_regions(&pbm->io_space,
1421 &pbm->mem_space);
1422}
1423
1424#define SCHIZO_STRBUF_CONTROL (0x02800UL) 1260#define SCHIZO_STRBUF_CONTROL (0x02800UL)
1425#define SCHIZO_STRBUF_FLUSH (0x02808UL) 1261#define SCHIZO_STRBUF_FLUSH (0x02808UL)
1426#define SCHIZO_STRBUF_FSYNC (0x02810UL) 1262#define SCHIZO_STRBUF_FSYNC (0x02810UL)
@@ -1472,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1472 1308
1473static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) 1309static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1474{ 1310{
1475 struct pci_iommu *iommu = pbm->iommu; 1311 struct iommu *iommu = pbm->iommu;
1476 unsigned long i, tagbase, database; 1312 unsigned long i, tagbase, database;
1477 struct property *prop; 1313 struct property *prop;
1478 u32 vdma[2], dma_mask; 1314 u32 vdma[2], dma_mask;
@@ -1654,14 +1490,12 @@ static void schizo_pbm_init(struct pci_controller_info *p,
1654 struct device_node *dp, u32 portid, 1490 struct device_node *dp, u32 portid,
1655 int chip_type) 1491 int chip_type)
1656{ 1492{
1657 struct linux_prom64_registers *regs; 1493 const struct linux_prom64_registers *regs;
1658 struct property *prop; 1494 const unsigned int *busrange;
1659 unsigned int *busrange;
1660 struct pci_pbm_info *pbm; 1495 struct pci_pbm_info *pbm;
1661 const char *chipset_name; 1496 const char *chipset_name;
1662 u32 *ino_bitmap; 1497 const u32 *ino_bitmap;
1663 int is_pbm_a; 1498 int is_pbm_a;
1664 int len;
1665 1499
1666 switch (chip_type) { 1500 switch (chip_type) {
1667 case PBM_CHIP_TYPE_TOMATILLO: 1501 case PBM_CHIP_TYPE_TOMATILLO:
@@ -1689,11 +1523,9 @@ static void schizo_pbm_init(struct pci_controller_info *p,
1689 * 3) PBM PCI config space 1523 * 3) PBM PCI config space
1690 * 4) Ichip regs 1524 * 4) Ichip regs
1691 */ 1525 */
1692 prop = of_find_property(dp, "reg", NULL); 1526 regs = of_get_property(dp, "reg", NULL);
1693 regs = prop->value;
1694 1527
1695 is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); 1528 is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
1696
1697 if (is_pbm_a) 1529 if (is_pbm_a)
1698 pbm = &p->pbm_A; 1530 pbm = &p->pbm_A;
1699 else 1531 else
@@ -1702,17 +1534,10 @@ static void schizo_pbm_init(struct pci_controller_info *p,
1702 pbm->portid = portid; 1534 pbm->portid = portid;
1703 pbm->parent = p; 1535 pbm->parent = p;
1704 pbm->prom_node = dp; 1536 pbm->prom_node = dp;
1705 pbm->pci_first_slot = 1;
1706 1537
1707 pbm->chip_type = chip_type; 1538 pbm->chip_type = chip_type;
1708 pbm->chip_version = 0; 1539 pbm->chip_version = of_getintprop_default(dp, "version#", 0);
1709 prop = of_find_property(dp, "version#", NULL); 1540 pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0);
1710 if (prop)
1711 pbm->chip_version = *(int *) prop->value;
1712 pbm->chip_revision = 0;
1713 prop = of_find_property(dp, "module-revision#", NULL);
1714 if (prop)
1715 pbm->chip_revision = *(int *) prop->value;
1716 1541
1717 pbm->pbm_regs = regs[0].phys_addr; 1542 pbm->pbm_regs = regs[0].phys_addr;
1718 pbm->controller_regs = regs[1].phys_addr - 0x10000UL; 1543 pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
@@ -1723,40 +1548,18 @@ static void schizo_pbm_init(struct pci_controller_info *p,
1723 pbm->name = dp->full_name; 1548 pbm->name = dp->full_name;
1724 1549
1725 printk("%s: %s PCI Bus Module ver[%x:%x]\n", 1550 printk("%s: %s PCI Bus Module ver[%x:%x]\n",
1726 pbm->name, 1551 pbm->name, chipset_name,
1727 (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
1728 "TOMATILLO" : "SCHIZO"),
1729 pbm->chip_version, pbm->chip_revision); 1552 pbm->chip_version, pbm->chip_revision);
1730 1553
1731 schizo_pbm_hw_init(pbm); 1554 schizo_pbm_hw_init(pbm);
1732 1555
1733 prop = of_find_property(dp, "ranges", &len); 1556 pci_determine_mem_io_space(pbm);
1734 pbm->pbm_ranges = prop->value;
1735 pbm->num_pbm_ranges =
1736 (len / sizeof(struct linux_prom_pci_ranges));
1737 1557
1738 schizo_determine_mem_io_space(pbm); 1558 ino_bitmap = of_get_property(dp, "ino-bitmap", NULL);
1739 pbm_register_toplevel_resources(p, pbm);
1740
1741 prop = of_find_property(dp, "interrupt-map", &len);
1742 if (prop) {
1743 pbm->pbm_intmap = prop->value;
1744 pbm->num_pbm_intmap =
1745 (len / sizeof(struct linux_prom_pci_intmap));
1746
1747 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1748 pbm->pbm_intmask = prop->value;
1749 } else {
1750 pbm->num_pbm_intmap = 0;
1751 }
1752
1753 prop = of_find_property(dp, "ino-bitmap", NULL);
1754 ino_bitmap = prop->value;
1755 pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) | 1559 pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
1756 ((u64)ino_bitmap[0] << 0UL)); 1560 ((u64)ino_bitmap[0] << 0UL));
1757 1561
1758 prop = of_find_property(dp, "bus-range", NULL); 1562 busrange = of_get_property(dp, "bus-range", NULL);
1759 busrange = prop->value;
1760 pbm->pci_first_busno = busrange[0]; 1563 pbm->pci_first_busno = busrange[0];
1761 pbm->pci_last_busno = busrange[1]; 1564 pbm->pci_last_busno = busrange[1];
1762 1565
@@ -1777,15 +1580,10 @@ static inline int portid_compare(u32 x, u32 y, int chip_type)
1777static void __schizo_init(struct device_node *dp, char *model_name, int chip_type) 1580static void __schizo_init(struct device_node *dp, char *model_name, int chip_type)
1778{ 1581{
1779 struct pci_controller_info *p; 1582 struct pci_controller_info *p;
1780 struct pci_iommu *iommu; 1583 struct iommu *iommu;
1781 struct property *prop;
1782 int is_pbm_a;
1783 u32 portid; 1584 u32 portid;
1784 1585
1785 portid = 0xff; 1586 portid = of_getintprop_default(dp, "portid", 0xff);
1786 prop = of_find_property(dp, "portid", NULL);
1787 if (prop)
1788 portid = *(u32 *) prop->value;
1789 1587
1790 for (p = pci_controller_root; p; p = p->next) { 1588 for (p = pci_controller_root; p; p = p->next) {
1791 struct pci_pbm_info *pbm; 1589 struct pci_pbm_info *pbm;
@@ -1798,48 +1596,43 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
1798 &p->pbm_B); 1596 &p->pbm_B);
1799 1597
1800 if (portid_compare(pbm->portid, portid, chip_type)) { 1598 if (portid_compare(pbm->portid, portid, chip_type)) {
1801 is_pbm_a = (p->pbm_A.prom_node == NULL);
1802 schizo_pbm_init(p, dp, portid, chip_type); 1599 schizo_pbm_init(p, dp, portid, chip_type);
1803 return; 1600 return;
1804 } 1601 }
1805 } 1602 }
1806 1603
1807 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1604 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1808 if (!p) { 1605 if (!p)
1809 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 1606 goto memfail;
1810 prom_halt(); 1607
1811 } 1608 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1609 if (!iommu)
1610 goto memfail;
1812 1611
1813 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1814 if (!iommu) {
1815 prom_printf("SCHIZO: Fatal memory allocation error.\n");
1816 prom_halt();
1817 }
1818 p->pbm_A.iommu = iommu; 1612 p->pbm_A.iommu = iommu;
1819 1613
1820 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1614 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1821 if (!iommu) { 1615 if (!iommu)
1822 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 1616 goto memfail;
1823 prom_halt(); 1617
1824 }
1825 p->pbm_B.iommu = iommu; 1618 p->pbm_B.iommu = iommu;
1826 1619
1827 p->next = pci_controller_root; 1620 p->next = pci_controller_root;
1828 pci_controller_root = p; 1621 pci_controller_root = p;
1829 1622
1830 p->index = pci_num_controllers++; 1623 p->index = pci_num_controllers++;
1831 p->pbms_same_domain = 0; 1624 p->scan_bus = schizo_scan_bus;
1832 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
1833 tomatillo_scan_bus :
1834 schizo_scan_bus);
1835 p->base_address_update = schizo_base_address_update;
1836 p->resource_adjust = schizo_resource_adjust;
1837 p->pci_ops = &schizo_ops; 1625 p->pci_ops = &schizo_ops;
1838 1626
1839 /* Like PSYCHO we have a 2GB aligned area for memory space. */ 1627 /* Like PSYCHO we have a 2GB aligned area for memory space. */
1840 pci_memspace_mask = 0x7fffffffUL; 1628 pci_memspace_mask = 0x7fffffffUL;
1841 1629
1842 schizo_pbm_init(p, dp, portid, chip_type); 1630 schizo_pbm_init(p, dp, portid, chip_type);
1631 return;
1632
1633memfail:
1634 prom_printf("SCHIZO: Fatal memory allocation error.\n");
1635 prom_halt();
1843} 1636}
1844 1637
1845void schizo_init(struct device_node *dp, char *model_name) 1638void schizo_init(struct device_node *dp, char *model_name)
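The duplicated allocation-failure branches in __schizo_init collapse into a single memfail label. A sketch of the pattern with the same prom_printf()/prom_halt() tail the driver uses; leaking the earlier allocation on failure is harmless because prom_halt() never returns:

    static void example_alloc_controller(void)
    {
            struct pci_controller_info *p;
            struct iommu *iommu;

            p = kzalloc(sizeof(*p), GFP_ATOMIC);
            if (!p)
                    goto memfail;

            iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
            if (!iommu)
                    goto memfail;
            p->pbm_A.iommu = iommu;

            return;

    memfail:
            prom_printf("SCHIZO: Fatal memory allocation error.\n");
            prom_halt();
    }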
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ec22cd61ec8c..94295c219329 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1,6 +1,6 @@
1/* pci_sun4v.c: SUN4V specific PCI controller support. 1/* pci_sun4v.c: SUN4V specific PCI controller support.
2 * 2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -29,7 +29,7 @@
29 29
30#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 30#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
31 31
32struct pci_iommu_batch { 32struct iommu_batch {
33 struct pci_dev *pdev; /* Device mapping is for. */ 33 struct pci_dev *pdev; /* Device mapping is for. */
34 unsigned long prot; /* IOMMU page protections */ 34 unsigned long prot; /* IOMMU page protections */
35 unsigned long entry; /* Index into IOTSB. */ 35 unsigned long entry; /* Index into IOTSB. */
@@ -37,12 +37,12 @@ struct pci_iommu_batch {
37 unsigned long npages; /* Number of pages in list. */ 37 unsigned long npages; /* Number of pages in list. */
38}; 38};
39 39
40static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); 40static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
41 41
42/* Interrupts must be disabled. */ 42/* Interrupts must be disabled. */
43static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) 43static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
44{ 44{
45 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 45 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
46 46
47 p->pdev = pdev; 47 p->pdev = pdev;
48 p->prot = prot; 48 p->prot = prot;
@@ -51,10 +51,10 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro
51} 51}
52 52
53/* Interrupts must be disabled. */ 53/* Interrupts must be disabled. */
54static long pci_iommu_batch_flush(struct pci_iommu_batch *p) 54static long pci_iommu_batch_flush(struct iommu_batch *p)
55{ 55{
56 struct pcidev_cookie *pcp = p->pdev->sysdata; 56 struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
57 unsigned long devhandle = pcp->pbm->devhandle; 57 unsigned long devhandle = pbm->devhandle;
58 unsigned long prot = p->prot; 58 unsigned long prot = p->prot;
59 unsigned long entry = p->entry; 59 unsigned long entry = p->entry;
60 u64 *pglist = p->pglist; 60 u64 *pglist = p->pglist;
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
89/* Interrupts must be disabled. */ 89/* Interrupts must be disabled. */
90static inline long pci_iommu_batch_add(u64 phys_page) 90static inline long pci_iommu_batch_add(u64 phys_page)
91{ 91{
92 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 92 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
93 93
94 BUG_ON(p->npages >= PGLIST_NENTS); 94 BUG_ON(p->npages >= PGLIST_NENTS);
95 95
@@ -103,14 +103,14 @@ static inline long pci_iommu_batch_add(u64 phys_page)
103/* Interrupts must be disabled. */ 103/* Interrupts must be disabled. */
104static inline long pci_iommu_batch_end(void) 104static inline long pci_iommu_batch_end(void)
105{ 105{
106 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 106 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
107 107
108 BUG_ON(p->npages >= PGLIST_NENTS); 108 BUG_ON(p->npages >= PGLIST_NENTS);
109 109
110 return pci_iommu_batch_flush(p); 110 return pci_iommu_batch_flush(p);
111} 111}
112 112
113static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) 113static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
114{ 114{
115 unsigned long n, i, start, end, limit; 115 unsigned long n, i, start, end, limit;
116 int pass; 116 int pass;
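The batching helpers keep their behaviour but operate on the renamed struct iommu_batch and pull the devhandle from dev.archdata rather than from a pcidev_cookie. A sketch of how a mapping path drives the batch, using the helper names shown above (interrupts are assumed to be disabled by the caller, as the comments require; error handling is simplified, and example_batch_map is an illustrative name):

    static long example_batch_map(struct pci_dev *pdev, unsigned long prot,
                                  unsigned long entry, u64 *pages,
                                  unsigned long npages)
    {
            unsigned long i;

            pci_iommu_batch_start(pdev, prot, entry);

            for (i = 0; i < npages; i++) {
                    /* Queues one page; flushes to the hypervisor when
                     * the per-cpu page list fills up.
                     */
                    if (pci_iommu_batch_add(pages[i]) < 0)
                            return -1;
            }

            return pci_iommu_batch_end();   /* flush whatever is still queued */
    }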
@@ -149,7 +149,7 @@ again:
149 return n; 149 return n;
150} 150}
151 151
152static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) 152static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
153{ 153{
154 unsigned long i; 154 unsigned long i;
155 155
@@ -159,8 +159,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
159 159
160static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 160static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
161{ 161{
162 struct pcidev_cookie *pcp; 162 struct iommu *iommu;
163 struct pci_iommu *iommu;
164 unsigned long flags, order, first_page, npages, n; 163 unsigned long flags, order, first_page, npages, n;
165 void *ret; 164 void *ret;
166 long entry; 165 long entry;
@@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
178 177
179 memset((char *)first_page, 0, PAGE_SIZE << order); 178 memset((char *)first_page, 0, PAGE_SIZE << order);
180 179
181 pcp = pdev->sysdata; 180 iommu = pdev->dev.archdata.iommu;
182 iommu = pcp->pbm->iommu;
183 181
184 spin_lock_irqsave(&iommu->lock, flags); 182 spin_lock_irqsave(&iommu->lock, flags);
185 entry = pci_arena_alloc(&iommu->arena, npages); 183 entry = pci_arena_alloc(&iommu->arena, npages);
@@ -226,15 +224,15 @@ arena_alloc_fail:
226 224
227static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) 225static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
228{ 226{
229 struct pcidev_cookie *pcp; 227 struct pci_pbm_info *pbm;
230 struct pci_iommu *iommu; 228 struct iommu *iommu;
231 unsigned long flags, order, npages, entry; 229 unsigned long flags, order, npages, entry;
232 u32 devhandle; 230 u32 devhandle;
233 231
234 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 232 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
235 pcp = pdev->sysdata; 233 iommu = pdev->dev.archdata.iommu;
236 iommu = pcp->pbm->iommu; 234 pbm = pdev->dev.archdata.host_controller;
237 devhandle = pcp->pbm->devhandle; 235 devhandle = pbm->devhandle;
238 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 236 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
239 237
240 spin_lock_irqsave(&iommu->lock, flags); 238 spin_lock_irqsave(&iommu->lock, flags);
@@ -259,16 +257,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
259 257
260static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) 258static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
261{ 259{
262 struct pcidev_cookie *pcp; 260 struct iommu *iommu;
263 struct pci_iommu *iommu;
264 unsigned long flags, npages, oaddr; 261 unsigned long flags, npages, oaddr;
265 unsigned long i, base_paddr; 262 unsigned long i, base_paddr;
266 u32 bus_addr, ret; 263 u32 bus_addr, ret;
267 unsigned long prot; 264 unsigned long prot;
268 long entry; 265 long entry;
269 266
270 pcp = pdev->sysdata; 267 iommu = pdev->dev.archdata.iommu;
271 iommu = pcp->pbm->iommu;
272 268
273 if (unlikely(direction == PCI_DMA_NONE)) 269 if (unlikely(direction == PCI_DMA_NONE))
274 goto bad; 270 goto bad;
@@ -324,8 +320,8 @@ iommu_map_fail:
324 320
325static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) 321static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
326{ 322{
327 struct pcidev_cookie *pcp; 323 struct pci_pbm_info *pbm;
328 struct pci_iommu *iommu; 324 struct iommu *iommu;
329 unsigned long flags, npages; 325 unsigned long flags, npages;
330 long entry; 326 long entry;
331 u32 devhandle; 327 u32 devhandle;
@@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
336 return; 332 return;
337 } 333 }
338 334
339 pcp = pdev->sysdata; 335 iommu = pdev->dev.archdata.iommu;
340 iommu = pcp->pbm->iommu; 336 pbm = pdev->dev.archdata.host_controller;
341 devhandle = pcp->pbm->devhandle; 337 devhandle = pbm->devhandle;
342 338
343 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 339 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
344 npages >>= IO_PAGE_SHIFT; 340 npages >>= IO_PAGE_SHIFT;
@@ -460,8 +456,7 @@ iommu_map_failed:
460 456
461static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 457static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
462{ 458{
463 struct pcidev_cookie *pcp; 459 struct iommu *iommu;
464 struct pci_iommu *iommu;
465 unsigned long flags, npages, prot; 460 unsigned long flags, npages, prot;
466 u32 dma_base; 461 u32 dma_base;
467 struct scatterlist *sgtmp; 462 struct scatterlist *sgtmp;
@@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
480 return 1; 475 return 1;
481 } 476 }
482 477
483 pcp = pdev->sysdata; 478 iommu = pdev->dev.archdata.iommu;
484 iommu = pcp->pbm->iommu;
485 479
486 if (unlikely(direction == PCI_DMA_NONE)) 480 if (unlikely(direction == PCI_DMA_NONE))
487 goto bad; 481 goto bad;
@@ -537,8 +531,8 @@ iommu_map_failed:
537 531
538static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) 532static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
539{ 533{
540 struct pcidev_cookie *pcp; 534 struct pci_pbm_info *pbm;
541 struct pci_iommu *iommu; 535 struct iommu *iommu;
542 unsigned long flags, i, npages; 536 unsigned long flags, i, npages;
543 long entry; 537 long entry;
544 u32 devhandle, bus_addr; 538 u32 devhandle, bus_addr;
@@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
548 WARN_ON(1); 542 WARN_ON(1);
549 } 543 }
550 544
551 pcp = pdev->sysdata; 545 iommu = pdev->dev.archdata.iommu;
552 iommu = pcp->pbm->iommu; 546 pbm = pdev->dev.archdata.host_controller;
553 devhandle = pcp->pbm->devhandle; 547 devhandle = pbm->devhandle;
554 548
555 bus_addr = sglist->dma_address & IO_PAGE_MASK; 549 bus_addr = sglist->dma_address & IO_PAGE_MASK;
556 550
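Every DMA entry point above follows one conversion: the IOMMU and the owning PBM are read straight out of pdev->dev.archdata instead of being reached through a pcidev_cookie in sysdata. The lookup shrinks to a couple of loads, sketched below with an illustrative helper name:

    static u32 example_devhandle(struct pci_dev *pdev)
    {
            /* Both pointers are filled in when the device is created,
             * so no cookie indirection is needed here.
             */
            struct iommu *iommu = pdev->dev.archdata.iommu;
            struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

            (void) iommu;                   /* used by the map/unmap paths */
            return pbm->devhandle;          /* feeds the sun4v hypervisor calls */
    }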
@@ -589,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
589 /* Nothing to do... */ 583 /* Nothing to do... */
590} 584}
591 585
592struct pci_iommu_ops pci_sun4v_iommu_ops = { 586const struct pci_iommu_ops pci_sun4v_iommu_ops = {
593 .alloc_consistent = pci_4v_alloc_consistent, 587 .alloc_consistent = pci_4v_alloc_consistent,
594 .free_consistent = pci_4v_free_consistent, 588 .free_consistent = pci_4v_free_consistent,
595 .map_single = pci_4v_map_single, 589 .map_single = pci_4v_map_single,
@@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
600 .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, 594 .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
601}; 595};
602 596
603/* SUN4V PCI configuration space accessors. */
604
605struct pdev_entry {
606 struct pdev_entry *next;
607 u32 devhandle;
608 unsigned int bus;
609 unsigned int device;
610 unsigned int func;
611};
612
613#define PDEV_HTAB_SIZE 16
614#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
615static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
616
617static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
618{
619 unsigned int val;
620
621 val = (devhandle ^ (devhandle >> 4));
622 val ^= bus;
623 val ^= device;
624 val ^= func;
625
626 return val & PDEV_HTAB_MASK;
627}
628
629static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
630{
631 struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
632 struct pdev_entry **slot;
633
634 if (!p)
635 return -ENOMEM;
636
637 slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
638 p->next = *slot;
639 *slot = p;
640
641 p->devhandle = devhandle;
642 p->bus = bus;
643 p->device = device;
644 p->func = func;
645
646 return 0;
647}
648
649/* Recursively descend into the OBP device tree, rooted at toplevel_node,
650 * looking for a PCI device matching bus and devfn.
651 */
652static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
653{
654 toplevel_node = toplevel_node->child;
655
656 while (toplevel_node != NULL) {
657 struct linux_prom_pci_registers *regs;
658 struct property *prop;
659 int ret;
660
661 ret = obp_find(toplevel_node, bus, devfn);
662 if (ret != 0)
663 return ret;
664
665 prop = of_find_property(toplevel_node, "reg", NULL);
666 if (!prop)
667 goto next_sibling;
668
669 regs = prop->value;
670 if (((regs->phys_hi >> 16) & 0xff) == bus &&
671 ((regs->phys_hi >> 8) & 0xff) == devfn)
672 break;
673
674 next_sibling:
675 toplevel_node = toplevel_node->sibling;
676 }
677
678 return toplevel_node != NULL;
679}
680
681static int pdev_htab_populate(struct pci_pbm_info *pbm)
682{
683 u32 devhandle = pbm->devhandle;
684 unsigned int bus;
685
686 for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
687 unsigned int devfn;
688
689 for (devfn = 0; devfn < 256; devfn++) {
690 unsigned int device = PCI_SLOT(devfn);
691 unsigned int func = PCI_FUNC(devfn);
692
693 if (obp_find(pbm->prom_node, bus, devfn)) {
694 int err = pdev_htab_add(devhandle, bus,
695 device, func);
696 if (err)
697 return err;
698 }
699 }
700 }
701
702 return 0;
703}
704
705static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
706{
707 struct pdev_entry *p;
708
709 p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
710 while (p) {
711 if (p->devhandle == devhandle &&
712 p->bus == bus &&
713 p->device == device &&
714 p->func == func)
715 break;
716
717 p = p->next;
718 }
719
720 return p;
721}
722
723static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) 597static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
724{ 598{
725 if (bus < pbm->pci_first_busno || 599 if (bus < pbm->pci_first_busno ||
726 bus > pbm->pci_last_busno) 600 bus > pbm->pci_last_busno)
727 return 1; 601 return 1;
728 return pdev_find(pbm->devhandle, bus, device, func) == NULL; 602 return 0;
729} 603}
730 604
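
/*
 * Illustrative sketch (not part of the patch): with the pdev hash table
 * above removed, the out-of-range filter reduces to a plain bus-number
 * check, and the following hunks route devfn 0 of the PBM's own root bus to
 * the generic host-bridge config accessors.  All *_sketch names are
 * stand-ins; the assumption is that the sun4v hypervisor config-space call
 * itself fails cleanly for devices that are not present, so no software-side
 * device map is needed.
 */
#include <linux/types.h>

struct pbm_range_sketch { unsigned int first_busno, last_busno; };

static int out_of_range_sketch(const struct pbm_range_sketch *pbm,
			       unsigned int bus)
{
	return bus < pbm->first_busno || bus > pbm->last_busno;
}

static int cfg_read_sketch(const struct pbm_range_sketch *pbm, int on_root_bus,
			   unsigned int bus, unsigned int devfn, u32 *value)
{
	if (on_root_bus && devfn == 0x00) {
		/* the real code returns pci_host_bridge_read_pci_cfg(...) here */
		*value = 0;
		return 0;
	}

	if (out_of_range_sketch(pbm, bus)) {
		*value = ~0U;		/* reads to nowhere return all ones */
		return 0;
	}

	/* ...otherwise the real code issues the hypervisor config call... */
	*value = 0;
	return 0;
}
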
731static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 605static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -738,6 +612,9 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
738 unsigned int func = PCI_FUNC(devfn); 612 unsigned int func = PCI_FUNC(devfn);
739 unsigned long ret; 613 unsigned long ret;
740 614
615 if (bus_dev == pbm->pci_bus && devfn == 0x00)
616 return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
617 size, value);
741 if (pci_sun4v_out_of_range(pbm, bus, device, func)) { 618 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
742 ret = ~0UL; 619 ret = ~0UL;
743 } else { 620 } else {
@@ -776,6 +653,9 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
776 unsigned int func = PCI_FUNC(devfn); 653 unsigned int func = PCI_FUNC(devfn);
777 unsigned long ret; 654 unsigned long ret;
778 655
656 if (bus_dev == pbm->pci_bus && devfn == 0x00)
657 return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
658 size, value);
779 if (pci_sun4v_out_of_range(pbm, bus, device, func)) { 659 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
780 /* Do nothing. */ 660 /* Do nothing. */
781 } else { 661 } else {
@@ -800,27 +680,7 @@ static struct pci_ops pci_sun4v_ops = {
800static void pbm_scan_bus(struct pci_controller_info *p, 680static void pbm_scan_bus(struct pci_controller_info *p,
801 struct pci_pbm_info *pbm) 681 struct pci_pbm_info *pbm)
802{ 682{
803 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 683 pbm->pci_bus = pci_scan_one_pbm(pbm);
804
805 if (!cookie) {
806 prom_printf("%s: Critical allocation failure.\n", pbm->name);
807 prom_halt();
808 }
809
810 /* All we care about is the PBM. */
811 cookie->pbm = pbm;
812
813 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
814#if 0
815 pci_fixup_host_bridge_self(pbm->pci_bus);
816 pbm->pci_bus->self->sysdata = cookie;
817#endif
818 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
819 pci_record_assignments(pbm, pbm->pci_bus);
820 pci_assign_unassigned(pbm, pbm->pci_bus);
821 pci_fixup_irq(pbm, pbm->pci_bus);
822 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
823 pci_setup_busmastering(pbm, pbm->pci_bus);
824} 684}
825 685
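
/*
 * Shape of the consolidation above, as a sketch.  pci_scan_one_pbm() is real
 * (it is called above) but defined outside this file; its body here is only
 * a guess, and the *_sk names are stand-ins for the per-driver fixups that
 * pbm_scan_bus() used to open-code.
 */
struct pbm_sk;
struct pci_bus_sk;

extern struct pci_bus_sk *scan_root_bus_sk(struct pbm_sk *pbm);
extern void generic_pbm_fixups_sk(struct pbm_sk *pbm, struct pci_bus_sk *bus);

static struct pci_bus_sk *pci_scan_one_pbm_sketch(struct pbm_sk *pbm)
{
	struct pci_bus_sk *bus = scan_root_bus_sk(pbm);

	if (bus)
		generic_pbm_fixups_sk(pbm, bus); /* cookies, IRQs, 66MHz, busmastering */

	return bus;
}
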
826static void pci_sun4v_scan_bus(struct pci_controller_info *p) 686static void pci_sun4v_scan_bus(struct pci_controller_info *p)
@@ -844,130 +704,10 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
844 /* XXX register error interrupt handlers XXX */ 704 /* XXX register error interrupt handlers XXX */
845} 705}
846 706
847static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
848{
849 struct pcidev_cookie *pcp = pdev->sysdata;
850 struct pci_pbm_info *pbm = pcp->pbm;
851 struct resource *res, *root;
852 u32 reg;
853 int where, size, is_64bit;
854
855 res = &pdev->resource[resource];
856 if (resource < 6) {
857 where = PCI_BASE_ADDRESS_0 + (resource * 4);
858 } else if (resource == PCI_ROM_RESOURCE) {
859 where = pdev->rom_base_reg;
860 } else {
861 /* Somebody might have asked allocation of a non-standard resource */
862 return;
863 }
864
865 /* XXX 64-bit MEM handling is not %100 correct... XXX */
866 is_64bit = 0;
867 if (res->flags & IORESOURCE_IO)
868 root = &pbm->io_space;
869 else {
870 root = &pbm->mem_space;
871 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
872 == PCI_BASE_ADDRESS_MEM_TYPE_64)
873 is_64bit = 1;
874 }
875
876 size = res->end - res->start;
877 pci_read_config_dword(pdev, where, &reg);
878 reg = ((reg & size) |
879 (((u32)(res->start - root->start)) & ~size));
880 if (resource == PCI_ROM_RESOURCE) {
881 reg |= PCI_ROM_ADDRESS_ENABLE;
882 res->flags |= IORESOURCE_ROM_ENABLE;
883 }
884 pci_write_config_dword(pdev, where, reg);
885
886 /* This knows that the upper 32-bits of the address
887 * must be zero. Our PCI common layer enforces this.
888 */
889 if (is_64bit)
890 pci_write_config_dword(pdev, where + 4, 0);
891}
892
893static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
894 struct resource *res,
895 struct resource *root)
896{
897 res->start += root->start;
898 res->end += root->start;
899}
900
901/* Use ranges property to determine where PCI MEM, I/O, and Config
902 * space are for this PCI bus module.
903 */
904static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
905{
906 int i, saw_mem, saw_io;
907
908 saw_mem = saw_io = 0;
909 for (i = 0; i < pbm->num_pbm_ranges; i++) {
910 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
911 unsigned long a;
912 int type;
913
914 type = (pr->child_phys_hi >> 24) & 0x3;
915 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
916 ((unsigned long)pr->parent_phys_lo << 0UL));
917
918 switch (type) {
919 case 1:
920 /* 16-bit IO space, 16MB */
921 pbm->io_space.start = a;
922 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
923 pbm->io_space.flags = IORESOURCE_IO;
924 saw_io = 1;
925 break;
926
927 case 2:
928 /* 32-bit MEM space, 2GB */
929 pbm->mem_space.start = a;
930 pbm->mem_space.end = a + (0x80000000UL - 1UL);
931 pbm->mem_space.flags = IORESOURCE_MEM;
932 saw_mem = 1;
933 break;
934
935 case 3:
936 /* XXX 64-bit MEM handling XXX */
937
938 default:
939 break;
940 };
941 }
942
943 if (!saw_io || !saw_mem) {
944 prom_printf("%s: Fatal error, missing %s PBM range.\n",
945 pbm->name,
946 (!saw_io ? "IO" : "MEM"));
947 prom_halt();
948 }
949
950 printk("%s: PCI IO[%lx] MEM[%lx]\n",
951 pbm->name,
952 pbm->io_space.start,
953 pbm->mem_space.start);
954}
955
956static void pbm_register_toplevel_resources(struct pci_controller_info *p,
957 struct pci_pbm_info *pbm)
958{
959 pbm->io_space.name = pbm->mem_space.name = pbm->name;
960
961 request_resource(&ioport_resource, &pbm->io_space);
962 request_resource(&iomem_resource, &pbm->mem_space);
963 pci_register_legacy_regions(&pbm->io_space,
964 &pbm->mem_space);
965}
966
967static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, 707static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
968 struct pci_iommu *iommu) 708 struct iommu *iommu)
969{ 709{
970 struct pci_iommu_arena *arena = &iommu->arena; 710 struct iommu_arena *arena = &iommu->arena;
971 unsigned long i, cnt = 0; 711 unsigned long i, cnt = 0;
972 u32 devhandle; 712 u32 devhandle;
973 713
@@ -994,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
994 734
995static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) 735static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
996{ 736{
997 struct pci_iommu *iommu = pbm->iommu; 737 struct iommu *iommu = pbm->iommu;
998 struct property *prop; 738 struct property *prop;
999 unsigned long num_tsb_entries, sz; 739 unsigned long num_tsb_entries, sz;
1000 u32 vdma[2], dma_mask, dma_offset; 740 u32 vdma[2], dma_mask, dma_offset;
@@ -1281,7 +1021,7 @@ h_error:
1281 1021
1282static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) 1022static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1283{ 1023{
1284 u32 *val; 1024 const u32 *val;
1285 int len; 1025 int len;
1286 1026
1287 val = of_get_property(pbm->prom_node, "#msi-eqs", &len); 1027 val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
@@ -1289,16 +1029,16 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1289 goto no_msi; 1029 goto no_msi;
1290 pbm->msiq_num = *val; 1030 pbm->msiq_num = *val;
1291 if (pbm->msiq_num) { 1031 if (pbm->msiq_num) {
1292 struct msiq_prop { 1032 const struct msiq_prop {
1293 u32 first_msiq; 1033 u32 first_msiq;
1294 u32 num_msiq; 1034 u32 num_msiq;
1295 u32 first_devino; 1035 u32 first_devino;
1296 } *mqp; 1036 } *mqp;
1297 struct msi_range_prop { 1037 const struct msi_range_prop {
1298 u32 first_msi; 1038 u32 first_msi;
1299 u32 num_msi; 1039 u32 num_msi;
1300 } *mrng; 1040 } *mrng;
1301 struct addr_range_prop { 1041 const struct addr_range_prop {
1302 u32 msi32_high; 1042 u32 msi32_high;
1303 u32 msi32_low; 1043 u32 msi32_low;
1304 u32 msi32_len; 1044 u32 msi32_len;
@@ -1410,8 +1150,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
1410 struct pci_dev *pdev, 1150 struct pci_dev *pdev,
1411 struct msi_desc *entry) 1151 struct msi_desc *entry)
1412{ 1152{
1413 struct pcidev_cookie *pcp = pdev->sysdata; 1153 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1414 struct pci_pbm_info *pbm = pcp->pbm;
1415 unsigned long devino, msiqid; 1154 unsigned long devino, msiqid;
1416 struct msi_msg msg; 1155 struct msi_msg msg;
1417 int msi_num, err; 1156 int msi_num, err;
@@ -1455,7 +1194,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
1455 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) 1194 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
1456 goto out_err; 1195 goto out_err;
1457 1196
1458 pcp->msi_num = msi_num; 1197 pdev->dev.archdata.msi_num = msi_num;
1459 1198
1460 if (entry->msi_attrib.is_64) { 1199 if (entry->msi_attrib.is_64) {
1461 msg.address_hi = pbm->msi64_start >> 32; 1200 msg.address_hi = pbm->msi64_start >> 32;
@@ -1484,12 +1223,11 @@ out_err:
1484static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, 1223static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
1485 struct pci_dev *pdev) 1224 struct pci_dev *pdev)
1486{ 1225{
1487 struct pcidev_cookie *pcp = pdev->sysdata; 1226 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1488 struct pci_pbm_info *pbm = pcp->pbm;
1489 unsigned long msiqid, err; 1227 unsigned long msiqid, err;
1490 unsigned int msi_num; 1228 unsigned int msi_num;
1491 1229
1492 msi_num = pcp->msi_num; 1230 msi_num = pdev->dev.archdata.msi_num;
1493 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); 1231 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
1494 if (err) { 1232 if (err) {
1495 printk(KERN_ERR "%s: getmsiq gives error %lu\n", 1233 printk(KERN_ERR "%s: getmsiq gives error %lu\n",
@@ -1516,8 +1254,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1516static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) 1254static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
1517{ 1255{
1518 struct pci_pbm_info *pbm; 1256 struct pci_pbm_info *pbm;
1519 struct property *prop;
1520 int len, i;
1521 1257
1522 if (devhandle & 0x40) 1258 if (devhandle & 0x40)
1523 pbm = &p->pbm_B; 1259 pbm = &p->pbm_B;
@@ -1526,7 +1262,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
1526 1262
1527 pbm->parent = p; 1263 pbm->parent = p;
1528 pbm->prom_node = dp; 1264 pbm->prom_node = dp;
1529 pbm->pci_first_slot = 1;
1530 1265
1531 pbm->devhandle = devhandle; 1266 pbm->devhandle = devhandle;
1532 1267
@@ -1534,39 +1269,17 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
1534 1269
1535 printk("%s: SUN4V PCI Bus Module\n", pbm->name); 1270 printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1536 1271
1537 prop = of_find_property(dp, "ranges", &len); 1272 pci_determine_mem_io_space(pbm);
1538 pbm->pbm_ranges = prop->value;
1539 pbm->num_pbm_ranges =
1540 (len / sizeof(struct linux_prom_pci_ranges));
1541
1542 /* Mask out the top 8 bits of the ranges, leaving the real
1543 * physical address.
1544 */
1545 for (i = 0; i < pbm->num_pbm_ranges; i++)
1546 pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
1547
1548 pci_sun4v_determine_mem_io_space(pbm);
1549 pbm_register_toplevel_resources(p, pbm);
1550
1551 prop = of_find_property(dp, "interrupt-map", &len);
1552 pbm->pbm_intmap = prop->value;
1553 pbm->num_pbm_intmap =
1554 (len / sizeof(struct linux_prom_pci_intmap));
1555
1556 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1557 pbm->pbm_intmask = prop->value;
1558 1273
1559 pci_sun4v_get_bus_range(pbm); 1274 pci_sun4v_get_bus_range(pbm);
1560 pci_sun4v_iommu_init(pbm); 1275 pci_sun4v_iommu_init(pbm);
1561 pci_sun4v_msi_init(pbm); 1276 pci_sun4v_msi_init(pbm);
1562
1563 pdev_htab_populate(pbm);
1564} 1277}
1565 1278
1566void sun4v_pci_init(struct device_node *dp, char *model_name) 1279void sun4v_pci_init(struct device_node *dp, char *model_name)
1567{ 1280{
1568 struct pci_controller_info *p; 1281 struct pci_controller_info *p;
1569 struct pci_iommu *iommu; 1282 struct iommu *iommu;
1570 struct property *prop; 1283 struct property *prop;
1571 struct linux_prom64_registers *regs; 1284 struct linux_prom64_registers *regs;
1572 u32 devhandle; 1285 u32 devhandle;
@@ -1606,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1606 if (!p) 1319 if (!p)
1607 goto fatal_memory_error; 1320 goto fatal_memory_error;
1608 1321
1609 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1322 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1610 if (!iommu) 1323 if (!iommu)
1611 goto fatal_memory_error; 1324 goto fatal_memory_error;
1612 1325
1613 p->pbm_A.iommu = iommu; 1326 p->pbm_A.iommu = iommu;
1614 1327
1615 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1328 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1616 if (!iommu) 1329 if (!iommu)
1617 goto fatal_memory_error; 1330 goto fatal_memory_error;
1618 1331
@@ -1622,11 +1335,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1622 pci_controller_root = p; 1335 pci_controller_root = p;
1623 1336
1624 p->index = pci_num_controllers++; 1337 p->index = pci_num_controllers++;
1625 p->pbms_same_domain = 0;
1626 1338
1627 p->scan_bus = pci_sun4v_scan_bus; 1339 p->scan_bus = pci_sun4v_scan_bus;
1628 p->base_address_update = pci_sun4v_base_address_update;
1629 p->resource_adjust = pci_sun4v_resource_adjust;
1630#ifdef CONFIG_PCI_MSI 1340#ifdef CONFIG_PCI_MSI
1631 p->setup_msi_irq = pci_sun4v_setup_msi_irq; 1341 p->setup_msi_irq = pci_sun4v_setup_msi_irq;
1632 p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; 1342 p->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
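
/*
 * Sketch of the boot-time reservation that probe_existing_entries()
 * (converted above to the generic struct iommu / iommu_arena) performs:
 * translations the firmware left valid must be marked in the arena bitmap so
 * the allocator never hands those entries out.  is_valid_sketch() stands in
 * for the per-entry hypervisor query the real code issues (note the
 * devhandle in its locals above); map/limit mirror the arena fields visible
 * in the sbus.c hunks further down.
 */
#include <linux/bitops.h>

struct arena_sketch {
	unsigned long *map;	/* one bit per IOTSB entry */
	unsigned int   limit;	/* number of entries       */
};

static unsigned long reserve_existing_sketch(struct arena_sketch *arena,
					     int (*is_valid_sketch)(unsigned int entry))
{
	unsigned long i, cnt = 0;

	for (i = 0; i < arena->limit; i++) {
		if (is_valid_sketch(i)) {
			__set_bit(i, arena->map);
			cnt++;
		}
	}

	return cnt;	/* the real function likewise returns a count */
}
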
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index b291060c25a6..a114151f9fbe 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -28,6 +28,7 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/compat.h> 30#include <linux/compat.h>
31#include <linux/tick.h>
31#include <linux/init.h> 32#include <linux/init.h>
32 33
33#include <asm/oplib.h> 34#include <asm/oplib.h>
@@ -88,12 +89,14 @@ void cpu_idle(void)
88 set_thread_flag(TIF_POLLING_NRFLAG); 89 set_thread_flag(TIF_POLLING_NRFLAG);
89 90
90 while(1) { 91 while(1) {
91 if (need_resched()) { 92 tick_nohz_stop_sched_tick();
92 preempt_enable_no_resched(); 93 while (!need_resched())
93 schedule(); 94 sparc64_yield();
94 preempt_disable(); 95 tick_nohz_restart_sched_tick();
95 } 96
96 sparc64_yield(); 97 preempt_enable_no_resched();
98 schedule();
99 preempt_disable();
97 } 100 }
98} 101}
99 102
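
/*
 * The cpu_idle() rework above is the standard dynticks idle pattern: stop
 * the periodic tick before idling, spin until there is work, restart the
 * tick, then reschedule.  Condensed restatement of the hunk above, with
 * cpu_relax() standing in for the sparc64-specific sparc64_yield().
 */
#include <linux/tick.h>
#include <linux/sched.h>

static void idle_loop_sketch(void)
{
	while (1) {
		tick_nohz_stop_sched_tick();	/* no periodic tick while idle */
		while (!need_resched())
			cpu_relax();
		tick_nohz_restart_sched_tick();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
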
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 0917c24c4f08..5e1fcd05160d 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -36,12 +36,13 @@ static struct device_node *allnodes;
36 */ 36 */
37static DEFINE_RWLOCK(devtree_lock); 37static DEFINE_RWLOCK(devtree_lock);
38 38
39int of_device_is_compatible(struct device_node *device, const char *compat) 39int of_device_is_compatible(const struct device_node *device,
40 const char *compat)
40{ 41{
41 const char* cp; 42 const char* cp;
42 int cplen, l; 43 int cplen, l;
43 44
44 cp = (char *) of_get_property(device, "compatible", &cplen); 45 cp = of_get_property(device, "compatible", &cplen);
45 if (cp == NULL) 46 if (cp == NULL)
46 return 0; 47 return 0;
47 while (cplen > 0) { 48 while (cplen > 0) {
@@ -154,13 +155,14 @@ struct device_node *of_find_compatible_node(struct device_node *from,
154} 155}
155EXPORT_SYMBOL(of_find_compatible_node); 156EXPORT_SYMBOL(of_find_compatible_node);
156 157
157struct property *of_find_property(struct device_node *np, const char *name, 158struct property *of_find_property(const struct device_node *np,
159 const char *name,
158 int *lenp) 160 int *lenp)
159{ 161{
160 struct property *pp; 162 struct property *pp;
161 163
162 for (pp = np->properties; pp != 0; pp = pp->next) { 164 for (pp = np->properties; pp != 0; pp = pp->next) {
163 if (strcmp(pp->name, name) == 0) { 165 if (strcasecmp(pp->name, name) == 0) {
164 if (lenp != 0) 166 if (lenp != 0)
165 *lenp = pp->length; 167 *lenp = pp->length;
166 break; 168 break;
@@ -174,7 +176,8 @@ EXPORT_SYMBOL(of_find_property);
174 * Find a property with a given name for a given node 176 * Find a property with a given name for a given node
175 * and return the value. 177 * and return the value.
176 */ 178 */
177void *of_get_property(struct device_node *np, const char *name, int *lenp) 179const void *of_get_property(const struct device_node *np, const char *name,
180 int *lenp)
178{ 181{
179 struct property *pp = of_find_property(np,name,lenp); 182 struct property *pp = of_find_property(np,name,lenp);
180 return pp ? pp->value : NULL; 183 return pp ? pp->value : NULL;
@@ -196,7 +199,7 @@ EXPORT_SYMBOL(of_getintprop_default);
196 199
197int of_n_addr_cells(struct device_node *np) 200int of_n_addr_cells(struct device_node *np)
198{ 201{
199 int* ip; 202 const int* ip;
200 do { 203 do {
201 if (np->parent) 204 if (np->parent)
202 np = np->parent; 205 np = np->parent;
@@ -211,7 +214,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
211 214
212int of_n_size_cells(struct device_node *np) 215int of_n_size_cells(struct device_node *np)
213{ 216{
214 int* ip; 217 const int* ip;
215 do { 218 do {
216 if (np->parent) 219 if (np->parent)
217 np = np->parent; 220 np = np->parent;
@@ -243,7 +246,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
243 while (*prevp) { 246 while (*prevp) {
244 struct property *prop = *prevp; 247 struct property *prop = *prevp;
245 248
246 if (!strcmp(prop->name, name)) { 249 if (!strcasecmp(prop->name, name)) {
247 void *old_val = prop->value; 250 void *old_val = prop->value;
248 int ret; 251 int ret;
249 252
@@ -397,7 +400,7 @@ static unsigned int psycho_irq_build(struct device_node *dp,
397 400
398static void psycho_irq_trans_init(struct device_node *dp) 401static void psycho_irq_trans_init(struct device_node *dp)
399{ 402{
400 struct linux_prom64_registers *regs; 403 const struct linux_prom64_registers *regs;
401 404
402 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 405 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
403 dp->irq_trans->irq_build = psycho_irq_build; 406 dp->irq_trans->irq_build = psycho_irq_build;
@@ -547,7 +550,7 @@ static unsigned long __sabre_onboard_imap_off[] = {
547static int sabre_device_needs_wsync(struct device_node *dp) 550static int sabre_device_needs_wsync(struct device_node *dp)
548{ 551{
549 struct device_node *parent = dp->parent; 552 struct device_node *parent = dp->parent;
550 char *parent_model, *parent_compat; 553 const char *parent_model, *parent_compat;
551 554
552 /* This traversal up towards the root is meant to 555 /* This traversal up towards the root is meant to
553 * handle two cases: 556 * handle two cases:
@@ -589,7 +592,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
589{ 592{
590 struct sabre_irq_data *irq_data = _data; 593 struct sabre_irq_data *irq_data = _data;
591 unsigned long controller_regs = irq_data->controller_regs; 594 unsigned long controller_regs = irq_data->controller_regs;
592 struct linux_prom_pci_registers *regs; 595 const struct linux_prom_pci_registers *regs;
593 unsigned long imap, iclr; 596 unsigned long imap, iclr;
594 unsigned long imap_off, iclr_off; 597 unsigned long imap_off, iclr_off;
595 int inofixup = 0; 598 int inofixup = 0;
@@ -639,9 +642,9 @@ static unsigned int sabre_irq_build(struct device_node *dp,
639 642
640static void sabre_irq_trans_init(struct device_node *dp) 643static void sabre_irq_trans_init(struct device_node *dp)
641{ 644{
642 struct linux_prom64_registers *regs; 645 const struct linux_prom64_registers *regs;
643 struct sabre_irq_data *irq_data; 646 struct sabre_irq_data *irq_data;
644 u32 *busrange; 647 const u32 *busrange;
645 648
646 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 649 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
647 dp->irq_trans->irq_build = sabre_irq_build; 650 dp->irq_trans->irq_build = sabre_irq_build;
@@ -795,7 +798,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
795 798
796static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo) 799static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo)
797{ 800{
798 struct linux_prom64_registers *regs; 801 const struct linux_prom64_registers *regs;
799 struct schizo_irq_data *irq_data; 802 struct schizo_irq_data *irq_data;
800 803
801 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 804 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
@@ -836,7 +839,7 @@ static unsigned int pci_sun4v_irq_build(struct device_node *dp,
836 839
837static void pci_sun4v_irq_trans_init(struct device_node *dp) 840static void pci_sun4v_irq_trans_init(struct device_node *dp)
838{ 841{
839 struct linux_prom64_registers *regs; 842 const struct linux_prom64_registers *regs;
840 843
841 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 844 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
842 dp->irq_trans->irq_build = pci_sun4v_irq_build; 845 dp->irq_trans->irq_build = pci_sun4v_irq_build;
@@ -940,7 +943,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
940 void *_data) 943 void *_data)
941{ 944{
942 unsigned long reg_base = (unsigned long) _data; 945 unsigned long reg_base = (unsigned long) _data;
943 struct linux_prom_registers *regs; 946 const struct linux_prom_registers *regs;
944 unsigned long imap, iclr; 947 unsigned long imap, iclr;
945 int sbus_slot = 0; 948 int sbus_slot = 0;
946 int sbus_level = 0; 949 int sbus_level = 0;
@@ -994,7 +997,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
994 997
995static void sbus_irq_trans_init(struct device_node *dp) 998static void sbus_irq_trans_init(struct device_node *dp)
996{ 999{
997 struct linux_prom64_registers *regs; 1000 const struct linux_prom64_registers *regs;
998 1001
999 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 1002 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
1000 dp->irq_trans->irq_build = sbus_of_build_irq; 1003 dp->irq_trans->irq_build = sbus_of_build_irq;
@@ -1080,7 +1083,7 @@ static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
1080 1083
1081static void sun4v_vdev_irq_trans_init(struct device_node *dp) 1084static void sun4v_vdev_irq_trans_init(struct device_node *dp)
1082{ 1085{
1083 struct linux_prom64_registers *regs; 1086 const struct linux_prom64_registers *regs;
1084 1087
1085 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); 1088 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
1086 dp->irq_trans->irq_build = sun4v_vdev_irq_build; 1089 dp->irq_trans->irq_build = sun4v_vdev_irq_build;
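
/*
 * With of_find_property()/of_get_property() now taking and returning const
 * (and property-name matching switching from strcmp() to strcasecmp()),
 * callers must hold property data in const pointers — hence the many
 * "const struct linux_prom*_registers" conversions above.  Minimal caller
 * sketch; the header location is assumed for this tree.
 */
#include <linux/errno.h>
#include <asm/prom.h>	/* of_get_property() prototype on sparc64 at this point */

static int reg_cells_sketch(const struct device_node *dp)
{
	const u32 *reg;		/* const: the property data is not ours to modify */
	int len;

	reg = of_get_property(dp, "reg", &len);
	if (!reg)
		return -ENODEV;

	return len / (int)sizeof(u32);
}
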
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14f78fb5e890..3b05428cc909 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,23 +26,9 @@
26 26
27#define MAP_BASE ((u32)0xc0000000) 27#define MAP_BASE ((u32)0xc0000000)
28 28
29struct sbus_iommu_arena { 29struct sbus_info {
30 unsigned long *map; 30 struct iommu iommu;
31 unsigned int hint; 31 struct strbuf strbuf;
32 unsigned int limit;
33};
34
35struct sbus_iommu {
36 spinlock_t lock;
37
38 struct sbus_iommu_arena arena;
39
40 iopte_t *page_table;
41 unsigned long strbuf_regs;
42 unsigned long iommu_regs;
43 unsigned long sbus_control_reg;
44
45 volatile unsigned long strbuf_flushflag;
46}; 32};
47 33
48/* Offsets from iommu_regs */ 34/* Offsets from iommu_regs */
@@ -58,16 +44,17 @@ struct sbus_iommu {
58 44
59#define IOMMU_DRAM_VALID (1UL << 30UL) 45#define IOMMU_DRAM_VALID (1UL << 30UL)
60 46
61static void __iommu_flushall(struct sbus_iommu *iommu) 47static void __iommu_flushall(struct iommu *iommu)
62{ 48{
63 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG; 49 unsigned long tag;
64 int entry; 50 int entry;
65 51
52 tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
66 for (entry = 0; entry < 16; entry++) { 53 for (entry = 0; entry < 16; entry++) {
67 upa_writeq(0, tag); 54 upa_writeq(0, tag);
68 tag += 8UL; 55 tag += 8UL;
69 } 56 }
70 upa_readq(iommu->sbus_control_reg); 57 upa_readq(iommu->write_complete_reg);
71} 58}
72 59
73/* Offsets from strbuf_regs */ 60/* Offsets from strbuf_regs */
@@ -82,15 +69,14 @@ static void __iommu_flushall(struct sbus_iommu *iommu)
82 69
83#define STRBUF_TAG_VALID 0x02UL 70#define STRBUF_TAG_VALID 0x02UL
84 71
85static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction) 72static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
86{ 73{
87 unsigned long n; 74 unsigned long n;
88 int limit; 75 int limit;
89 76
90 n = npages; 77 n = npages;
91 while (n--) 78 while (n--)
92 upa_writeq(base + (n << IO_PAGE_SHIFT), 79 upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
93 iommu->strbuf_regs + STRBUF_PFLUSH);
94 80
95 /* If the device could not have possibly put dirty data into 81 /* If the device could not have possibly put dirty data into
96 * the streaming cache, no flush-flag synchronization needs 82 * the streaming cache, no flush-flag synchronization needs
@@ -99,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
99 if (direction == SBUS_DMA_TODEVICE) 85 if (direction == SBUS_DMA_TODEVICE)
100 return; 86 return;
101 87
102 iommu->strbuf_flushflag = 0UL; 88 *(strbuf->strbuf_flushflag) = 0UL;
103 89
104 /* Whoopee cushion! */ 90 /* Whoopee cushion! */
105 upa_writeq(__pa(&iommu->strbuf_flushflag), 91 upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
106 iommu->strbuf_regs + STRBUF_FSYNC); 92 upa_readq(iommu->write_complete_reg);
107 upa_readq(iommu->sbus_control_reg);
108 93
109 limit = 100000; 94 limit = 100000;
110 while (iommu->strbuf_flushflag == 0UL) { 95 while (*(strbuf->strbuf_flushflag) == 0UL) {
111 limit--; 96 limit--;
112 if (!limit) 97 if (!limit)
113 break; 98 break;
@@ -121,9 +106,9 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
121} 106}
122 107
123/* Based largely upon the ppc64 iommu allocator. */ 108/* Based largely upon the ppc64 iommu allocator. */
124static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages) 109static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
125{ 110{
126 struct sbus_iommu_arena *arena = &iommu->arena; 111 struct iommu_arena *arena = &iommu->arena;
127 unsigned long n, i, start, end, limit; 112 unsigned long n, i, start, end, limit;
128 int pass; 113 int pass;
129 114
@@ -162,7 +147,7 @@ again:
162 return n; 147 return n;
163} 148}
164 149
165static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages) 150static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
166{ 151{
167 unsigned long i; 152 unsigned long i;
168 153
@@ -170,7 +155,7 @@ static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base,
170 __clear_bit(i, arena->map); 155 __clear_bit(i, arena->map);
171} 156}
172 157
173static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize) 158static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
174{ 159{
175 unsigned long tsbbase, order, sz, num_tsb_entries; 160 unsigned long tsbbase, order, sz, num_tsb_entries;
176 161
@@ -178,13 +163,14 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
178 163
179 /* Setup initial software IOMMU state. */ 164 /* Setup initial software IOMMU state. */
180 spin_lock_init(&iommu->lock); 165 spin_lock_init(&iommu->lock);
166 iommu->page_table_map_base = MAP_BASE;
181 167
182 /* Allocate and initialize the free area map. */ 168 /* Allocate and initialize the free area map. */
183 sz = num_tsb_entries / 8; 169 sz = num_tsb_entries / 8;
184 sz = (sz + 7UL) & ~7UL; 170 sz = (sz + 7UL) & ~7UL;
185 iommu->arena.map = kzalloc(sz, GFP_KERNEL); 171 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
186 if (!iommu->arena.map) { 172 if (!iommu->arena.map) {
187 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); 173 prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
188 prom_halt(); 174 prom_halt();
189 } 175 }
190 iommu->arena.limit = num_tsb_entries; 176 iommu->arena.limit = num_tsb_entries;
@@ -200,7 +186,7 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
200 memset(iommu->page_table, 0, tsbsize); 186 memset(iommu->page_table, 0, tsbsize);
201} 187}
202 188
203static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages) 189static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
204{ 190{
205 long entry; 191 long entry;
206 192
@@ -211,14 +197,15 @@ static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npag
211 return iommu->page_table + entry; 197 return iommu->page_table + entry;
212} 198}
213 199
214static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages) 200static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
215{ 201{
216 sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); 202 sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
217} 203}
218 204
219void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr) 205void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
220{ 206{
221 struct sbus_iommu *iommu; 207 struct sbus_info *info;
208 struct iommu *iommu;
222 iopte_t *iopte; 209 iopte_t *iopte;
223 unsigned long flags, order, first_page; 210 unsigned long flags, order, first_page;
224 void *ret; 211 void *ret;
@@ -234,7 +221,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
234 return NULL; 221 return NULL;
235 memset((char *)first_page, 0, PAGE_SIZE << order); 222 memset((char *)first_page, 0, PAGE_SIZE << order);
236 223
237 iommu = sdev->bus->iommu; 224 info = sdev->bus->iommu;
225 iommu = &info->iommu;
238 226
239 spin_lock_irqsave(&iommu->lock, flags); 227 spin_lock_irqsave(&iommu->lock, flags);
240 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); 228 iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -245,7 +233,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
245 return NULL; 233 return NULL;
246 } 234 }
247 235
248 *dvma_addr = (MAP_BASE + 236 *dvma_addr = (iommu->page_table_map_base +
249 ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); 237 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
250 ret = (void *) first_page; 238 ret = (void *) first_page;
251 npages = size >> IO_PAGE_SHIFT; 239 npages = size >> IO_PAGE_SHIFT;
@@ -263,18 +251,20 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
263 251
264void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma) 252void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
265{ 253{
266 struct sbus_iommu *iommu; 254 struct sbus_info *info;
255 struct iommu *iommu;
267 iopte_t *iopte; 256 iopte_t *iopte;
268 unsigned long flags, order, npages; 257 unsigned long flags, order, npages;
269 258
270 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 259 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
271 iommu = sdev->bus->iommu; 260 info = sdev->bus->iommu;
261 iommu = &info->iommu;
272 iopte = iommu->page_table + 262 iopte = iommu->page_table +
273 ((dvma - MAP_BASE) >> IO_PAGE_SHIFT); 263 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
274 264
275 spin_lock_irqsave(&iommu->lock, flags); 265 spin_lock_irqsave(&iommu->lock, flags);
276 266
277 free_npages(iommu, dvma - MAP_BASE, npages); 267 free_npages(iommu, dvma - iommu->page_table_map_base, npages);
278 268
279 spin_unlock_irqrestore(&iommu->lock, flags); 269 spin_unlock_irqrestore(&iommu->lock, flags);
280 270
@@ -285,14 +275,16 @@ void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_add
285 275
286dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction) 276dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
287{ 277{
288 struct sbus_iommu *iommu; 278 struct sbus_info *info;
279 struct iommu *iommu;
289 iopte_t *base; 280 iopte_t *base;
290 unsigned long flags, npages, oaddr; 281 unsigned long flags, npages, oaddr;
291 unsigned long i, base_paddr; 282 unsigned long i, base_paddr;
292 u32 bus_addr, ret; 283 u32 bus_addr, ret;
293 unsigned long iopte_protection; 284 unsigned long iopte_protection;
294 285
295 iommu = sdev->bus->iommu; 286 info = sdev->bus->iommu;
287 iommu = &info->iommu;
296 288
297 if (unlikely(direction == SBUS_DMA_NONE)) 289 if (unlikely(direction == SBUS_DMA_NONE))
298 BUG(); 290 BUG();
@@ -308,7 +300,7 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
308 if (unlikely(!base)) 300 if (unlikely(!base))
309 BUG(); 301 BUG();
310 302
311 bus_addr = (MAP_BASE + 303 bus_addr = (iommu->page_table_map_base +
312 ((base - iommu->page_table) << IO_PAGE_SHIFT)); 304 ((base - iommu->page_table) << IO_PAGE_SHIFT));
313 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 305 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
314 base_paddr = __pa(oaddr & IO_PAGE_MASK); 306 base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -325,7 +317,9 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
325 317
326void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) 318void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
327{ 319{
328 struct sbus_iommu *iommu = sdev->bus->iommu; 320 struct sbus_info *info = sdev->bus->iommu;
321 struct iommu *iommu = &info->iommu;
322 struct strbuf *strbuf = &info->strbuf;
329 iopte_t *base; 323 iopte_t *base;
330 unsigned long flags, npages, i; 324 unsigned long flags, npages, i;
331 325
@@ -335,15 +329,15 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, in
335 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 329 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
336 npages >>= IO_PAGE_SHIFT; 330 npages >>= IO_PAGE_SHIFT;
337 base = iommu->page_table + 331 base = iommu->page_table +
338 ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); 332 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
339 333
340 bus_addr &= IO_PAGE_MASK; 334 bus_addr &= IO_PAGE_MASK;
341 335
342 spin_lock_irqsave(&iommu->lock, flags); 336 spin_lock_irqsave(&iommu->lock, flags);
343 sbus_strbuf_flush(iommu, bus_addr, npages, direction); 337 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
344 for (i = 0; i < npages; i++) 338 for (i = 0; i < npages; i++)
345 iopte_val(base[i]) = 0UL; 339 iopte_val(base[i]) = 0UL;
346 free_npages(iommu, bus_addr - MAP_BASE, npages); 340 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
347 spin_unlock_irqrestore(&iommu->lock, flags); 341 spin_unlock_irqrestore(&iommu->lock, flags);
348} 342}
349 343
@@ -425,7 +419,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
425 419
426int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 420int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
427{ 421{
428 struct sbus_iommu *iommu; 422 struct sbus_info *info;
423 struct iommu *iommu;
429 unsigned long flags, npages, iopte_protection; 424 unsigned long flags, npages, iopte_protection;
430 iopte_t *base; 425 iopte_t *base;
431 u32 dma_base; 426 u32 dma_base;
@@ -442,7 +437,8 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
442 return 1; 437 return 1;
443 } 438 }
444 439
445 iommu = sdev->bus->iommu; 440 info = sdev->bus->iommu;
441 iommu = &info->iommu;
446 442
447 if (unlikely(direction == SBUS_DMA_NONE)) 443 if (unlikely(direction == SBUS_DMA_NONE))
448 BUG(); 444 BUG();
@@ -456,7 +452,7 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
456 if (unlikely(base == NULL)) 452 if (unlikely(base == NULL))
457 BUG(); 453 BUG();
458 454
459 dma_base = MAP_BASE + 455 dma_base = iommu->page_table_map_base +
460 ((base - iommu->page_table) << IO_PAGE_SHIFT); 456 ((base - iommu->page_table) << IO_PAGE_SHIFT);
461 457
462 /* Normalize DVMA addresses. */ 458 /* Normalize DVMA addresses. */
@@ -485,7 +481,9 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
485 481
486void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 482void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
487{ 483{
488 struct sbus_iommu *iommu; 484 struct sbus_info *info;
485 struct iommu *iommu;
486 struct strbuf *strbuf;
489 iopte_t *base; 487 iopte_t *base;
490 unsigned long flags, i, npages; 488 unsigned long flags, i, npages;
491 u32 bus_addr; 489 u32 bus_addr;
@@ -493,7 +491,9 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
493 if (unlikely(direction == SBUS_DMA_NONE)) 491 if (unlikely(direction == SBUS_DMA_NONE))
494 BUG(); 492 BUG();
495 493
496 iommu = sdev->bus->iommu; 494 info = sdev->bus->iommu;
495 iommu = &info->iommu;
496 strbuf = &info->strbuf;
497 497
498 bus_addr = sglist->dma_address & IO_PAGE_MASK; 498 bus_addr = sglist->dma_address & IO_PAGE_MASK;
499 499
@@ -505,29 +505,33 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
505 bus_addr) >> IO_PAGE_SHIFT; 505 bus_addr) >> IO_PAGE_SHIFT;
506 506
507 base = iommu->page_table + 507 base = iommu->page_table +
508 ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); 508 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
509 509
510 spin_lock_irqsave(&iommu->lock, flags); 510 spin_lock_irqsave(&iommu->lock, flags);
511 sbus_strbuf_flush(iommu, bus_addr, npages, direction); 511 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
512 for (i = 0; i < npages; i++) 512 for (i = 0; i < npages; i++)
513 iopte_val(base[i]) = 0UL; 513 iopte_val(base[i]) = 0UL;
514 free_npages(iommu, bus_addr - MAP_BASE, npages); 514 free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
515 spin_unlock_irqrestore(&iommu->lock, flags); 515 spin_unlock_irqrestore(&iommu->lock, flags);
516} 516}
517 517
518void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) 518void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
519{ 519{
520 struct sbus_iommu *iommu; 520 struct sbus_info *info;
521 struct iommu *iommu;
522 struct strbuf *strbuf;
521 unsigned long flags, npages; 523 unsigned long flags, npages;
522 524
523 iommu = sdev->bus->iommu; 525 info = sdev->bus->iommu;
526 iommu = &info->iommu;
527 strbuf = &info->strbuf;
524 528
525 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 529 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
526 npages >>= IO_PAGE_SHIFT; 530 npages >>= IO_PAGE_SHIFT;
527 bus_addr &= IO_PAGE_MASK; 531 bus_addr &= IO_PAGE_MASK;
528 532
529 spin_lock_irqsave(&iommu->lock, flags); 533 spin_lock_irqsave(&iommu->lock, flags);
530 sbus_strbuf_flush(iommu, bus_addr, npages, direction); 534 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
531 spin_unlock_irqrestore(&iommu->lock, flags); 535 spin_unlock_irqrestore(&iommu->lock, flags);
532} 536}
533 537
@@ -537,11 +541,15 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz
537 541
538void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) 542void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
539{ 543{
540 struct sbus_iommu *iommu; 544 struct sbus_info *info;
545 struct iommu *iommu;
546 struct strbuf *strbuf;
541 unsigned long flags, npages, i; 547 unsigned long flags, npages, i;
542 u32 bus_addr; 548 u32 bus_addr;
543 549
544 iommu = sdev->bus->iommu; 550 info = sdev->bus->iommu;
551 iommu = &info->iommu;
552 strbuf = &info->strbuf;
545 553
546 bus_addr = sglist[0].dma_address & IO_PAGE_MASK; 554 bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
547 for (i = 0; i < nelems; i++) { 555 for (i = 0; i < nelems; i++) {
@@ -553,7 +561,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist,
553 - bus_addr) >> IO_PAGE_SHIFT; 561 - bus_addr) >> IO_PAGE_SHIFT;
554 562
555 spin_lock_irqsave(&iommu->lock, flags); 563 spin_lock_irqsave(&iommu->lock, flags);
556 sbus_strbuf_flush(iommu, bus_addr, npages, direction); 564 sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
557 spin_unlock_irqrestore(&iommu->lock, flags); 565 spin_unlock_irqrestore(&iommu->lock, flags);
558} 566}
559 567
@@ -564,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg,
564/* Enable 64-bit DVMA mode for the given device. */ 572/* Enable 64-bit DVMA mode for the given device. */
565void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) 573void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
566{ 574{
567 struct sbus_iommu *iommu = sdev->bus->iommu; 575 struct sbus_info *info = sdev->bus->iommu;
576 struct iommu *iommu = &info->iommu;
568 int slot = sdev->slot; 577 int slot = sdev->slot;
569 unsigned long cfg_reg; 578 unsigned long cfg_reg;
570 u64 val; 579 u64 val;
571 580
572 cfg_reg = iommu->sbus_control_reg; 581 cfg_reg = iommu->write_complete_reg;
573 switch (slot) { 582 switch (slot) {
574 case 0: 583 case 0:
575 cfg_reg += 0x20UL; 584 cfg_reg += 0x20UL;
@@ -704,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
704unsigned int sbus_build_irq(void *buscookie, unsigned int ino) 713unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
705{ 714{
706 struct sbus_bus *sbus = (struct sbus_bus *)buscookie; 715 struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
707 struct sbus_iommu *iommu = sbus->iommu; 716 struct sbus_info *info = sbus->iommu;
708 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; 717 struct iommu *iommu = &info->iommu;
718 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
709 unsigned long imap, iclr; 719 unsigned long imap, iclr;
710 int sbus_level = 0; 720 int sbus_level = 0;
711 721
@@ -766,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
766static irqreturn_t sysio_ue_handler(int irq, void *dev_id) 776static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
767{ 777{
768 struct sbus_bus *sbus = dev_id; 778 struct sbus_bus *sbus = dev_id;
769 struct sbus_iommu *iommu = sbus->iommu; 779 struct sbus_info *info = sbus->iommu;
770 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; 780 struct iommu *iommu = &info->iommu;
781 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
771 unsigned long afsr_reg, afar_reg; 782 unsigned long afsr_reg, afar_reg;
772 unsigned long afsr, afar, error_bits; 783 unsigned long afsr, afar, error_bits;
773 int reported; 784 int reported;
@@ -838,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
838static irqreturn_t sysio_ce_handler(int irq, void *dev_id) 849static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
839{ 850{
840 struct sbus_bus *sbus = dev_id; 851 struct sbus_bus *sbus = dev_id;
841 struct sbus_iommu *iommu = sbus->iommu; 852 struct sbus_info *info = sbus->iommu;
842 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; 853 struct iommu *iommu = &info->iommu;
854 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
843 unsigned long afsr_reg, afar_reg; 855 unsigned long afsr_reg, afar_reg;
844 unsigned long afsr, afar, error_bits; 856 unsigned long afsr, afar, error_bits;
845 int reported; 857 int reported;
@@ -915,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
915static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) 927static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
916{ 928{
917 struct sbus_bus *sbus = dev_id; 929 struct sbus_bus *sbus = dev_id;
918 struct sbus_iommu *iommu = sbus->iommu; 930 struct sbus_info *info = sbus->iommu;
931 struct iommu *iommu = &info->iommu;
919 unsigned long afsr_reg, afar_reg, reg_base; 932 unsigned long afsr_reg, afar_reg, reg_base;
920 unsigned long afsr, afar, error_bits; 933 unsigned long afsr, afar, error_bits;
921 int reported; 934 int reported;
922 935
923 reg_base = iommu->sbus_control_reg - 0x2000UL; 936 reg_base = iommu->write_complete_reg - 0x2000UL;
924 afsr_reg = reg_base + SYSIO_SBUS_AFSR; 937 afsr_reg = reg_base + SYSIO_SBUS_AFSR;
925 afar_reg = reg_base + SYSIO_SBUS_AFAR; 938 afar_reg = reg_base + SYSIO_SBUS_AFAR;
926 939
@@ -982,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
982 995
983static void __init sysio_register_error_handlers(struct sbus_bus *sbus) 996static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
984{ 997{
985 struct sbus_iommu *iommu = sbus->iommu; 998 struct sbus_info *info = sbus->iommu;
986 unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; 999 struct iommu *iommu = &info->iommu;
1000 unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
987 unsigned int irq; 1001 unsigned int irq;
988 u64 control; 1002 u64 control;
989 1003
@@ -1017,18 +1031,20 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
1017 SYSIO_ECNTRL_CEEN), 1031 SYSIO_ECNTRL_CEEN),
1018 reg_base + ECC_CONTROL); 1032 reg_base + ECC_CONTROL);
1019 1033
1020 control = upa_readq(iommu->sbus_control_reg); 1034 control = upa_readq(iommu->write_complete_reg);
1021 control |= 0x100UL; /* SBUS Error Interrupt Enable */ 1035 control |= 0x100UL; /* SBUS Error Interrupt Enable */
1022 upa_writeq(control, iommu->sbus_control_reg); 1036 upa_writeq(control, iommu->write_complete_reg);
1023} 1037}
1024 1038
1025/* Boot time initialization. */ 1039/* Boot time initialization. */
1026static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) 1040static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1027{ 1041{
1028 struct linux_prom64_registers *pr; 1042 const struct linux_prom64_registers *pr;
1029 struct device_node *dp; 1043 struct device_node *dp;
1030 struct sbus_iommu *iommu; 1044 struct sbus_info *info;
1031 unsigned long regs; 1045 struct iommu *iommu;
1046 struct strbuf *strbuf;
1047 unsigned long regs, reg_base;
1032 u64 control; 1048 u64 control;
1033 int i; 1049 int i;
1034 1050
@@ -1043,33 +1059,42 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1043 } 1059 }
1044 regs = pr->phys_addr; 1060 regs = pr->phys_addr;
1045 1061
1046 iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC); 1062 info = kzalloc(sizeof(*info), GFP_ATOMIC);
1047 if (iommu == NULL) { 1063 if (info == NULL) {
1048 prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n"); 1064 prom_printf("sbus_iommu_init: Fatal error, "
1065 "kmalloc(info) failed\n");
1049 prom_halt(); 1066 prom_halt();
1050 } 1067 }
1051 1068
1052 /* Align on E$ line boundary. */ 1069 iommu = &info->iommu;
1053 iommu = (struct sbus_iommu *) 1070 strbuf = &info->strbuf;
1054 (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
1055 ~(SMP_CACHE_BYTES - 1UL));
1056 1071
1057 memset(iommu, 0, sizeof(*iommu)); 1072 reg_base = regs + SYSIO_IOMMUREG_BASE;
1073 iommu->iommu_control = reg_base + IOMMU_CONTROL;
1074 iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
1075 iommu->iommu_flush = reg_base + IOMMU_FLUSH;
1058 1076
1059 /* Setup spinlock. */ 1077 reg_base = regs + SYSIO_STRBUFREG_BASE;
1060 spin_lock_init(&iommu->lock); 1078 strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
1079 strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
1080 strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
1061 1081
1062 /* Init register offsets. */ 1082 strbuf->strbuf_enabled = 1;
1063 iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE; 1083
1064 iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE; 1084 strbuf->strbuf_flushflag = (volatile unsigned long *)
1085 ((((unsigned long)&strbuf->__flushflag_buf[0])
1086 + 63UL)
1087 & ~63UL);
1088 strbuf->strbuf_flushflag_pa = (unsigned long)
1089 __pa(strbuf->strbuf_flushflag);
1065 1090
1066 /* The SYSIO SBUS control register is used for dummy reads 1091 /* The SYSIO SBUS control register is used for dummy reads
1067 * in order to ensure write completion. 1092 * in order to ensure write completion.
1068 */ 1093 */
1069 iommu->sbus_control_reg = regs + 0x2000UL; 1094 iommu->write_complete_reg = regs + 0x2000UL;
1070 1095
1071 /* Link into SYSIO software state. */ 1096 /* Link into SYSIO software state. */
1072 sbus->iommu = iommu; 1097 sbus->iommu = info;
1073 1098
1074 printk("SYSIO: UPA portID %x, at %016lx\n", 1099 printk("SYSIO: UPA portID %x, at %016lx\n",
1075 sbus->portid, regs); 1100 sbus->portid, regs);
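
/*
 * The hunk above replaces the old per-iommu flush-flag word with a flag
 * carved out of a buffer inside struct strbuf: the buffer address is rounded
 * up to a 64-byte boundary before its physical address is programmed into
 * the FSYNC register.  The rounding idiom, isolated; __flushflag_buf itself
 * is declared in the struct strbuf definition, which this patch does not show.
 */
static volatile unsigned long *align_flushflag_sketch(unsigned char *buf)
{
	unsigned long addr = (unsigned long)buf;

	addr = (addr + 63UL) & ~63UL;		/* round up to a 64-byte boundary */

	return (volatile unsigned long *)addr;	/* __pa() of this goes to STRBUF_FSYNC */
}
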
@@ -1077,40 +1102,44 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1077 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ 1102 /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
1078 sbus_iommu_table_init(iommu, IO_TSB_SIZE); 1103 sbus_iommu_table_init(iommu, IO_TSB_SIZE);
1079 1104
1080 control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL); 1105 control = upa_readq(iommu->iommu_control);
1081 control = ((7UL << 16UL) | 1106 control = ((7UL << 16UL) |
1082 (0UL << 2UL) | 1107 (0UL << 2UL) |
1083 (1UL << 1UL) | 1108 (1UL << 1UL) |
1084 (1UL << 0UL)); 1109 (1UL << 0UL));
1085 upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL); 1110 upa_writeq(control, iommu->iommu_control);
1086 1111
1087 /* Clean out any cruft in the IOMMU using 1112 /* Clean out any cruft in the IOMMU using
1088 * diagnostic accesses. 1113 * diagnostic accesses.
1089 */ 1114 */
1090 for (i = 0; i < 16; i++) { 1115 for (i = 0; i < 16; i++) {
1091 unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG; 1116 unsigned long dram, tag;
1092 unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG; 1117
1118 dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
1119 tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
1093 1120
1094 dram += (unsigned long)i * 8UL; 1121 dram += (unsigned long)i * 8UL;
1095 tag += (unsigned long)i * 8UL; 1122 tag += (unsigned long)i * 8UL;
1096 upa_writeq(0, dram); 1123 upa_writeq(0, dram);
1097 upa_writeq(0, tag); 1124 upa_writeq(0, tag);
1098 } 1125 }
1099 upa_readq(iommu->sbus_control_reg); 1126 upa_readq(iommu->write_complete_reg);
1100 1127
1101 /* Give the TSB to SYSIO. */ 1128 /* Give the TSB to SYSIO. */
1102 upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE); 1129 upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
1103 1130
1104 /* Setup streaming buffer, DE=1 SB_EN=1 */ 1131 /* Setup streaming buffer, DE=1 SB_EN=1 */
1105 control = (1UL << 1UL) | (1UL << 0UL); 1132 control = (1UL << 1UL) | (1UL << 0UL);
1106 upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL); 1133 upa_writeq(control, strbuf->strbuf_control);
1107 1134
1108 /* Clear out the tags using diagnostics. */ 1135 /* Clear out the tags using diagnostics. */
1109 for (i = 0; i < 16; i++) { 1136 for (i = 0; i < 16; i++) {
1110 unsigned long ptag, ltag; 1137 unsigned long ptag, ltag;
1111 1138
1112 ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG; 1139 ptag = strbuf->strbuf_control +
1113 ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG; 1140 (STRBUF_PTAGDIAG - STRBUF_CONTROL);
1141 ltag = strbuf->strbuf_control +
1142 (STRBUF_LTAGDIAG - STRBUF_CONTROL);
1114 ptag += (unsigned long)i * 8UL; 1143 ptag += (unsigned long)i * 8UL;
1115 ltag += (unsigned long)i * 8UL; 1144 ltag += (unsigned long)i * 8UL;
1116 1145
@@ -1119,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1119 } 1148 }
1120 1149
1121 /* Enable DVMA arbitration for all devices/slots. */ 1150 /* Enable DVMA arbitration for all devices/slots. */
1122 control = upa_readq(iommu->sbus_control_reg); 1151 control = upa_readq(iommu->write_complete_reg);
1123 control |= 0x3fUL; 1152 control |= 0x3fUL;
1124 upa_writeq(control, iommu->sbus_control_reg); 1153 upa_writeq(control, iommu->write_complete_reg);
1125 1154
1126 /* Now some Xfire specific grot... */ 1155 /* Now some Xfire specific grot... */
1127 if (this_is_starfire) 1156 if (this_is_starfire)
@@ -1133,7 +1162,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1133void sbus_fill_device_irq(struct sbus_dev *sdev) 1162void sbus_fill_device_irq(struct sbus_dev *sdev)
1134{ 1163{
1135 struct device_node *dp = of_find_node_by_phandle(sdev->prom_node); 1164 struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
1136 struct linux_prom_irqs *irqs; 1165 const struct linux_prom_irqs *irqs;
1137 1166
1138 irqs = of_get_property(dp, "interrupts", NULL); 1167 irqs = of_get_property(dp, "interrupts", NULL);
1139 if (!irqs) { 1168 if (!irqs) {
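
/*
 * Throughout this file the bus-private pointer now carries a struct
 * sbus_info that simply embeds the generic iommu and strbuf, and diagnostic
 * register addresses are derived from a single stored base (iommu_control,
 * strbuf_control, write_complete_reg) instead of separate *_regs fields.
 * Sketch of both idioms with stand-in types; the real offsets are the
 * unchanged IOMMU_xxx / STRBUF_xxx #defines near the top of the file.
 */
struct iommu_sk  { unsigned long iommu_control; unsigned long write_complete_reg; };
struct strbuf_sk { unsigned long strbuf_control; };

struct sbus_info_sketch {			/* mirrors struct sbus_info above */
	struct iommu_sk  iommu;
	struct strbuf_sk strbuf;
};

static void split_bus_cookie_sketch(void *bus_iommu_cookie,
				    struct iommu_sk **iommu,
				    struct strbuf_sk **strbuf)
{
	struct sbus_info_sketch *info = bus_iommu_cookie;	/* sdev->bus->iommu above */

	*iommu  = &info->iommu;
	*strbuf = &info->strbuf;
}

static unsigned long diag_reg_sketch(const struct iommu_sk *iommu,
				     unsigned long diag_off,
				     unsigned long control_off)
{
	/* e.g. iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL), as in __iommu_flushall() */
	return iommu->iommu_control + (diag_off - control_off);
}
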
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fc99f7b8012f..d4f0a70f4845 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,7 +45,7 @@
45extern void calibrate_delay(void); 45extern void calibrate_delay(void);
46 46
47/* Please don't make this stuff initdata!!! --DaveM */ 47/* Please don't make this stuff initdata!!! --DaveM */
48static unsigned char boot_cpu_id; 48unsigned char boot_cpu_id;
49 49
50cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; 50cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
51cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; 51cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
81 struct device_node *dp; 81 struct device_node *dp;
82 int def; 82 int def;
83 83
84 /* multiplier and counter set by
85 smp_setup_percpu_timer() */
86 cpu_data(id).udelay_val = loops_per_jiffy; 84 cpu_data(id).udelay_val = loops_per_jiffy;
87 85
88 cpu_find_by_mid(id, &dp); 86 cpu_find_by_mid(id, &dp);
@@ -125,7 +123,7 @@ void __init smp_store_cpu_info(int id)
125 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); 123 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
126} 124}
127 125
128static void smp_setup_percpu_timer(void); 126extern void setup_sparc64_timer(void);
129 127
130static volatile unsigned long callin_flag = 0; 128static volatile unsigned long callin_flag = 0;
131 129
@@ -140,7 +138,7 @@ void __init smp_callin(void)
140 138
141 __flush_tlb_all(); 139 __flush_tlb_all();
142 140
143 smp_setup_percpu_timer(); 141 setup_sparc64_timer();
144 142
145 if (cheetah_pcache_forced_on) 143 if (cheetah_pcache_forced_on)
146 cheetah_enable_pcache(); 144 cheetah_enable_pcache();
@@ -177,8 +175,6 @@ void cpu_panic(void)
177 panic("SMP bolixed\n"); 175 panic("SMP bolixed\n");
178} 176}
179 177
180static unsigned long current_tick_offset __read_mostly;
181
182/* This tick register synchronization scheme is taken entirely from 178/* This tick register synchronization scheme is taken entirely from
183 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. 179 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
184 * 180 *
@@ -261,7 +257,7 @@ void smp_synchronize_tick_client(void)
261 } else 257 } else
262 adj = -delta; 258 adj = -delta;
263 259
264 tick_ops->add_tick(adj, current_tick_offset); 260 tick_ops->add_tick(adj);
265 } 261 }
266#if DEBUG_TICK_SYNC 262#if DEBUG_TICK_SYNC
267 t[i].rt = rt; 263 t[i].rt = rt;
@@ -1180,117 +1176,15 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1180 preempt_enable(); 1176 preempt_enable();
1181} 1177}
1182 1178
1183#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
1184#define prof_counter(__cpu) cpu_data(__cpu).counter
1185
1186void smp_percpu_timer_interrupt(struct pt_regs *regs)
1187{
1188 unsigned long compare, tick, pstate;
1189 int cpu = smp_processor_id();
1190 int user = user_mode(regs);
1191 struct pt_regs *old_regs;
1192
1193 /*
1194 * Check for level 14 softint.
1195 */
1196 {
1197 unsigned long tick_mask = tick_ops->softint_mask;
1198
1199 if (!(get_softint() & tick_mask)) {
1200 extern void handler_irq(int, struct pt_regs *);
1201
1202 handler_irq(14, regs);
1203 return;
1204 }
1205 clear_softint(tick_mask);
1206 }
1207
1208 old_regs = set_irq_regs(regs);
1209 do {
1210 profile_tick(CPU_PROFILING);
1211 if (!--prof_counter(cpu)) {
1212 irq_enter();
1213
1214 if (cpu == boot_cpu_id) {
1215 kstat_this_cpu.irqs[0]++;
1216 timer_tick_interrupt(regs);
1217 }
1218
1219 update_process_times(user);
1220
1221 irq_exit();
1222
1223 prof_counter(cpu) = prof_multiplier(cpu);
1224 }
1225
1226 /* Guarantee that the following sequences execute
1227 * uninterrupted.
1228 */
1229 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1230 "wrpr %0, %1, %%pstate"
1231 : "=r" (pstate)
1232 : "i" (PSTATE_IE));
1233
1234 compare = tick_ops->add_compare(current_tick_offset);
1235 tick = tick_ops->get_tick();
1236
1237 /* Restore PSTATE_IE. */
1238 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1239 : /* no outputs */
1240 : "r" (pstate));
1241 } while (time_after_eq(tick, compare));
1242 set_irq_regs(old_regs);
1243}
1244
1245static void __init smp_setup_percpu_timer(void)
1246{
1247 int cpu = smp_processor_id();
1248 unsigned long pstate;
1249
1250 prof_counter(cpu) = prof_multiplier(cpu) = 1;
1251
1252 /* Guarantee that the following sequences execute
1253 * uninterrupted.
1254 */
1255 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1256 "wrpr %0, %1, %%pstate"
1257 : "=r" (pstate)
1258 : "i" (PSTATE_IE));
1259
1260 tick_ops->init_tick(current_tick_offset);
1261
1262 /* Restore PSTATE_IE. */
1263 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1264 : /* no outputs */
1265 : "r" (pstate));
1266}
1267
1268void __init smp_tick_init(void) 1179void __init smp_tick_init(void)
1269{ 1180{
1270 boot_cpu_id = hard_smp_processor_id(); 1181 boot_cpu_id = hard_smp_processor_id();
1271 current_tick_offset = timer_tick_offset;
1272
1273 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1274} 1182}
1275 1183
1276/* /proc/profile writes can call this, don't __init it please. */ 1184/* /proc/profile writes can call this, don't __init it please. */
1277static DEFINE_SPINLOCK(prof_setup_lock);
1278
1279int setup_profiling_timer(unsigned int multiplier) 1185int setup_profiling_timer(unsigned int multiplier)
1280{ 1186{
1281 unsigned long flags; 1187 return -EINVAL;
1282 int i;
1283
1284 if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1285 return -EINVAL;
1286
1287 spin_lock_irqsave(&prof_setup_lock, flags);
1288 for_each_possible_cpu(i)
1289 prof_multiplier(i) = multiplier;
1290 current_tick_offset = (timer_tick_offset / multiplier);
1291 spin_unlock_irqrestore(&prof_setup_lock, flags);
1292
1293 return 0;
1294} 1188}
1295 1189
1296static void __init smp_tune_scheduling(void) 1190static void __init smp_tune_scheduling(void)
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index beffc82a1e85..d00f51a5683f 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -212,7 +212,6 @@ EXPORT_SYMBOL(insl);
212#ifdef CONFIG_PCI 212#ifdef CONFIG_PCI
213EXPORT_SYMBOL(ebus_chain); 213EXPORT_SYMBOL(ebus_chain);
214EXPORT_SYMBOL(isa_chain); 214EXPORT_SYMBOL(isa_chain);
215EXPORT_SYMBOL(pci_memspace_mask);
216EXPORT_SYMBOL(pci_alloc_consistent); 215EXPORT_SYMBOL(pci_alloc_consistent);
217EXPORT_SYMBOL(pci_free_consistent); 216EXPORT_SYMBOL(pci_free_consistent);
218EXPORT_SYMBOL(pci_map_single); 217EXPORT_SYMBOL(pci_map_single);
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index f84da4f1b706..259063f41f95 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -31,6 +31,9 @@
31#include <linux/profile.h> 31#include <linux/profile.h>
32#include <linux/miscdevice.h> 32#include <linux/miscdevice.h>
33#include <linux/rtc.h> 33#include <linux/rtc.h>
34#include <linux/kernel_stat.h>
35#include <linux/clockchips.h>
36#include <linux/clocksource.h>
34 37
35#include <asm/oplib.h> 38#include <asm/oplib.h>
36#include <asm/mostek.h> 39#include <asm/mostek.h>
@@ -60,6 +63,7 @@ static void __iomem *mstk48t59_regs;
60static int set_rtc_mmss(unsigned long); 63static int set_rtc_mmss(unsigned long);
61 64
62#define TICK_PRIV_BIT (1UL << 63) 65#define TICK_PRIV_BIT (1UL << 63)
66#define TICKCMP_IRQ_BIT (1UL << 63)
63 67
64#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
65unsigned long profile_pc(struct pt_regs *regs) 69unsigned long profile_pc(struct pt_regs *regs)
@@ -93,21 +97,22 @@ static void tick_disable_protection(void)
93 : "g2"); 97 : "g2");
94} 98}
95 99
96static void tick_init_tick(unsigned long offset) 100static void tick_disable_irq(void)
97{ 101{
98 tick_disable_protection();
99
100 __asm__ __volatile__( 102 __asm__ __volatile__(
101 " rd %%tick, %%g1\n"
102 " andn %%g1, %1, %%g1\n"
103 " ba,pt %%xcc, 1f\n" 103 " ba,pt %%xcc, 1f\n"
104 " add %%g1, %0, %%g1\n" 104 " nop\n"
105 " .align 64\n" 105 " .align 64\n"
106 "1: wr %%g1, 0x0, %%tick_cmpr\n" 106 "1: wr %0, 0x0, %%tick_cmpr\n"
107 " rd %%tick_cmpr, %%g0" 107 " rd %%tick_cmpr, %%g0"
108 : /* no outputs */ 108 : /* no outputs */
109 : "r" (offset), "r" (TICK_PRIV_BIT) 109 : "r" (TICKCMP_IRQ_BIT));
110 : "g1"); 110}
111
112static void tick_init_tick(void)
113{
114 tick_disable_protection();
115 tick_disable_irq();
111} 116}
112 117
113static unsigned long tick_get_tick(void) 118static unsigned long tick_get_tick(void)
@@ -121,20 +126,14 @@ static unsigned long tick_get_tick(void)
121 return ret & ~TICK_PRIV_BIT; 126 return ret & ~TICK_PRIV_BIT;
122} 127}
123 128
124static unsigned long tick_get_compare(void) 129static int tick_add_compare(unsigned long adj)
125{ 130{
126 unsigned long ret; 131 unsigned long orig_tick, new_tick, new_compare;
127 132
128 __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" 133 __asm__ __volatile__("rd %%tick, %0"
129 "mov %0, %0" 134 : "=r" (orig_tick));
130 : "=r" (ret));
131 135
132 return ret; 136 orig_tick &= ~TICKCMP_IRQ_BIT;
133}
134
135static unsigned long tick_add_compare(unsigned long adj)
136{
137 unsigned long new_compare;
138 137
139 /* Workaround for Spitfire Errata (#54 I think??), I discovered 138 /* Workaround for Spitfire Errata (#54 I think??), I discovered
140 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch 139 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
@@ -145,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj)
145 * at the start of an I-cache line, and perform a dummy 144 * at the start of an I-cache line, and perform a dummy
146 * read back from %tick_cmpr right after writing to it. -DaveM 145 * read back from %tick_cmpr right after writing to it. -DaveM
147 */ 146 */
148 __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" 147 __asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
149 "ba,pt %%xcc, 1f\n\t" 148 " add %1, %2, %0\n\t"
150 " add %0, %1, %0\n\t"
151 ".align 64\n" 149 ".align 64\n"
152 "1:\n\t" 150 "1:\n\t"
153 "wr %0, 0, %%tick_cmpr\n\t" 151 "wr %0, 0, %%tick_cmpr\n\t"
154 "rd %%tick_cmpr, %%g0" 152 "rd %%tick_cmpr, %%g0\n\t"
155 : "=&r" (new_compare) 153 : "=r" (new_compare)
156 : "r" (adj)); 154 : "r" (orig_tick), "r" (adj));
157 155
158 return new_compare; 156 __asm__ __volatile__("rd %%tick, %0"
157 : "=r" (new_tick));
158 new_tick &= ~TICKCMP_IRQ_BIT;
159
160 return ((long)(new_tick - (orig_tick+adj))) > 0L;
159} 161}
160 162
161static unsigned long tick_add_tick(unsigned long adj, unsigned long offset) 163static unsigned long tick_add_tick(unsigned long adj)
162{ 164{
163 unsigned long new_tick, tmp; 165 unsigned long new_tick;
164 166
165 /* Also need to handle Blackbird bug here too. */ 167 /* Also need to handle Blackbird bug here too. */
166 __asm__ __volatile__("rd %%tick, %0\n\t" 168 __asm__ __volatile__("rd %%tick, %0\n\t"
167 "add %0, %2, %0\n\t" 169 "add %0, %1, %0\n\t"
168 "wrpr %0, 0, %%tick\n\t" 170 "wrpr %0, 0, %%tick\n\t"
169 "andn %0, %4, %1\n\t" 171 : "=&r" (new_tick)
170 "ba,pt %%xcc, 1f\n\t" 172 : "r" (adj));
171 " add %1, %3, %1\n\t"
172 ".align 64\n"
173 "1:\n\t"
174 "wr %1, 0, %%tick_cmpr\n\t"
175 "rd %%tick_cmpr, %%g0"
176 : "=&r" (new_tick), "=&r" (tmp)
177 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
178 173
179 return new_tick; 174 return new_tick;
180} 175}
181 176
182static struct sparc64_tick_ops tick_operations __read_mostly = { 177static struct sparc64_tick_ops tick_operations __read_mostly = {
178 .name = "tick",
183 .init_tick = tick_init_tick, 179 .init_tick = tick_init_tick,
180 .disable_irq = tick_disable_irq,
184 .get_tick = tick_get_tick, 181 .get_tick = tick_get_tick,
185 .get_compare = tick_get_compare,
186 .add_tick = tick_add_tick, 182 .add_tick = tick_add_tick,
187 .add_compare = tick_add_compare, 183 .add_compare = tick_add_compare,
188 .softint_mask = 1UL << 0, 184 .softint_mask = 1UL << 0,
@@ -190,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
190 186
191struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; 187struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
192 188
193static void stick_init_tick(unsigned long offset) 189static void stick_disable_irq(void)
190{
191 __asm__ __volatile__(
192 "wr %0, 0x0, %%asr25"
193 : /* no outputs */
194 : "r" (TICKCMP_IRQ_BIT));
195}
196
197static void stick_init_tick(void)
194{ 198{
195 /* Writes to the %tick and %stick register are not 199 /* Writes to the %tick and %stick register are not
196 * allowed on sun4v. The Hypervisor controls that 200 * allowed on sun4v. The Hypervisor controls that
@@ -198,6 +202,7 @@ static void stick_init_tick(unsigned long offset)
198 */ 202 */
199 if (tlb_type != hypervisor) { 203 if (tlb_type != hypervisor) {
200 tick_disable_protection(); 204 tick_disable_protection();
205 tick_disable_irq();
201 206
202 /* Let the user get at STICK too. */ 207 /* Let the user get at STICK too. */
203 __asm__ __volatile__( 208 __asm__ __volatile__(
@@ -209,14 +214,7 @@ static void stick_init_tick(unsigned long offset)
209 : "g1", "g2"); 214 : "g1", "g2");
210 } 215 }
211 216
212 __asm__ __volatile__( 217 stick_disable_irq();
213 " rd %%asr24, %%g1\n"
214 " andn %%g1, %1, %%g1\n"
215 " add %%g1, %0, %%g1\n"
216 " wr %%g1, 0x0, %%asr25"
217 : /* no outputs */
218 : "r" (offset), "r" (TICK_PRIV_BIT)
219 : "g1");
220} 218}
221 219
222static unsigned long stick_get_tick(void) 220static unsigned long stick_get_tick(void)
@@ -229,49 +227,43 @@ static unsigned long stick_get_tick(void)
229 return ret & ~TICK_PRIV_BIT; 227 return ret & ~TICK_PRIV_BIT;
230} 228}
231 229
232static unsigned long stick_get_compare(void) 230static unsigned long stick_add_tick(unsigned long adj)
233{ 231{
234 unsigned long ret; 232 unsigned long new_tick;
235
236 __asm__ __volatile__("rd %%asr25, %0"
237 : "=r" (ret));
238
239 return ret;
240}
241
242static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
243{
244 unsigned long new_tick, tmp;
245 233
246 __asm__ __volatile__("rd %%asr24, %0\n\t" 234 __asm__ __volatile__("rd %%asr24, %0\n\t"
247 "add %0, %2, %0\n\t" 235 "add %0, %1, %0\n\t"
248 "wr %0, 0, %%asr24\n\t" 236 "wr %0, 0, %%asr24\n\t"
249 "andn %0, %4, %1\n\t" 237 : "=&r" (new_tick)
250 "add %1, %3, %1\n\t" 238 : "r" (adj));
251 "wr %1, 0, %%asr25"
252 : "=&r" (new_tick), "=&r" (tmp)
253 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
254 239
255 return new_tick; 240 return new_tick;
256} 241}
257 242
258static unsigned long stick_add_compare(unsigned long adj) 243static int stick_add_compare(unsigned long adj)
259{ 244{
260 unsigned long new_compare; 245 unsigned long orig_tick, new_tick;
261 246
262 __asm__ __volatile__("rd %%asr25, %0\n\t" 247 __asm__ __volatile__("rd %%asr24, %0"
263 "add %0, %1, %0\n\t" 248 : "=r" (orig_tick));
264 "wr %0, 0, %%asr25" 249 orig_tick &= ~TICKCMP_IRQ_BIT;
265 : "=&r" (new_compare) 250
266 : "r" (adj)); 251 __asm__ __volatile__("wr %0, 0, %%asr25"
252 : /* no outputs */
253 : "r" (orig_tick + adj));
254
255 __asm__ __volatile__("rd %%asr24, %0"
256 : "=r" (new_tick));
257 new_tick &= ~TICKCMP_IRQ_BIT;
267 258
268 return new_compare; 259 return ((long)(new_tick - (orig_tick+adj))) > 0L;
269} 260}
270 261
271static struct sparc64_tick_ops stick_operations __read_mostly = { 262static struct sparc64_tick_ops stick_operations __read_mostly = {
263 .name = "stick",
272 .init_tick = stick_init_tick, 264 .init_tick = stick_init_tick,
265 .disable_irq = stick_disable_irq,
273 .get_tick = stick_get_tick, 266 .get_tick = stick_get_tick,
274 .get_compare = stick_get_compare,
275 .add_tick = stick_add_tick, 267 .add_tick = stick_add_tick,
276 .add_compare = stick_add_compare, 268 .add_compare = stick_add_compare,
277 .softint_mask = 1UL << 16, 269 .softint_mask = 1UL << 16,
@@ -320,20 +312,6 @@ static unsigned long __hbird_read_stick(void)
320 return ret; 312 return ret;
321} 313}
322 314
323static unsigned long __hbird_read_compare(void)
324{
325 unsigned long low, high;
326 unsigned long addr = HBIRD_STICKCMP_ADDR;
327
328 __asm__ __volatile__("ldxa [%2] %3, %0\n\t"
329 "add %2, 0x8, %2\n\t"
330 "ldxa [%2] %3, %1"
331 : "=&r" (low), "=&r" (high), "=&r" (addr)
332 : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
333
334 return (high << 32UL) | low;
335}
336
337static void __hbird_write_stick(unsigned long val) 315static void __hbird_write_stick(unsigned long val)
338{ 316{
339 unsigned long low = (val & 0xffffffffUL); 317 unsigned long low = (val & 0xffffffffUL);
@@ -364,10 +342,13 @@ static void __hbird_write_compare(unsigned long val)
364 "i" (ASI_PHYS_BYPASS_EC_E)); 342 "i" (ASI_PHYS_BYPASS_EC_E));
365} 343}
366 344
367static void hbtick_init_tick(unsigned long offset) 345static void hbtick_disable_irq(void)
368{ 346{
369 unsigned long val; 347 __hbird_write_compare(TICKCMP_IRQ_BIT);
348}
370 349
350static void hbtick_init_tick(void)
351{
371 tick_disable_protection(); 352 tick_disable_protection();
372 353
373 /* XXX This seems to be necessary to 'jumpstart' Hummingbird 354 /* XXX This seems to be necessary to 'jumpstart' Hummingbird
@@ -377,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset)
377 */ 358 */
378 __hbird_write_stick(__hbird_read_stick()); 359 __hbird_write_stick(__hbird_read_stick());
379 360
380 val = __hbird_read_stick() & ~TICK_PRIV_BIT; 361 hbtick_disable_irq();
381 __hbird_write_compare(val + offset);
382} 362}
383 363
384static unsigned long hbtick_get_tick(void) 364static unsigned long hbtick_get_tick(void)
@@ -386,122 +366,95 @@ static unsigned long hbtick_get_tick(void)
386 return __hbird_read_stick() & ~TICK_PRIV_BIT; 366 return __hbird_read_stick() & ~TICK_PRIV_BIT;
387} 367}
388 368
389static unsigned long hbtick_get_compare(void) 369static unsigned long hbtick_add_tick(unsigned long adj)
390{
391 return __hbird_read_compare();
392}
393
394static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
395{ 370{
396 unsigned long val; 371 unsigned long val;
397 372
398 val = __hbird_read_stick() + adj; 373 val = __hbird_read_stick() + adj;
399 __hbird_write_stick(val); 374 __hbird_write_stick(val);
400 375
401 val &= ~TICK_PRIV_BIT;
402 __hbird_write_compare(val + offset);
403
404 return val; 376 return val;
405} 377}
406 378
407static unsigned long hbtick_add_compare(unsigned long adj) 379static int hbtick_add_compare(unsigned long adj)
408{ 380{
409 unsigned long val = __hbird_read_compare() + adj; 381 unsigned long val = __hbird_read_stick();
382 unsigned long val2;
410 383
411 val &= ~TICK_PRIV_BIT; 384 val &= ~TICKCMP_IRQ_BIT;
385 val += adj;
412 __hbird_write_compare(val); 386 __hbird_write_compare(val);
413 387
414 return val; 388 val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
389
390 return ((long)(val2 - val)) > 0L;
415} 391}
416 392
417static struct sparc64_tick_ops hbtick_operations __read_mostly = { 393static struct sparc64_tick_ops hbtick_operations __read_mostly = {
394 .name = "hbtick",
418 .init_tick = hbtick_init_tick, 395 .init_tick = hbtick_init_tick,
396 .disable_irq = hbtick_disable_irq,
419 .get_tick = hbtick_get_tick, 397 .get_tick = hbtick_get_tick,
420 .get_compare = hbtick_get_compare,
421 .add_tick = hbtick_add_tick, 398 .add_tick = hbtick_add_tick,
422 .add_compare = hbtick_add_compare, 399 .add_compare = hbtick_add_compare,
423 .softint_mask = 1UL << 0, 400 .softint_mask = 1UL << 0,
424}; 401};
425 402
426/* timer_interrupt() needs to keep up the real-time clock,
427 * as well as call the "do_timer()" routine every clocktick
428 *
429 * NOTE: On SUN5 systems the ticker interrupt comes in using 2
430 * interrupts, one at level14 and one with softint bit 0.
431 */
432unsigned long timer_tick_offset __read_mostly;
433
434static unsigned long timer_ticks_per_nsec_quotient __read_mostly; 403static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
435 404
436#define TICK_SIZE (tick_nsec / 1000) 405#define TICK_SIZE (tick_nsec / 1000)
437 406
438static inline void timer_check_rtc(void) 407#define USEC_AFTER 500000
439{ 408#define USEC_BEFORE 500000
440 /* last time the cmos clock got updated */
441 static long last_rtc_update;
442
443 /* Determine when to update the Mostek clock. */
444 if (ntp_synced() &&
445 xtime.tv_sec > last_rtc_update + 660 &&
446 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
447 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
448 if (set_rtc_mmss(xtime.tv_sec) == 0)
449 last_rtc_update = xtime.tv_sec;
450 else
451 last_rtc_update = xtime.tv_sec - 600;
452 /* do it again in 60 s */
453 }
454}
455 409
456irqreturn_t timer_interrupt(int irq, void *dev_id) 410static void sync_cmos_clock(unsigned long dummy);
457{
458 unsigned long ticks, compare, pstate;
459 411
460 write_seqlock(&xtime_lock); 412static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
461 413
462 do { 414static void sync_cmos_clock(unsigned long dummy)
463#ifndef CONFIG_SMP 415{
464 profile_tick(CPU_PROFILING); 416 struct timeval now, next;
465 update_process_times(user_mode(get_irq_regs())); 417 int fail = 1;
466#endif
467 do_timer(1);
468 418
469 /* Guarantee that the following sequences execute 419 /*
470 * uninterrupted. 420 * If we have an externally synchronized Linux clock, then update
421 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
422 * called as close as possible to 500 ms before the new second starts.
423 * This code is run on a timer. If the clock is set, that timer
424 * may not expire at the correct time. Thus, we adjust...
425 */
426 if (!ntp_synced())
427 /*
428 * Not synced, exit, do not restart a timer (if one is
429 * running, let it run out).
471 */ 430 */
472 __asm__ __volatile__("rdpr %%pstate, %0\n\t" 431 return;
473 "wrpr %0, %1, %%pstate"
474 : "=r" (pstate)
475 : "i" (PSTATE_IE));
476 432
477 compare = tick_ops->add_compare(timer_tick_offset); 433 do_gettimeofday(&now);
478 ticks = tick_ops->get_tick(); 434 if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
435 now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
436 fail = set_rtc_mmss(now.tv_sec);
479 437
480 /* Restore PSTATE_IE. */ 438 next.tv_usec = USEC_AFTER - now.tv_usec;
481 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 439 if (next.tv_usec <= 0)
482 : /* no outputs */ 440 next.tv_usec += USEC_PER_SEC;
483 : "r" (pstate));
484 } while (time_after_eq(ticks, compare));
485 441
486 timer_check_rtc(); 442 if (!fail)
443 next.tv_sec = 659;
444 else
445 next.tv_sec = 0;
487 446
488 write_sequnlock(&xtime_lock); 447 if (next.tv_usec >= USEC_PER_SEC) {
489 448 next.tv_sec++;
490 return IRQ_HANDLED; 449 next.tv_usec -= USEC_PER_SEC;
450 }
451 mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
491} 452}
492 453
493#ifdef CONFIG_SMP 454void notify_arch_cmos_timer(void)
494void timer_tick_interrupt(struct pt_regs *regs)
495{ 455{
496 write_seqlock(&xtime_lock); 456 mod_timer(&sync_cmos_timer, jiffies + 1);
497
498 do_timer(1);
499
500 timer_check_rtc();
501
502 write_sequnlock(&xtime_lock);
503} 457}
504#endif
505 458
506/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ 459/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
507static void __init kick_start_clock(void) 460static void __init kick_start_clock(void)
@@ -751,7 +704,7 @@ retry:
751 return -EOPNOTSUPP; 704 return -EOPNOTSUPP;
752} 705}
753 706
754static int __init clock_model_matches(char *model) 707static int __init clock_model_matches(const char *model)
755{ 708{
756 if (strcmp(model, "mk48t02") && 709 if (strcmp(model, "mk48t02") &&
757 strcmp(model, "mk48t08") && 710 strcmp(model, "mk48t08") &&
@@ -768,7 +721,7 @@ static int __init clock_model_matches(char *model)
768static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) 721static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
769{ 722{
770 struct device_node *dp = op->node; 723 struct device_node *dp = op->node;
771 char *model = of_get_property(dp, "model", NULL); 724 const char *model = of_get_property(dp, "model", NULL);
772 unsigned long size, flags; 725 unsigned long size, flags;
773 void __iomem *regs; 726 void __iomem *regs;
774 727
@@ -900,7 +853,6 @@ static unsigned long sparc64_init_timers(void)
900 prop = of_find_property(dp, "stick-frequency", NULL); 853 prop = of_find_property(dp, "stick-frequency", NULL);
901 } 854 }
902 clock = *(unsigned int *) prop->value; 855 clock = *(unsigned int *) prop->value;
903 timer_tick_offset = clock / HZ;
904 856
905#ifdef CONFIG_SMP 857#ifdef CONFIG_SMP
906 smp_tick_init(); 858 smp_tick_init();
@@ -909,26 +861,6 @@ static unsigned long sparc64_init_timers(void)
909 return clock; 861 return clock;
910} 862}
911 863
912static void sparc64_start_timers(void)
913{
914 unsigned long pstate;
915
916 /* Guarantee that the following sequences execute
917 * uninterrupted.
918 */
919 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
920 "wrpr %0, %1, %%pstate"
921 : "=r" (pstate)
922 : "i" (PSTATE_IE));
923
924 tick_ops->init_tick(timer_tick_offset);
925
926 /* Restore PSTATE_IE. */
927 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
928 : /* no outputs */
929 : "r" (pstate));
930}
931
932struct freq_table { 864struct freq_table {
933 unsigned long clock_tick_ref; 865 unsigned long clock_tick_ref;
934 unsigned int ref_freq; 866 unsigned int ref_freq;
@@ -975,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
975 907
976#endif /* CONFIG_CPU_FREQ */ 908#endif /* CONFIG_CPU_FREQ */
977 909
978static struct time_interpolator sparc64_cpu_interpolator = { 910static int sparc64_next_event(unsigned long delta,
979 .source = TIME_SOURCE_CPU, 911 struct clock_event_device *evt)
980 .shift = 16, 912{
981 .mask = 0xffffffffffffffffLL 913 return tick_ops->add_compare(delta) ? -ETIME : 0;
914}
915
916static void sparc64_timer_setup(enum clock_event_mode mode,
917 struct clock_event_device *evt)
918{
919 switch (mode) {
920 case CLOCK_EVT_MODE_ONESHOT:
921 break;
922
923 case CLOCK_EVT_MODE_SHUTDOWN:
924 tick_ops->disable_irq();
925 break;
926
927 case CLOCK_EVT_MODE_PERIODIC:
928 case CLOCK_EVT_MODE_UNUSED:
929 WARN_ON(1);
930 break;
931 };
932}
933
934static struct clock_event_device sparc64_clockevent = {
935 .features = CLOCK_EVT_FEAT_ONESHOT,
936 .set_mode = sparc64_timer_setup,
937 .set_next_event = sparc64_next_event,
938 .rating = 100,
939 .shift = 30,
940 .irq = -1,
982}; 941};
942static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
983 943
984/* The quotient formula is taken from the IA64 port. */ 944void timer_interrupt(int irq, struct pt_regs *regs)
985#define SPARC64_NSEC_PER_CYC_SHIFT 10UL
986void __init time_init(void)
987{ 945{
988 unsigned long clock = sparc64_init_timers(); 946 struct pt_regs *old_regs = set_irq_regs(regs);
947 unsigned long tick_mask = tick_ops->softint_mask;
948 int cpu = smp_processor_id();
949 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
950
951 clear_softint(tick_mask);
952
953 irq_enter();
989 954
990 sparc64_cpu_interpolator.frequency = clock; 955 kstat_this_cpu.irqs[0]++;
991 register_time_interpolator(&sparc64_cpu_interpolator);
992 956
993 /* Now that the interpolator is registered, it is 957 if (unlikely(!evt->event_handler)) {
994 * safe to start the timer ticking. 958 printk(KERN_WARNING
959 "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
960 } else
961 evt->event_handler(evt);
962
963 irq_exit();
964
965 set_irq_regs(old_regs);
966}
967
968void __devinit setup_sparc64_timer(void)
969{
970 struct clock_event_device *sevt;
971 unsigned long pstate;
972
973 /* Guarantee that the following sequences execute
974 * uninterrupted.
995 */ 975 */
996 sparc64_start_timers(); 976 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
977 "wrpr %0, %1, %%pstate"
978 : "=r" (pstate)
979 : "i" (PSTATE_IE));
980
981 tick_ops->init_tick();
982
983 /* Restore PSTATE_IE. */
984 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
985 : /* no outputs */
986 : "r" (pstate));
987
988 sevt = &__get_cpu_var(sparc64_events);
989
990 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
991 sevt->cpumask = cpumask_of_cpu(smp_processor_id());
992
993 clockevents_register_device(sevt);
994}
995
996#define SPARC64_NSEC_PER_CYC_SHIFT 32UL
997
998static struct clocksource clocksource_tick = {
999 .rating = 100,
1000 .mask = CLOCKSOURCE_MASK(64),
1001 .shift = 16,
1002 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
1003};
1004
1005static void __init setup_clockevent_multiplier(unsigned long hz)
1006{
1007 unsigned long mult, shift = 32;
1008
1009 while (1) {
1010 mult = div_sc(hz, NSEC_PER_SEC, shift);
1011 if (mult && (mult >> 32UL) == 0UL)
1012 break;
1013
1014 shift--;
1015 }
1016
1017 sparc64_clockevent.shift = shift;
1018 sparc64_clockevent.mult = mult;
1019}
1020
1021void __init time_init(void)
1022{
1023 unsigned long clock = sparc64_init_timers();
997 1024
998 timer_ticks_per_nsec_quotient = 1025 timer_ticks_per_nsec_quotient =
999 (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) + 1026 clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
1000 (clock / 2)) / clock); 1027
1028 clocksource_tick.name = tick_ops->name;
1029 clocksource_tick.mult =
1030 clocksource_hz2mult(clock,
1031 clocksource_tick.shift);
1032 clocksource_tick.read = tick_ops->get_tick;
1033
1034 printk("clocksource: mult[%x] shift[%d]\n",
1035 clocksource_tick.mult, clocksource_tick.shift);
1036
1037 clocksource_register(&clocksource_tick);
1038
1039 sparc64_clockevent.name = tick_ops->name;
1040
1041 setup_clockevent_multiplier(clock);
1042
1043 sparc64_clockevent.max_delta_ns =
1044 clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
1045 sparc64_clockevent.min_delta_ns =
1046 clockevent_delta2ns(0xF, &sparc64_clockevent);
1047
1048 printk("clockevent: mult[%lx] shift[%d]\n",
1049 sparc64_clockevent.mult, sparc64_clockevent.shift);
1050
1051 setup_sparc64_timer();
1001 1052
1002#ifdef CONFIG_CPU_FREQ 1053#ifdef CONFIG_CPU_FREQ
1003 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block, 1054 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
@@ -1126,10 +1177,6 @@ static int set_rtc_mmss(unsigned long nowtime)
1126#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ 1177#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
1127static unsigned char mini_rtc_status; /* bitmapped status byte. */ 1178static unsigned char mini_rtc_status; /* bitmapped status byte. */
1128 1179
1129/* months start at 0 now */
1130static unsigned char days_in_mo[] =
1131{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
1132
1133#define FEBRUARY 2 1180#define FEBRUARY 2
1134#define STARTOFTIME 1970 1181#define STARTOFTIME 1970
1135#define SECDAY 86400L 1182#define SECDAY 86400L
@@ -1278,8 +1325,7 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
1278 1325
1279 case RTC_SET_TIME: /* Set the RTC */ 1326 case RTC_SET_TIME: /* Set the RTC */
1280 { 1327 {
1281 int year; 1328 int year, days;
1282 unsigned char leap_yr;
1283 1329
1284 if (!capable(CAP_SYS_TIME)) 1330 if (!capable(CAP_SYS_TIME))
1285 return -EACCES; 1331 return -EACCES;
@@ -1288,14 +1334,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
1288 return -EFAULT; 1334 return -EFAULT;
1289 1335
1290 year = wtime.tm_year + 1900; 1336 year = wtime.tm_year + 1900;
1291 leap_yr = ((!(year % 4) && (year % 100)) || 1337 days = month_days[wtime.tm_mon] +
1292 !(year % 400)); 1338 ((wtime.tm_mon == 1) && leapyear(year));
1293 1339
1294 if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) 1340 if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) ||
1341 (wtime.tm_mday < 1))
1295 return -EINVAL; 1342 return -EINVAL;
1296 1343
1297 if (wtime.tm_mday < 0 || wtime.tm_mday > 1344 if (wtime.tm_mday < 0 || wtime.tm_mday > days)
1298 (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
1299 return -EINVAL; 1345 return -EINVAL;
1300 1346
1301 if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || 1347 if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
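Editor's note on the time.c conversion above: the old tick/offset bookkeeping is replaced by the generic clocksource/clockevent framework. clocksource_hz2mult() picks mult so that ns = (cycles * mult) >> shift, setup_clockevent_multiplier() searches for the largest shift whose mult still fits in 32 bits for the reverse (ns to ticks) direction, and sparc64_next_event() returns -ETIME when add_compare() reports that the programmed deadline has already passed. A small standalone sketch of the mult/shift arithmetic; the 1.2 GHz frequency is an assumed value for illustration only, not taken from the patch.

        /* Sketch of the clocksource mult+shift conversion. */
        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000ULL

        /* Same formula clocksource_hz2mult() uses:
         * mult = (NSEC_PER_SEC << shift) / hz, rounded to nearest.
         */
        static uint64_t hz2mult(uint64_t hz, unsigned int shift)
        {
                uint64_t tmp = NSEC_PER_SEC << shift;

                tmp += hz / 2;          /* round to nearest */
                return tmp / hz;
        }

        int main(void)
        {
                uint64_t hz = 1200000000ULL;    /* assumed 1.2 GHz %tick */
                unsigned int shift = 16;        /* clocksource_tick.shift above */
                uint64_t mult = hz2mult(hz, shift);
                uint64_t cycles = hz;           /* one second worth of ticks */

                /* cyc2ns: ns = (cycles * mult) >> shift, ~1e9 here */
                printf("mult=%llu, 1s of cycles -> %llu ns\n",
                       (unsigned long long)mult,
                       (unsigned long long)((cycles * mult) >> shift));
                return 0;
        }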
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index d7d2a8bdc66e..7575aa371da8 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -60,11 +60,7 @@ tl0_irq4: BTRAP(0x44)
60tl0_irq5: TRAP_IRQ(handler_irq, 5) 60tl0_irq5: TRAP_IRQ(handler_irq, 5)
61tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) 61tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
62tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) 62tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
63#ifndef CONFIG_SMP 63tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
64tl0_irq14: TRAP_IRQ(timer_irq, 14)
65#else
66tl0_irq14: TICK_SMP_IRQ
67#endif
68tl0_irq15: TRAP_IRQ(handler_irq, 15) 64tl0_irq15: TRAP_IRQ(handler_irq, 15)
69tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) 65tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
70tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) 66tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index f146071a4b2a..cafadcbcdf38 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -122,24 +122,19 @@ static void __init read_obp_memory(const char *property,
122 size = 0UL; 122 size = 0UL;
123 base = new_base; 123 base = new_base;
124 } 124 }
125 regs[i].phys_addr = base; 125 if (size == 0UL) {
126 regs[i].reg_size = size; 126 /* If it is empty, simply get rid of it.
127 } 127 * This simplifies the logic of the other
128 128 * functions that process these arrays.
129 for (i = 0; i < ents; i++) { 129 */
130 if (regs[i].reg_size == 0UL) { 130 memmove(&regs[i], &regs[i + 1],
131 int j; 131 (ents - i - 1) * sizeof(regs[0]));
132
133 for (j = i; j < ents - 1; j++) {
134 regs[j].phys_addr =
135 regs[j+1].phys_addr;
136 regs[j].reg_size =
137 regs[j+1].reg_size;
138 }
139
140 ents--;
141 i--; 132 i--;
133 ents--;
134 continue;
142 } 135 }
136 regs[i].phys_addr = base;
137 regs[i].reg_size = size;
143 } 138 }
144 139
145 *num_ents = ents; 140 *num_ents = ents;
@@ -154,15 +149,6 @@ unsigned long *sparc64_valid_addr_bitmap __read_mostly;
154unsigned long kern_base __read_mostly; 149unsigned long kern_base __read_mostly;
155unsigned long kern_size __read_mostly; 150unsigned long kern_size __read_mostly;
156 151
157/* get_new_mmu_context() uses "cache + 1". */
158DEFINE_SPINLOCK(ctx_alloc_lock);
159unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
160#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
161unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
162
163/* References to special section boundaries */
164extern char _start[], _end[];
165
166/* Initial ramdisk setup */ 152/* Initial ramdisk setup */
167extern unsigned long sparc_ramdisk_image64; 153extern unsigned long sparc_ramdisk_image64;
168extern unsigned int sparc_ramdisk_image; 154extern unsigned int sparc_ramdisk_image;
@@ -406,19 +392,70 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
406 if (tlb_type == spitfire) { 392 if (tlb_type == spitfire) {
407 unsigned long kaddr; 393 unsigned long kaddr;
408 394
409 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) 395 /* This code only runs on Spitfire cpus so this is
410 __flush_icache_page(__get_phys(kaddr)); 396 * why we can assume _PAGE_PADDR_4U.
397 */
398 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
399 unsigned long paddr, mask = _PAGE_PADDR_4U;
400
401 if (kaddr >= PAGE_OFFSET)
402 paddr = kaddr & mask;
403 else {
404 pgd_t *pgdp = pgd_offset_k(kaddr);
405 pud_t *pudp = pud_offset(pgdp, kaddr);
406 pmd_t *pmdp = pmd_offset(pudp, kaddr);
407 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
408
409 paddr = pte_val(*ptep) & mask;
410 }
411 __flush_icache_page(paddr);
412 }
411 } 413 }
412} 414}
413 415
414void show_mem(void) 416void show_mem(void)
415{ 417{
416 printk("Mem-info:\n"); 418 unsigned long total = 0, reserved = 0;
419 unsigned long shared = 0, cached = 0;
420 pg_data_t *pgdat;
421
422 printk(KERN_INFO "Mem-info:\n");
417 show_free_areas(); 423 show_free_areas();
418 printk("Free swap: %6ldkB\n", 424 printk(KERN_INFO "Free swap: %6ldkB\n",
419 nr_swap_pages << (PAGE_SHIFT-10)); 425 nr_swap_pages << (PAGE_SHIFT-10));
420 printk("%ld pages of RAM\n", num_physpages); 426 for_each_online_pgdat(pgdat) {
421 printk("%lu free pages\n", nr_free_pages()); 427 unsigned long i, flags;
428
429 pgdat_resize_lock(pgdat, &flags);
430 for (i = 0; i < pgdat->node_spanned_pages; i++) {
431 struct page *page = pgdat_page_nr(pgdat, i);
432 total++;
433 if (PageReserved(page))
434 reserved++;
435 else if (PageSwapCache(page))
436 cached++;
437 else if (page_count(page))
438 shared += page_count(page) - 1;
439 }
440 pgdat_resize_unlock(pgdat, &flags);
441 }
442
443 printk(KERN_INFO "%lu pages of RAM\n", total);
444 printk(KERN_INFO "%lu reserved pages\n", reserved);
445 printk(KERN_INFO "%lu pages shared\n", shared);
446 printk(KERN_INFO "%lu pages swap cached\n", cached);
447
448 printk(KERN_INFO "%lu pages dirty\n",
449 global_page_state(NR_FILE_DIRTY));
450 printk(KERN_INFO "%lu pages writeback\n",
451 global_page_state(NR_WRITEBACK));
452 printk(KERN_INFO "%lu pages mapped\n",
453 global_page_state(NR_FILE_MAPPED));
454 printk(KERN_INFO "%lu pages slab\n",
455 global_page_state(NR_SLAB_RECLAIMABLE) +
456 global_page_state(NR_SLAB_UNRECLAIMABLE));
457 printk(KERN_INFO "%lu pages pagetables\n",
458 global_page_state(NR_PAGETABLE));
422} 459}
423 460
424void mmu_info(struct seq_file *m) 461void mmu_info(struct seq_file *m)
@@ -658,6 +695,13 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
658} 695}
659#endif /* DCACHE_ALIASING_POSSIBLE */ 696#endif /* DCACHE_ALIASING_POSSIBLE */
660 697
698/* get_new_mmu_context() uses "cache + 1". */
699DEFINE_SPINLOCK(ctx_alloc_lock);
700unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
701#define MAX_CTX_NR (1UL << CTX_NR_BITS)
702#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
703DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
704
661/* Caller does TLB context flushing on local CPU if necessary. 705/* Caller does TLB context flushing on local CPU if necessary.
662 * The caller also ensures that CTX_VALID(mm->context) is false. 706 * The caller also ensures that CTX_VALID(mm->context) is false.
663 * 707 *
@@ -717,95 +761,6 @@ out:
717 smp_new_mmu_context_version(); 761 smp_new_mmu_context_version();
718} 762}
719 763
720void sparc_ultra_dump_itlb(void)
721{
722 int slot;
723
724 if (tlb_type == spitfire) {
725 printk ("Contents of itlb: ");
726 for (slot = 0; slot < 14; slot++) printk (" ");
727 printk ("%2x:%016lx,%016lx\n",
728 0,
729 spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
730 for (slot = 1; slot < 64; slot+=3) {
731 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
732 slot,
733 spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
734 slot+1,
735 spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
736 slot+2,
737 spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
738 }
739 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
740 printk ("Contents of itlb0:\n");
741 for (slot = 0; slot < 16; slot+=2) {
742 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
743 slot,
744 cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
745 slot+1,
746 cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
747 }
748 printk ("Contents of itlb2:\n");
749 for (slot = 0; slot < 128; slot+=2) {
750 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
751 slot,
752 cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
753 slot+1,
754 cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
755 }
756 }
757}
758
759void sparc_ultra_dump_dtlb(void)
760{
761 int slot;
762
763 if (tlb_type == spitfire) {
764 printk ("Contents of dtlb: ");
765 for (slot = 0; slot < 14; slot++) printk (" ");
766 printk ("%2x:%016lx,%016lx\n", 0,
767 spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
768 for (slot = 1; slot < 64; slot+=3) {
769 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
770 slot,
771 spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
772 slot+1,
773 spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
774 slot+2,
775 spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
776 }
777 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
778 printk ("Contents of dtlb0:\n");
779 for (slot = 0; slot < 16; slot+=2) {
780 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
781 slot,
782 cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
783 slot+1,
784 cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
785 }
786 printk ("Contents of dtlb2:\n");
787 for (slot = 0; slot < 512; slot+=2) {
788 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
789 slot,
790 cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
791 slot+1,
792 cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
793 }
794 if (tlb_type == cheetah_plus) {
795 printk ("Contents of dtlb3:\n");
796 for (slot = 0; slot < 512; slot+=2) {
797 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
798 slot,
799 cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
800 slot+1,
801 cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
802 }
803 }
804 }
805}
806
807extern unsigned long cmdline_memory_size;
808
809/* Find a free area for the bootmem map, avoiding the kernel image 764/* Find a free area for the bootmem map, avoiding the kernel image
810 * and the initial ramdisk. 765 * and the initial ramdisk.
811 */ 766 */
@@ -815,8 +770,8 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
815 unsigned long avoid_start, avoid_end, bootmap_size; 770 unsigned long avoid_start, avoid_end, bootmap_size;
816 int i; 771 int i;
817 772
818 bootmap_size = ((end_pfn - start_pfn) + 7) / 8; 773 bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
819 bootmap_size = ALIGN(bootmap_size, sizeof(long)); 774 bootmap_size <<= PAGE_SHIFT;
820 775
821 avoid_start = avoid_end = 0; 776 avoid_start = avoid_end = 0;
822#ifdef CONFIG_BLK_DEV_INITRD 777#ifdef CONFIG_BLK_DEV_INITRD
@@ -983,6 +938,20 @@ static void __init trim_pavail(unsigned long *cur_size_p,
983 } 938 }
984} 939}
985 940
941/* About pages_avail, this is the value we will use to calculate
942 * the zholes_size[] argument given to free_area_init_node(). The
943 * page allocator uses this to calculate nr_kernel_pages,
944 * nr_all_pages and zone->present_pages. On NUMA it is used
945 * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
946 *
947 * So this number should really be set to what the page allocator
948 * actually ends up with. This means:
949 * 1) It should include bootmem map pages, we'll release those.
950 * 2) It should not include the kernel image, except for the
951 * __init sections which we will also release.
952 * 3) It should include the initrd image, since we'll release
953 * that too.
954 */
986static unsigned long __init bootmem_init(unsigned long *pages_avail, 955static unsigned long __init bootmem_init(unsigned long *pages_avail,
987 unsigned long phys_base) 956 unsigned long phys_base)
988{ 957{
@@ -1069,7 +1038,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
1069 initrd_start, initrd_end); 1038 initrd_start, initrd_end);
1070#endif 1039#endif
1071 reserve_bootmem(initrd_start, size); 1040 reserve_bootmem(initrd_start, size);
1072 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1073 1041
1074 initrd_start += PAGE_OFFSET; 1042 initrd_start += PAGE_OFFSET;
1075 initrd_end += PAGE_OFFSET; 1043 initrd_end += PAGE_OFFSET;
@@ -1082,6 +1050,11 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
1082 reserve_bootmem(kern_base, kern_size); 1050 reserve_bootmem(kern_base, kern_size);
1083 *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT; 1051 *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
1084 1052
1053 /* Add back in the initmem pages. */
1054 size = ((unsigned long)(__init_end) & PAGE_MASK) -
1055 PAGE_ALIGN((unsigned long)__init_begin);
1056 *pages_avail += size >> PAGE_SHIFT;
1057
1085 /* Reserve the bootmem map. We do not account for it 1058 /* Reserve the bootmem map. We do not account for it
1086 * in pages_avail because we will release that memory 1059 * in pages_avail because we will release that memory
1087 * in free_all_bootmem. 1060 * in free_all_bootmem.
@@ -1092,7 +1065,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
1092 (bootmap_pfn << PAGE_SHIFT), size); 1065 (bootmap_pfn << PAGE_SHIFT), size);
1093#endif 1066#endif
1094 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); 1067 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1095 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1096 1068
1097 for (i = 0; i < pavail_ents; i++) { 1069 for (i = 0; i < pavail_ents; i++) {
1098 unsigned long start_pfn, end_pfn; 1070 unsigned long start_pfn, end_pfn;
@@ -1584,6 +1556,10 @@ void __init mem_init(void)
1584#ifdef CONFIG_DEBUG_BOOTMEM 1556#ifdef CONFIG_DEBUG_BOOTMEM
1585 prom_printf("mem_init: Calling free_all_bootmem().\n"); 1557 prom_printf("mem_init: Calling free_all_bootmem().\n");
1586#endif 1558#endif
1559
1560 /* We subtract one to account for the mem_map_zero page
1561 * allocated below.
1562 */
1587 totalram_pages = num_physpages = free_all_bootmem() - 1; 1563 totalram_pages = num_physpages = free_all_bootmem() - 1;
1588 1564
1589 /* 1565 /*
@@ -1883,62 +1859,6 @@ static unsigned long kern_large_tte(unsigned long paddr)
1883 return val | paddr; 1859 return val | paddr;
1884} 1860}
1885 1861
1886/*
1887 * Translate PROM's mapping we capture at boot time into physical address.
1888 * The second parameter is only set from prom_callback() invocations.
1889 */
1890unsigned long prom_virt_to_phys(unsigned long promva, int *error)
1891{
1892 unsigned long mask;
1893 int i;
1894
1895 mask = _PAGE_PADDR_4U;
1896 if (tlb_type == hypervisor)
1897 mask = _PAGE_PADDR_4V;
1898
1899 for (i = 0; i < prom_trans_ents; i++) {
1900 struct linux_prom_translation *p = &prom_trans[i];
1901
1902 if (promva >= p->virt &&
1903 promva < (p->virt + p->size)) {
1904 unsigned long base = p->data & mask;
1905
1906 if (error)
1907 *error = 0;
1908 return base + (promva & (8192 - 1));
1909 }
1910 }
1911 if (error)
1912 *error = 1;
1913 return 0UL;
1914}
1915
1916/* XXX We should kill off this ugly thing at some point. XXX */
1917unsigned long sun4u_get_pte(unsigned long addr)
1918{
1919 pgd_t *pgdp;
1920 pud_t *pudp;
1921 pmd_t *pmdp;
1922 pte_t *ptep;
1923 unsigned long mask = _PAGE_PADDR_4U;
1924
1925 if (tlb_type == hypervisor)
1926 mask = _PAGE_PADDR_4V;
1927
1928 if (addr >= PAGE_OFFSET)
1929 return addr & mask;
1930
1931 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
1932 return prom_virt_to_phys(addr, NULL);
1933
1934 pgdp = pgd_offset_k(addr);
1935 pudp = pud_offset(pgdp, addr);
1936 pmdp = pmd_offset(pudp, addr);
1937 ptep = pte_offset_kernel(pmdp, addr);
1938
1939 return pte_val(*ptep) & mask;
1940}
1941
1942/* If not locked, zap it. */ 1862/* If not locked, zap it. */
1943void __flush_tlb_all(void) 1863void __flush_tlb_all(void)
1944{ 1864{
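Editor's note on the init.c hunks above: in read_obp_memory() the old field-by-field copy loop is replaced by a single memmove() that compacts zero-size entries out of the array in place, re-examining the current index after each shift. The same pattern in isolation; the struct and sample data below are illustrative only.

        /* Sketch of the in-place compaction used in read_obp_memory(). */
        #include <string.h>
        #include <stdio.h>

        struct region {
                unsigned long phys_addr;
                unsigned long reg_size;
        };

        static void compact(struct region *regs, int *num_ents)
        {
                int ents = *num_ents;
                int i;

                for (i = 0; i < ents; i++) {
                        if (regs[i].reg_size != 0UL)
                                continue;
                        /* Shift the tail of the array down over the empty
                         * slot, then re-check index i, which now holds a
                         * different entry.
                         */
                        memmove(&regs[i], &regs[i + 1],
                                (ents - i - 1) * sizeof(regs[0]));
                        ents--;
                        i--;
                }
                *num_ents = ents;
        }

        int main(void)
        {
                struct region r[] = {
                        { 0x0000, 0x1000 }, { 0x2000, 0x0 }, { 0x4000, 0x2000 }
                };
                int n = 3;

                compact(r, &n);
                printf("%d entries remain\n", n);       /* prints 2 */
                return 0;
        }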
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 9fcaad6dd11f..542c808ec2c8 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -224,7 +224,8 @@ static char *serial(char *buffer, int sz)
224 224
225 *buffer = 0; 225 *buffer = 0;
226 if (dp) { 226 if (dp) {
227 char *val = of_get_property(dp, "system-board-serial#", &len); 227 const char *val =
228 of_get_property(dp, "system-board-serial#", &len);
228 229
229 if (val && len > 0) { 230 if (val && len > 0) {
230 if (len > sz) 231 if (len > sz)
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
index ed935b58e8a4..6c4fdd86acd8 100644
--- a/arch/xtensa/lib/Makefile
+++ b/arch/xtensa/lib/Makefile
@@ -2,6 +2,6 @@
2# Makefile for Xtensa-specific library files. 2# Makefile for Xtensa-specific library files.
3# 3#
4 4
5lib-y += memcopy.o memset.o checksum.o strcasecmp.o \ 5lib-y += memcopy.o memset.o checksum.o \
6 usercopy.o strncpy_user.o strnlen_user.o 6 usercopy.o strncpy_user.o strnlen_user.o
7lib-$(CONFIG_PCI) += pci-auto.o 7lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c
deleted file mode 100644
index 165b2d6effa5..000000000000
--- a/arch/xtensa/lib/strcasecmp.c
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * linux/arch/xtensa/lib/strcasecmp.c
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Copyright (C) 2002 Tensilica Inc.
9 */
10
11#include <linux/string.h>
12
13
14/* We handle nothing here except the C locale. Since this is used in
15 only one place, on strings known to contain only 7 bit ASCII, this
16 is ok. */
17
18int strcasecmp(const char *a, const char *b)
19{
20 int ca, cb;
21
22 do {
23 ca = *a++ & 0xff;
24 cb = *b++ & 0xff;
25 if (ca >= 'A' && ca <= 'Z')
26 ca += 'a' - 'A';
27 if (cb >= 'A' && cb <= 'Z')
28 cb += 'a' - 'A';
29 } while (ca == cb && ca != '\0');
30
31 return ca - cb;
32}
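Editor's note on the xtensa hunks above: the private strcasecmp.c is deleted and strcasecmp.o is dropped from lib-y, which only makes sense if a shared implementation takes over; that appears to be the generic strcasecmp added to lib/string.c around this time. For reference, a byte-wise case-insensitive compare equivalent to the removed copy looks like the sketch below (not the exact lib/string.c text).

        /* Sketch: generic byte-wise strcasecmp. */
        #include <ctype.h>

        int strcasecmp_sketch(const char *s1, const char *s2)
        {
                int c1, c2;

                do {
                        c1 = tolower((unsigned char)*s1++);
                        c2 = tolower((unsigned char)*s2++);
                } while (c1 == c2 && c1 != '\0');

                return c1 - c2;
        }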