| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:29:04 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:29:04 -0400 |
| commit | 0278ef8b484a71917bd4f03a763285cdaac10954 (patch) | |
| tree | 8f6f7bf2e2a85b4643dfe3d0475811ce858fb4fc | |
| parent | 15c54033964a943de7b0763efd3bd0ede7326395 (diff) | |
| parent | cd9ad58d4061494e7fdd70ded7bcf2418daf356a (diff) | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6: (67 commits)
[SCSI] SUNESP: Complete driver rewrite to version 2.0
[SPARC64]: Convert PCI over to generic struct iommu/strbuf.
[SPARC]: device_node name constification fallout
[SPARC64]: Convert SBUS over to generic iommu/strbuf structs.
[SPARC64]: Add generic iommu and strbuf structs to iommu.h
[SPARC64]: Consolidate {sbus,pci}_iommu_arena.
[SPARC]: Make device_node name and type const
[SPARC64]: constify some paramaters of OF routines
[TIGON3]: of_get_property() returns const.
[SPARC64]: Fix PCI rework to adhere to of_get_property() const return.
[SPARC64]: Document and fix calculation of pages_avail.
[SPARC64]: Make sure pbm->prom_node is setup easly enough in psycho.c
[SPARC64]: Use bootmem_bootmap_pages() in choose_bootmap_pfn().
[SPARC64]: Add proper header file extern for cmdline_memory_size.
[SPARC64]: Kill sparc_ultra_dump_{i,d}tlb()
[SPARC64]: Use DECLARE_BITMAP and BITS_TO_LONGS in mm/init.c
[SPARC64]: Give move verbose show_mem() output just like i386.
[SPARC64]: Mark show_mem() printk's with KERN_INFO.
[SPARC64]: Kill kvaddr_to_phys() and friends.
[SPARC64]: Privatize sun4u_get_pte() and fix name.
...
94 files changed, 5648 insertions, 8101 deletions
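
Several of the file deletions below drop per-architecture `strcasecmp()`/`strncasecmp()` copies (alpha, powerpc, ppc, sh). They become redundant because generic, exported versions presumably now live in lib/string.c (added elsewhere in this series and not shown in this diff), which is also why the `EXPORT_SYMBOL(strcasecmp)` lines can go from the ppc ksyms files. As a hedged sketch only, reconstructed from the deleted ppc copies rather than quoted from lib/string.c, the shared implementation looks roughly like this:

```c
#include <linux/ctype.h>
#include <linux/types.h>

/* Case-insensitive comparison in the C locale only (7-bit ASCII),
 * matching the behaviour of the arch-private copies removed below.
 */
int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}

int strncasecmp(const char *s1, const char *s2, size_t n)
{
	int c1, c2;

	if (!n)
		return 0;
	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (--n && c1 == c2 && c1 != 0);
	return c1 - c2;
}
```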
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 21cf624d7329..ea098f3b629f 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -36,7 +36,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ | |||
36 | $(ev6-y)csum_ipv6_magic.o \ | 36 | $(ev6-y)csum_ipv6_magic.o \ |
37 | $(ev6-y)clear_page.o \ | 37 | $(ev6-y)clear_page.o \ |
38 | $(ev6-y)copy_page.o \ | 38 | $(ev6-y)copy_page.o \ |
39 | strcasecmp.o \ | ||
40 | fpreg.o \ | 39 | fpreg.o \ |
41 | callback_srm.o srm_puts.o srm_printk.o | 40 | callback_srm.o srm_puts.o srm_printk.o |
42 | 41 | ||
diff --git a/arch/alpha/lib/strcasecmp.c b/arch/alpha/lib/strcasecmp.c
deleted file mode 100644
index 4e57a216feaf..000000000000
--- a/arch/alpha/lib/strcasecmp.c
+++ /dev/null
@@ -1,26 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/lib/strcasecmp.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/string.h> | ||
6 | |||
7 | |||
8 | /* We handle nothing here except the C locale. Since this is used in | ||
9 | only one place, on strings known to contain only 7 bit ASCII, this | ||
10 | is ok. */ | ||
11 | |||
12 | int strcasecmp(const char *a, const char *b) | ||
13 | { | ||
14 | int ca, cb; | ||
15 | |||
16 | do { | ||
17 | ca = *a++ & 0xff; | ||
18 | cb = *b++ & 0xff; | ||
19 | if (ca >= 'A' && ca <= 'Z') | ||
20 | ca += 'a' - 'A'; | ||
21 | if (cb >= 'A' && cb <= 'Z') | ||
22 | cb += 'a' - 'A'; | ||
23 | } while (ca == cb && ca != '\0'); | ||
24 | |||
25 | return ca - cb; | ||
26 | } | ||
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ecee596d28f6..2f8e9c02c92a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(strncpy); | |||
84 | EXPORT_SYMBOL(strcat); | 84 | EXPORT_SYMBOL(strcat); |
85 | EXPORT_SYMBOL(strlen); | 85 | EXPORT_SYMBOL(strlen); |
86 | EXPORT_SYMBOL(strcmp); | 86 | EXPORT_SYMBOL(strcmp); |
87 | EXPORT_SYMBOL(strcasecmp); | ||
88 | EXPORT_SYMBOL(strncasecmp); | ||
89 | 87 | ||
90 | EXPORT_SYMBOL(csum_partial); | 88 | EXPORT_SYMBOL(csum_partial); |
91 | EXPORT_SYMBOL(csum_partial_copy_generic); | 89 | EXPORT_SYMBOL(csum_partial_copy_generic); |
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 4b1ba49fbd9e..450258de7ca1 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -7,13 +7,12 @@ EXTRA_CFLAGS += -mno-minimal-toc | |||
7 | endif | 7 | endif |
8 | 8 | ||
9 | ifeq ($(CONFIG_PPC_MERGE),y) | 9 | ifeq ($(CONFIG_PPC_MERGE),y) |
10 | obj-y := string.o strcase.o | 10 | obj-y := string.o |
11 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o | 11 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o |
12 | endif | 12 | endif |
13 | 13 | ||
14 | obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ | 14 | obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ |
15 | memcpy_64.o usercopy_64.o mem_64.o string.o \ | 15 | memcpy_64.o usercopy_64.o mem_64.o string.o |
16 | strcase.o | ||
17 | obj-$(CONFIG_QUICC_ENGINE) += rheap.o | 16 | obj-$(CONFIG_QUICC_ENGINE) += rheap.o |
18 | obj-$(CONFIG_XMON) += sstep.o | 17 | obj-$(CONFIG_XMON) += sstep.o |
19 | obj-$(CONFIG_KPROBES) += sstep.o | 18 | obj-$(CONFIG_KPROBES) += sstep.o |
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
deleted file mode 100644
index f8ec1eba3fdd..000000000000
--- a/arch/powerpc/lib/strcase.c
+++ /dev/null
@@ -1,25 +0,0 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/ctype.h> | ||
3 | #include <linux/string.h> | ||
4 | |||
5 | int strcasecmp(const char *s1, const char *s2) | ||
6 | { | ||
7 | int c1, c2; | ||
8 | |||
9 | do { | ||
10 | c1 = tolower(*s1++); | ||
11 | c2 = tolower(*s2++); | ||
12 | } while (c1 == c2 && c1 != 0); | ||
13 | return c1 - c2; | ||
14 | } | ||
15 | |||
16 | int strncasecmp(const char *s1, const char *s2, size_t n) | ||
17 | { | ||
18 | int c1, c2; | ||
19 | |||
20 | do { | ||
21 | c1 = tolower(*s1++); | ||
22 | c2 = tolower(*s2++); | ||
23 | } while ((--n > 0) && c1 == c2 && c1 != 0); | ||
24 | return c1 - c2; | ||
25 | } | ||
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 1318b6f4c3df..4ad499605d05 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -93,8 +93,6 @@ EXPORT_SYMBOL(strncpy); | |||
93 | EXPORT_SYMBOL(strcat); | 93 | EXPORT_SYMBOL(strcat); |
94 | EXPORT_SYMBOL(strlen); | 94 | EXPORT_SYMBOL(strlen); |
95 | EXPORT_SYMBOL(strcmp); | 95 | EXPORT_SYMBOL(strcmp); |
96 | EXPORT_SYMBOL(strcasecmp); | ||
97 | EXPORT_SYMBOL(strncasecmp); | ||
98 | EXPORT_SYMBOL(__div64_32); | 96 | EXPORT_SYMBOL(__div64_32); |
99 | 97 | ||
100 | EXPORT_SYMBOL(csum_partial); | 98 | EXPORT_SYMBOL(csum_partial); |
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
index 50358e4ea159..422bef9bae7b 100644
--- a/arch/ppc/lib/Makefile
+++ b/arch/ppc/lib/Makefile
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for ppc-specific library files.. | 2 | # Makefile for ppc-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := checksum.o string.o strcase.o div64.o | 5 | obj-y := checksum.o string.o div64.o |
6 | 6 | ||
7 | obj-$(CONFIG_8xx) += rheap.o | 7 | obj-$(CONFIG_8xx) += rheap.o |
8 | obj-$(CONFIG_CPM2) += rheap.o | 8 | obj-$(CONFIG_CPM2) += rheap.o |
diff --git a/arch/ppc/lib/strcase.c b/arch/ppc/lib/strcase.c
deleted file mode 100644
index 3b0094cc2b52..000000000000
--- a/arch/ppc/lib/strcase.c
+++ /dev/null
@@ -1,24 +0,0 @@ | |||
1 | #include <linux/ctype.h> | ||
2 | #include <linux/types.h> | ||
3 | |||
4 | int strcasecmp(const char *s1, const char *s2) | ||
5 | { | ||
6 | int c1, c2; | ||
7 | |||
8 | do { | ||
9 | c1 = tolower(*s1++); | ||
10 | c2 = tolower(*s2++); | ||
11 | } while (c1 == c2 && c1 != 0); | ||
12 | return c1 - c2; | ||
13 | } | ||
14 | |||
15 | int strncasecmp(const char *s1, const char *s2, size_t n) | ||
16 | { | ||
17 | int c1, c2; | ||
18 | |||
19 | do { | ||
20 | c1 = tolower(*s1++); | ||
21 | c2 = tolower(*s2++); | ||
22 | } while ((--n > 0) && c1 == c2 && c1 != 0); | ||
23 | return c1 - c2; | ||
24 | } | ||
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index b5681e3f9684..0b9cca5c7cb4 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y = delay.o memset.o memmove.o memchr.o \ | 5 | lib-y = delay.o memset.o memmove.o memchr.o \ |
6 | checksum.o strcasecmp.o strlen.o div64.o udivdi3.o \ | 6 | checksum.o strlen.o div64.o udivdi3.o \ |
7 | div64-generic.o | 7 | div64-generic.o |
8 | 8 | ||
9 | memcpy-y := memcpy.o | 9 | memcpy-y := memcpy.o |
diff --git a/arch/sh/lib/strcasecmp.c b/arch/sh/lib/strcasecmp.c
deleted file mode 100644
index 4e57a216feaf..000000000000
--- a/arch/sh/lib/strcasecmp.c
+++ /dev/null
@@ -1,26 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/alpha/lib/strcasecmp.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/string.h> | ||
6 | |||
7 | |||
8 | /* We handle nothing here except the C locale. Since this is used in | ||
9 | only one place, on strings known to contain only 7 bit ASCII, this | ||
10 | is ok. */ | ||
11 | |||
12 | int strcasecmp(const char *a, const char *b) | ||
13 | { | ||
14 | int ca, cb; | ||
15 | |||
16 | do { | ||
17 | ca = *a++ & 0xff; | ||
18 | cb = *b++ & 0xff; | ||
19 | if (ca >= 'A' && ca <= 'Z') | ||
20 | ca += 'a' - 'A'; | ||
21 | if (cb >= 'A' && cb <= 'Z') | ||
22 | cb += 'a' - 'A'; | ||
23 | } while (ca == cb && ca != '\0'); | ||
24 | |||
25 | return ca - cb; | ||
26 | } | ||
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index ba58c3a061fd..7bb86b9cdaa3 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -25,7 +25,7 @@ | |||
25 | struct linux_ebus *ebus_chain = NULL; | 25 | struct linux_ebus *ebus_chain = NULL; |
26 | 26 | ||
27 | /* We are together with pcic.c under CONFIG_PCI. */ | 27 | /* We are together with pcic.c under CONFIG_PCI. */ |
28 | extern unsigned int pcic_pin_to_irq(unsigned int, char *name); | 28 | extern unsigned int pcic_pin_to_irq(unsigned int, const char *name); |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * IRQ Blacklist | 31 | * IRQ Blacklist |
@@ -69,7 +69,7 @@ static inline unsigned long ebus_alloc(size_t size) | |||
69 | 69 | ||
70 | /* | 70 | /* |
71 | */ | 71 | */ |
72 | int __init ebus_blacklist_irq(char *name) | 72 | int __init ebus_blacklist_irq(const char *name) |
73 | { | 73 | { |
74 | struct ebus_device_irq *dp; | 74 | struct ebus_device_irq *dp; |
75 | 75 | ||
@@ -86,8 +86,8 @@ int __init ebus_blacklist_irq(char *name) | |||
86 | void __init fill_ebus_child(struct device_node *dp, | 86 | void __init fill_ebus_child(struct device_node *dp, |
87 | struct linux_ebus_child *dev) | 87 | struct linux_ebus_child *dev) |
88 | { | 88 | { |
89 | int *regs; | 89 | const int *regs; |
90 | int *irqs; | 90 | const int *irqs; |
91 | int i, len; | 91 | int i, len; |
92 | 92 | ||
93 | dev->prom_node = dp; | 93 | dev->prom_node = dp; |
@@ -146,9 +146,9 @@ void __init fill_ebus_child(struct device_node *dp, | |||
146 | 146 | ||
147 | void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev) | 147 | void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev) |
148 | { | 148 | { |
149 | struct linux_prom_registers *regs; | 149 | const struct linux_prom_registers *regs; |
150 | struct linux_ebus_child *child; | 150 | struct linux_ebus_child *child; |
151 | int *irqs; | 151 | const int *irqs; |
152 | int i, n, len; | 152 | int i, n, len; |
153 | unsigned long baseaddr; | 153 | unsigned long baseaddr; |
154 | 154 | ||
@@ -269,7 +269,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d | |||
269 | 269 | ||
270 | void __init ebus_init(void) | 270 | void __init ebus_init(void) |
271 | { | 271 | { |
272 | struct linux_prom_pci_registers *regs; | 272 | const struct linux_prom_pci_registers *regs; |
273 | struct linux_pbm_info *pbm; | 273 | struct linux_pbm_info *pbm; |
274 | struct linux_ebus_device *dev; | 274 | struct linux_ebus_device *dev; |
275 | struct linux_ebus *ebus; | 275 | struct linux_ebus *ebus; |
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 48c24f7518c2..fd7f8cb668a3 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -210,7 +210,7 @@ struct of_bus { | |||
210 | int *addrc, int *sizec); | 210 | int *addrc, int *sizec); |
211 | int (*map)(u32 *addr, const u32 *range, | 211 | int (*map)(u32 *addr, const u32 *range, |
212 | int na, int ns, int pna); | 212 | int na, int ns, int pna); |
213 | unsigned int (*get_flags)(u32 *addr); | 213 | unsigned int (*get_flags)(const u32 *addr); |
214 | }; | 214 | }; |
215 | 215 | ||
216 | /* | 216 | /* |
@@ -270,7 +270,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range, | |||
270 | return 0; | 270 | return 0; |
271 | } | 271 | } |
272 | 272 | ||
273 | static unsigned int of_bus_default_get_flags(u32 *addr) | 273 | static unsigned int of_bus_default_get_flags(const u32 *addr) |
274 | { | 274 | { |
275 | return IORESOURCE_MEM; | 275 | return IORESOURCE_MEM; |
276 | } | 276 | } |
@@ -334,7 +334,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range, | |||
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
336 | 336 | ||
337 | static unsigned int of_bus_pci_get_flags(u32 *addr) | 337 | static unsigned int of_bus_pci_get_flags(const u32 *addr) |
338 | { | 338 | { |
339 | unsigned int flags = 0; | 339 | unsigned int flags = 0; |
340 | u32 w = addr[0]; | 340 | u32 w = addr[0]; |
@@ -375,7 +375,7 @@ static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna) | |||
375 | return of_bus_default_map(addr, range, na, ns, pna); | 375 | return of_bus_default_map(addr, range, na, ns, pna); |
376 | } | 376 | } |
377 | 377 | ||
378 | static unsigned int of_bus_sbus_get_flags(u32 *addr) | 378 | static unsigned int of_bus_sbus_get_flags(const u32 *addr) |
379 | { | 379 | { |
380 | return IORESOURCE_MEM; | 380 | return IORESOURCE_MEM; |
381 | } | 381 | } |
@@ -432,7 +432,7 @@ static int __init build_one_resource(struct device_node *parent, | |||
432 | u32 *addr, | 432 | u32 *addr, |
433 | int na, int ns, int pna) | 433 | int na, int ns, int pna) |
434 | { | 434 | { |
435 | u32 *ranges; | 435 | const u32 *ranges; |
436 | unsigned int rlen; | 436 | unsigned int rlen; |
437 | int rone; | 437 | int rone; |
438 | 438 | ||
@@ -470,7 +470,7 @@ static void __init build_device_resources(struct of_device *op, | |||
470 | struct of_bus *bus; | 470 | struct of_bus *bus; |
471 | int na, ns; | 471 | int na, ns; |
472 | int index, num_reg; | 472 | int index, num_reg; |
473 | void *preg; | 473 | const void *preg; |
474 | 474 | ||
475 | if (!parent) | 475 | if (!parent) |
476 | return; | 476 | return; |
@@ -492,7 +492,7 @@ static void __init build_device_resources(struct of_device *op, | |||
492 | for (index = 0; index < num_reg; index++) { | 492 | for (index = 0; index < num_reg; index++) { |
493 | struct resource *r = &op->resource[index]; | 493 | struct resource *r = &op->resource[index]; |
494 | u32 addr[OF_MAX_ADDR_CELLS]; | 494 | u32 addr[OF_MAX_ADDR_CELLS]; |
495 | u32 *reg = (preg + (index * ((na + ns) * 4))); | 495 | const u32 *reg = (preg + (index * ((na + ns) * 4))); |
496 | struct device_node *dp = op->node; | 496 | struct device_node *dp = op->node; |
497 | struct device_node *pp = p_op->node; | 497 | struct device_node *pp = p_op->node; |
498 | struct of_bus *pbus, *dbus; | 498 | struct of_bus *pbus, *dbus; |
@@ -559,7 +559,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp, | |||
559 | struct device *parent) | 559 | struct device *parent) |
560 | { | 560 | { |
561 | struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); | 561 | struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); |
562 | struct linux_prom_irqs *intr; | 562 | const struct linux_prom_irqs *intr; |
563 | int len, i; | 563 | int len, i; |
564 | 564 | ||
565 | if (!op) | 565 | if (!op) |
@@ -579,7 +579,8 @@ static struct of_device * __init scan_one_device(struct device_node *dp, | |||
579 | for (i = 0; i < op->num_irqs; i++) | 579 | for (i = 0; i < op->num_irqs; i++) |
580 | op->irqs[i] = intr[i].pri; | 580 | op->irqs[i] = intr[i].pri; |
581 | } else { | 581 | } else { |
582 | unsigned int *irq = of_get_property(dp, "interrupts", &len); | 582 | const unsigned int *irq = |
583 | of_get_property(dp, "interrupts", &len); | ||
583 | 584 | ||
584 | if (irq) { | 585 | if (irq) { |
585 | op->num_irqs = len / sizeof(unsigned int); | 586 | op->num_irqs = len / sizeof(unsigned int); |
@@ -594,7 +595,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp, | |||
594 | 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, | 595 | 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, |
595 | }; | 596 | }; |
596 | struct device_node *io_unit, *sbi = dp->parent; | 597 | struct device_node *io_unit, *sbi = dp->parent; |
597 | struct linux_prom_registers *regs; | 598 | const struct linux_prom_registers *regs; |
598 | int board, slot; | 599 | int board, slot; |
599 | 600 | ||
600 | while (sbi) { | 601 | while (sbi) { |
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 1c927c538b8b..5ca7e8f42bd9 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -37,8 +37,6 @@ | |||
37 | #include <asm/irq_regs.h> | 37 | #include <asm/irq_regs.h> |
38 | 38 | ||
39 | 39 | ||
40 | unsigned int pcic_pin_to_irq(unsigned int pin, char *name); | ||
41 | |||
42 | /* | 40 | /* |
43 | * I studied different documents and many live PROMs both from 2.30 | 41 | * I studied different documents and many live PROMs both from 2.30 |
44 | * family and 3.xx versions. I came to the amazing conclusion: there is | 42 | * family and 3.xx versions. I came to the amazing conclusion: there is |
@@ -681,7 +679,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | |||
681 | * pcic_pin_to_irq() is exported to ebus.c. | 679 | * pcic_pin_to_irq() is exported to ebus.c. |
682 | */ | 680 | */ |
683 | unsigned int | 681 | unsigned int |
684 | pcic_pin_to_irq(unsigned int pin, char *name) | 682 | pcic_pin_to_irq(unsigned int pin, const char *name) |
685 | { | 683 | { |
686 | struct linux_pcic *pcic = &pcic0; | 684 | struct linux_pcic *pcic = &pcic0; |
687 | unsigned int irq; | 685 | unsigned int irq; |
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 2cc302b6bec0..eed140b3c739 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -32,12 +32,13 @@ static struct device_node *allnodes; | |||
32 | */ | 32 | */ |
33 | static DEFINE_RWLOCK(devtree_lock); | 33 | static DEFINE_RWLOCK(devtree_lock); |
34 | 34 | ||
35 | int of_device_is_compatible(struct device_node *device, const char *compat) | 35 | int of_device_is_compatible(const struct device_node *device, |
36 | const char *compat) | ||
36 | { | 37 | { |
37 | const char* cp; | 38 | const char* cp; |
38 | int cplen, l; | 39 | int cplen, l; |
39 | 40 | ||
40 | cp = (char *) of_get_property(device, "compatible", &cplen); | 41 | cp = of_get_property(device, "compatible", &cplen); |
41 | if (cp == NULL) | 42 | if (cp == NULL) |
42 | return 0; | 43 | return 0; |
43 | while (cplen > 0) { | 44 | while (cplen > 0) { |
@@ -150,13 +151,14 @@ struct device_node *of_find_compatible_node(struct device_node *from, | |||
150 | } | 151 | } |
151 | EXPORT_SYMBOL(of_find_compatible_node); | 152 | EXPORT_SYMBOL(of_find_compatible_node); |
152 | 153 | ||
153 | struct property *of_find_property(struct device_node *np, const char *name, | 154 | struct property *of_find_property(const struct device_node *np, |
155 | const char *name, | ||
154 | int *lenp) | 156 | int *lenp) |
155 | { | 157 | { |
156 | struct property *pp; | 158 | struct property *pp; |
157 | 159 | ||
158 | for (pp = np->properties; pp != 0; pp = pp->next) { | 160 | for (pp = np->properties; pp != 0; pp = pp->next) { |
159 | if (strcmp(pp->name, name) == 0) { | 161 | if (strcasecmp(pp->name, name) == 0) { |
160 | if (lenp != 0) | 162 | if (lenp != 0) |
161 | *lenp = pp->length; | 163 | *lenp = pp->length; |
162 | break; | 164 | break; |
@@ -170,7 +172,8 @@ EXPORT_SYMBOL(of_find_property); | |||
170 | * Find a property with a given name for a given node | 172 | * Find a property with a given name for a given node |
171 | * and return the value. | 173 | * and return the value. |
172 | */ | 174 | */ |
173 | void *of_get_property(struct device_node *np, const char *name, int *lenp) | 175 | const void *of_get_property(const struct device_node *np, const char *name, |
176 | int *lenp) | ||
174 | { | 177 | { |
175 | struct property *pp = of_find_property(np,name,lenp); | 178 | struct property *pp = of_find_property(np,name,lenp); |
176 | return pp ? pp->value : NULL; | 179 | return pp ? pp->value : NULL; |
@@ -192,7 +195,7 @@ EXPORT_SYMBOL(of_getintprop_default); | |||
192 | 195 | ||
193 | int of_n_addr_cells(struct device_node *np) | 196 | int of_n_addr_cells(struct device_node *np) |
194 | { | 197 | { |
195 | int* ip; | 198 | const int* ip; |
196 | do { | 199 | do { |
197 | if (np->parent) | 200 | if (np->parent) |
198 | np = np->parent; | 201 | np = np->parent; |
@@ -207,7 +210,7 @@ EXPORT_SYMBOL(of_n_addr_cells); | |||
207 | 210 | ||
208 | int of_n_size_cells(struct device_node *np) | 211 | int of_n_size_cells(struct device_node *np) |
209 | { | 212 | { |
210 | int* ip; | 213 | const int* ip; |
211 | do { | 214 | do { |
212 | if (np->parent) | 215 | if (np->parent) |
213 | np = np->parent; | 216 | np = np->parent; |
@@ -239,7 +242,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
239 | while (*prevp) { | 242 | while (*prevp) { |
240 | struct property *prop = *prevp; | 243 | struct property *prop = *prevp; |
241 | 244 | ||
242 | if (!strcmp(prop->name, name)) { | 245 | if (!strcasecmp(prop->name, name)) { |
243 | void *old_val = prop->value; | 246 | void *old_val = prop->value; |
244 | int ret; | 247 | int ret; |
245 | 248 | ||
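
The prom.c hunks above are the API side of a change that ripples through most of the sparc files in this merge: of_find_property() and of_get_property() now take a const struct device_node * and hand back const data, so every caller that used to keep the result in a writable pointer grows a const qualifier (the ebus.c, of_device.c, central.c and chmc.c hunks are exactly that fallout). A minimal, hypothetical caller sketch of the resulting idiom; the function name and property handling below are illustrative, not taken from the patch:

```c
#include <linux/kernel.h>
#include <asm/prom.h>

/* Hypothetical example: read a "reg"-style property after the
 * constification; the returned buffer must be treated as read-only.
 */
static void example_read_reg(struct device_node *dp)
{
	const u32 *reg;
	int len;

	reg = of_get_property(dp, "reg", &len);
	if (!reg || len < (int) sizeof(*reg))
		return;

	/* reg[0] may be read, but no longer modified in place. */
	printk(KERN_INFO "%s: first reg cell 0x%x\n", dp->full_name, reg[0]);
}
```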
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 9bb1240aaf8a..f1401b57ccc7 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -301,7 +301,7 @@ static __inline__ void sun4_clock_probe(void) | |||
301 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) | 301 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) |
302 | { | 302 | { |
303 | struct device_node *dp = op->node; | 303 | struct device_node *dp = op->node; |
304 | char *model = of_get_property(dp, "model", NULL); | 304 | const char *model = of_get_property(dp, "model", NULL); |
305 | 305 | ||
306 | if (!model) | 306 | if (!model) |
307 | return -ENODEV; | 307 | return -ENODEV; |
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1a6348b565fb..590a41b864b9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -19,6 +19,14 @@ config SPARC64 | |||
19 | SPARC64 ports; its web page is available at | 19 | SPARC64 ports; its web page is available at |
20 | <http://www.ultralinux.org/>. | 20 | <http://www.ultralinux.org/>. |
21 | 21 | ||
22 | config GENERIC_TIME | ||
23 | bool | ||
24 | default y | ||
25 | |||
26 | config GENERIC_CLOCKEVENTS | ||
27 | bool | ||
28 | default y | ||
29 | |||
22 | config 64BIT | 30 | config 64BIT |
23 | def_bool y | 31 | def_bool y |
24 | 32 | ||
@@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT | |||
34 | bool | 42 | bool |
35 | default y | 43 | default y |
36 | 44 | ||
37 | config TIME_INTERPOLATION | ||
38 | bool | ||
39 | default y | ||
40 | |||
41 | config ARCH_MAY_HAVE_PC_FDC | 45 | config ARCH_MAY_HAVE_PC_FDC |
42 | bool | 46 | bool |
43 | default y | 47 | default y |
@@ -113,6 +117,8 @@ config GENERIC_HARDIRQS | |||
113 | 117 | ||
114 | menu "General machine setup" | 118 | menu "General machine setup" |
115 | 119 | ||
120 | source "kernel/time/Kconfig" | ||
121 | |||
116 | config SMP | 122 | config SMP |
117 | bool "Symmetric multi-processing support" | 123 | bool "Symmetric multi-processing support" |
118 | ---help--- | 124 | ---help--- |
@@ -214,6 +220,7 @@ config ARCH_SPARSEMEM_ENABLE | |||
214 | 220 | ||
215 | config ARCH_SPARSEMEM_DEFAULT | 221 | config ARCH_SPARSEMEM_DEFAULT |
216 | def_bool y | 222 | def_bool y |
223 | select SPARSEMEM_STATIC | ||
217 | 224 | ||
218 | config LARGE_ALLOCS | 225 | config LARGE_ALLOCS |
219 | def_bool y | 226 | def_bool y |
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
index e724c54af029..c65b2f9c98d8 100644
--- a/arch/sparc64/kernel/central.c
+++ b/arch/sparc64/kernel/central.c
@@ -32,7 +32,7 @@ static void central_probe_failure(int line) | |||
32 | static void central_ranges_init(struct linux_central *central) | 32 | static void central_ranges_init(struct linux_central *central) |
33 | { | 33 | { |
34 | struct device_node *dp = central->prom_node; | 34 | struct device_node *dp = central->prom_node; |
35 | void *pval; | 35 | const void *pval; |
36 | int len; | 36 | int len; |
37 | 37 | ||
38 | central->num_central_ranges = 0; | 38 | central->num_central_ranges = 0; |
@@ -47,7 +47,7 @@ static void central_ranges_init(struct linux_central *central) | |||
47 | static void fhc_ranges_init(struct linux_fhc *fhc) | 47 | static void fhc_ranges_init(struct linux_fhc *fhc) |
48 | { | 48 | { |
49 | struct device_node *dp = fhc->prom_node; | 49 | struct device_node *dp = fhc->prom_node; |
50 | void *pval; | 50 | const void *pval; |
51 | int len; | 51 | int len; |
52 | 52 | ||
53 | fhc->num_fhc_ranges = 0; | 53 | fhc->num_fhc_ranges = 0; |
@@ -119,7 +119,7 @@ static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r) | |||
119 | static void probe_other_fhcs(void) | 119 | static void probe_other_fhcs(void) |
120 | { | 120 | { |
121 | struct device_node *dp; | 121 | struct device_node *dp; |
122 | struct linux_prom64_registers *fpregs; | 122 | const struct linux_prom64_registers *fpregs; |
123 | 123 | ||
124 | for_each_node_by_name(dp, "fhc") { | 124 | for_each_node_by_name(dp, "fhc") { |
125 | struct linux_fhc *fhc; | 125 | struct linux_fhc *fhc; |
@@ -190,7 +190,8 @@ static void probe_clock_board(struct linux_central *central, | |||
190 | struct device_node *fp) | 190 | struct device_node *fp) |
191 | { | 191 | { |
192 | struct device_node *dp; | 192 | struct device_node *dp; |
193 | struct linux_prom_registers cregs[3], *pr; | 193 | struct linux_prom_registers cregs[3]; |
194 | const struct linux_prom_registers *pr; | ||
194 | int nslots, tmp, nregs; | 195 | int nslots, tmp, nregs; |
195 | 196 | ||
196 | dp = fp->child; | 197 | dp = fp->child; |
@@ -299,7 +300,8 @@ static void init_all_fhc_hw(void) | |||
299 | 300 | ||
300 | void central_probe(void) | 301 | void central_probe(void) |
301 | { | 302 | { |
302 | struct linux_prom_registers fpregs[6], *pr; | 303 | struct linux_prom_registers fpregs[6]; |
304 | const struct linux_prom_registers *pr; | ||
303 | struct linux_fhc *fhc; | 305 | struct linux_fhc *fhc; |
304 | struct device_node *dp, *fp; | 306 | struct device_node *dp, *fp; |
305 | int err; | 307 | int err; |
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
index 9699abeb9907..777d34577045 100644
--- a/arch/sparc64/kernel/chmc.c
+++ b/arch/sparc64/kernel/chmc.c
@@ -343,8 +343,8 @@ static int init_one_mctrl(struct device_node *dp) | |||
343 | { | 343 | { |
344 | struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL); | 344 | struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL); |
345 | int portid = of_getintprop_default(dp, "portid", -1); | 345 | int portid = of_getintprop_default(dp, "portid", -1); |
346 | struct linux_prom64_registers *regs; | 346 | const struct linux_prom64_registers *regs; |
347 | void *pval; | 347 | const void *pval; |
348 | int len; | 348 | int len; |
349 | 349 | ||
350 | if (!mp) | 350 | if (!mp) |
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 35bf895fdeee..0ace17bafba4 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -285,7 +285,7 @@ static void __init fill_ebus_child(struct device_node *dp, | |||
285 | int non_standard_regs) | 285 | int non_standard_regs) |
286 | { | 286 | { |
287 | struct of_device *op; | 287 | struct of_device *op; |
288 | int *regs; | 288 | const int *regs; |
289 | int i, len; | 289 | int i, len; |
290 | 290 | ||
291 | dev->prom_node = dp; | 291 | dev->prom_node = dp; |
@@ -438,11 +438,9 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p) | |||
438 | 438 | ||
439 | void __init ebus_init(void) | 439 | void __init ebus_init(void) |
440 | { | 440 | { |
441 | struct pci_pbm_info *pbm; | ||
442 | struct linux_ebus_device *dev; | 441 | struct linux_ebus_device *dev; |
443 | struct linux_ebus *ebus; | 442 | struct linux_ebus *ebus; |
444 | struct pci_dev *pdev; | 443 | struct pci_dev *pdev; |
445 | struct pcidev_cookie *cookie; | ||
446 | struct device_node *dp; | 444 | struct device_node *dp; |
447 | int is_rio; | 445 | int is_rio; |
448 | int num_ebus = 0; | 446 | int num_ebus = 0; |
@@ -453,8 +451,7 @@ void __init ebus_init(void) | |||
453 | return; | 451 | return; |
454 | } | 452 | } |
455 | 453 | ||
456 | cookie = pdev->sysdata; | 454 | dp = pci_device_to_OF_node(pdev); |
457 | dp = cookie->prom_node; | ||
458 | 455 | ||
459 | ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus)); | 456 | ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus)); |
460 | ebus->next = NULL; | 457 | ebus->next = NULL; |
@@ -480,8 +477,7 @@ void __init ebus_init(void) | |||
480 | break; | 477 | break; |
481 | } | 478 | } |
482 | ebus->is_rio = is_rio; | 479 | ebus->is_rio = is_rio; |
483 | cookie = pdev->sysdata; | 480 | dp = pci_device_to_OF_node(pdev); |
484 | dp = cookie->prom_node; | ||
485 | continue; | 481 | continue; |
486 | } | 482 | } |
487 | printk("ebus%d:", num_ebus); | 483 | printk("ebus%d:", num_ebus); |
@@ -489,7 +485,6 @@ void __init ebus_init(void) | |||
489 | ebus->index = num_ebus; | 485 | ebus->index = num_ebus; |
490 | ebus->prom_node = dp; | 486 | ebus->prom_node = dp; |
491 | ebus->self = pdev; | 487 | ebus->self = pdev; |
492 | ebus->parent = pbm = cookie->pbm; | ||
493 | 488 | ||
494 | ebus->ofdev.node = dp; | 489 | ebus->ofdev.node = dp; |
495 | ebus->ofdev.dev.parent = &pdev->dev; | 490 | ebus->ofdev.dev.parent = &pdev->dev; |
@@ -531,8 +526,7 @@ void __init ebus_init(void) | |||
531 | if (!pdev) | 526 | if (!pdev) |
532 | break; | 527 | break; |
533 | 528 | ||
534 | cookie = pdev->sysdata; | 529 | dp = pci_device_to_OF_node(pdev); |
535 | dp = cookie->prom_node; | ||
536 | 530 | ||
537 | ebus->next = ebus_alloc(sizeof(struct linux_ebus)); | 531 | ebus->next = ebus_alloc(sizeof(struct linux_ebus)); |
538 | ebus = ebus->next; | 532 | ebus = ebus->next; |
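
The ebus.c hunks above (and the isa.c ones further down) stop digging the OBP node out of a struct pcidev_cookie hanging off pdev->sysdata and instead ask pci_device_to_OF_node() for it. Given the archdata fields that of_create_pci_dev() fills in later in this merge (see the new pci.c code and the obppath sysfs attribute), that helper can be expected to reduce to something like the sketch below; the exact definition lives in the sparc64 headers and is assumed here, not quoted from them:

```c
#include <linux/pci.h>

/* Hedged sketch of the accessor the callers now rely on: return the
 * firmware node recorded in the per-device archdata (sparc64-specific).
 */
static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
	return pdev->dev.archdata.prom_node;
}
```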
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c443db184371..6241e3dbbd57 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq) | |||
589 | ino, virt_irq); | 589 | ino, virt_irq); |
590 | } | 590 | } |
591 | 591 | ||
592 | #ifndef CONFIG_SMP | ||
593 | extern irqreturn_t timer_interrupt(int, void *); | ||
594 | |||
595 | void timer_irq(int irq, struct pt_regs *regs) | ||
596 | { | ||
597 | unsigned long clr_mask = 1 << irq; | ||
598 | unsigned long tick_mask = tick_ops->softint_mask; | ||
599 | struct pt_regs *old_regs; | ||
600 | |||
601 | if (get_softint() & tick_mask) { | ||
602 | irq = 0; | ||
603 | clr_mask = tick_mask; | ||
604 | } | ||
605 | clear_softint(clr_mask); | ||
606 | |||
607 | old_regs = set_irq_regs(regs); | ||
608 | irq_enter(); | ||
609 | |||
610 | kstat_this_cpu.irqs[0]++; | ||
611 | timer_interrupt(irq, NULL); | ||
612 | |||
613 | irq_exit(); | ||
614 | set_irq_regs(old_regs); | ||
615 | } | ||
616 | #endif | ||
617 | |||
618 | void handler_irq(int irq, struct pt_regs *regs) | 592 | void handler_irq(int irq, struct pt_regs *regs) |
619 | { | 593 | { |
620 | struct ino_bucket *bucket; | 594 | struct ino_bucket *bucket; |
@@ -653,7 +627,7 @@ static u64 prom_limit0, prom_limit1; | |||
653 | static void map_prom_timers(void) | 627 | static void map_prom_timers(void) |
654 | { | 628 | { |
655 | struct device_node *dp; | 629 | struct device_node *dp; |
656 | unsigned int *addr; | 630 | const unsigned int *addr; |
657 | 631 | ||
658 | /* PROM timer node hangs out in the top level of device siblings... */ | 632 | /* PROM timer node hangs out in the top level of device siblings... */ |
659 | dp = of_find_node_by_path("/"); | 633 | dp = of_find_node_by_path("/"); |
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 98721a8f8619..6a6882e57ff2 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -24,27 +24,9 @@ static void __init report_dev(struct sparc_isa_device *isa_dev, int child) | |||
24 | 24 | ||
25 | static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev) | 25 | static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev) |
26 | { | 26 | { |
27 | struct linux_prom_registers *pregs; | 27 | struct of_device *op = of_find_device_by_node(isa_dev->prom_node); |
28 | unsigned long base, len; | ||
29 | int prop_len; | ||
30 | |||
31 | pregs = of_get_property(isa_dev->prom_node, "reg", &prop_len); | ||
32 | if (!pregs) | ||
33 | return; | ||
34 | |||
35 | /* Only the first one is interesting. */ | ||
36 | len = pregs[0].reg_size; | ||
37 | base = (((unsigned long)pregs[0].which_io << 32) | | ||
38 | (unsigned long)pregs[0].phys_addr); | ||
39 | base += isa_dev->bus->parent->io_space.start; | ||
40 | |||
41 | isa_dev->resource.start = base; | ||
42 | isa_dev->resource.end = (base + len - 1UL); | ||
43 | isa_dev->resource.flags = IORESOURCE_IO; | ||
44 | isa_dev->resource.name = isa_dev->prom_node->name; | ||
45 | 28 | ||
46 | request_resource(&isa_dev->bus->parent->io_space, | 29 | memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource)); |
47 | &isa_dev->resource); | ||
48 | } | 30 | } |
49 | 31 | ||
50 | static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev) | 32 | static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev) |
@@ -158,19 +140,10 @@ void __init isa_init(void) | |||
158 | 140 | ||
159 | pdev = NULL; | 141 | pdev = NULL; |
160 | while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) { | 142 | while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) { |
161 | struct pcidev_cookie *pdev_cookie; | ||
162 | struct pci_pbm_info *pbm; | ||
163 | struct sparc_isa_bridge *isa_br; | 143 | struct sparc_isa_bridge *isa_br; |
164 | struct device_node *dp; | 144 | struct device_node *dp; |
165 | 145 | ||
166 | pdev_cookie = pdev->sysdata; | 146 | dp = pci_device_to_OF_node(pdev); |
167 | if (!pdev_cookie) { | ||
168 | printk("ISA: Warning, ISA bridge ignored due to " | ||
169 | "lack of OBP data.\n"); | ||
170 | continue; | ||
171 | } | ||
172 | pbm = pdev_cookie->pbm; | ||
173 | dp = pdev_cookie->prom_node; | ||
174 | 147 | ||
175 | isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL); | 148 | isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL); |
176 | if (!isa_br) { | 149 | if (!isa_br) { |
@@ -195,10 +168,9 @@ void __init isa_init(void) | |||
195 | isa_br->next = isa_chain; | 168 | isa_br->next = isa_chain; |
196 | isa_chain = isa_br; | 169 | isa_chain = isa_br; |
197 | 170 | ||
198 | isa_br->parent = pbm; | ||
199 | isa_br->self = pdev; | 171 | isa_br->self = pdev; |
200 | isa_br->index = index++; | 172 | isa_br->index = index++; |
201 | isa_br->prom_node = pdev_cookie->prom_node; | 173 | isa_br->prom_node = dp; |
202 | 174 | ||
203 | printk("isa%d:", isa_br->index); | 175 | printk("isa%d:", isa_br->index); |
204 | 176 | ||
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index fb9bf1e4d036..9ac9a307999a 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -245,7 +245,7 @@ struct of_bus { | |||
245 | int *addrc, int *sizec); | 245 | int *addrc, int *sizec); |
246 | int (*map)(u32 *addr, const u32 *range, | 246 | int (*map)(u32 *addr, const u32 *range, |
247 | int na, int ns, int pna); | 247 | int na, int ns, int pna); |
248 | unsigned int (*get_flags)(u32 *addr); | 248 | unsigned int (*get_flags)(const u32 *addr); |
249 | }; | 249 | }; |
250 | 250 | ||
251 | /* | 251 | /* |
@@ -305,7 +305,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range, | |||
305 | return 0; | 305 | return 0; |
306 | } | 306 | } |
307 | 307 | ||
308 | static unsigned int of_bus_default_get_flags(u32 *addr) | 308 | static unsigned int of_bus_default_get_flags(const u32 *addr) |
309 | { | 309 | { |
310 | return IORESOURCE_MEM; | 310 | return IORESOURCE_MEM; |
311 | } | 311 | } |
@@ -317,6 +317,11 @@ static unsigned int of_bus_default_get_flags(u32 *addr) | |||
317 | static int of_bus_pci_match(struct device_node *np) | 317 | static int of_bus_pci_match(struct device_node *np) |
318 | { | 318 | { |
319 | if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { | 319 | if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { |
320 | const char *model = of_get_property(np, "model", NULL); | ||
321 | |||
322 | if (model && !strcmp(model, "SUNW,simba")) | ||
323 | return 0; | ||
324 | |||
320 | /* Do not do PCI specific frobbing if the | 325 | /* Do not do PCI specific frobbing if the |
321 | * PCI bridge lacks a ranges property. We | 326 | * PCI bridge lacks a ranges property. We |
322 | * want to pass it through up to the next | 327 | * want to pass it through up to the next |
@@ -332,6 +337,21 @@ static int of_bus_pci_match(struct device_node *np) | |||
332 | return 0; | 337 | return 0; |
333 | } | 338 | } |
334 | 339 | ||
340 | static int of_bus_simba_match(struct device_node *np) | ||
341 | { | ||
342 | const char *model = of_get_property(np, "model", NULL); | ||
343 | |||
344 | if (model && !strcmp(model, "SUNW,simba")) | ||
345 | return 1; | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static int of_bus_simba_map(u32 *addr, const u32 *range, | ||
350 | int na, int ns, int pna) | ||
351 | { | ||
352 | return 0; | ||
353 | } | ||
354 | |||
335 | static void of_bus_pci_count_cells(struct device_node *np, | 355 | static void of_bus_pci_count_cells(struct device_node *np, |
336 | int *addrc, int *sizec) | 356 | int *addrc, int *sizec) |
337 | { | 357 | { |
@@ -369,7 +389,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range, | |||
369 | return 0; | 389 | return 0; |
370 | } | 390 | } |
371 | 391 | ||
372 | static unsigned int of_bus_pci_get_flags(u32 *addr) | 392 | static unsigned int of_bus_pci_get_flags(const u32 *addr) |
373 | { | 393 | { |
374 | unsigned int flags = 0; | 394 | unsigned int flags = 0; |
375 | u32 w = addr[0]; | 395 | u32 w = addr[0]; |
@@ -436,6 +456,15 @@ static struct of_bus of_busses[] = { | |||
436 | .map = of_bus_pci_map, | 456 | .map = of_bus_pci_map, |
437 | .get_flags = of_bus_pci_get_flags, | 457 | .get_flags = of_bus_pci_get_flags, |
438 | }, | 458 | }, |
459 | /* SIMBA */ | ||
460 | { | ||
461 | .name = "simba", | ||
462 | .addr_prop_name = "assigned-addresses", | ||
463 | .match = of_bus_simba_match, | ||
464 | .count_cells = of_bus_pci_count_cells, | ||
465 | .map = of_bus_simba_map, | ||
466 | .get_flags = of_bus_pci_get_flags, | ||
467 | }, | ||
439 | /* SBUS */ | 468 | /* SBUS */ |
440 | { | 469 | { |
441 | .name = "sbus", | 470 | .name = "sbus", |
@@ -482,7 +511,7 @@ static int __init build_one_resource(struct device_node *parent, | |||
482 | u32 *addr, | 511 | u32 *addr, |
483 | int na, int ns, int pna) | 512 | int na, int ns, int pna) |
484 | { | 513 | { |
485 | u32 *ranges; | 514 | const u32 *ranges; |
486 | unsigned int rlen; | 515 | unsigned int rlen; |
487 | int rone; | 516 | int rone; |
488 | 517 | ||
@@ -513,7 +542,7 @@ static int __init build_one_resource(struct device_node *parent, | |||
513 | 542 | ||
514 | static int __init use_1to1_mapping(struct device_node *pp) | 543 | static int __init use_1to1_mapping(struct device_node *pp) |
515 | { | 544 | { |
516 | char *model; | 545 | const char *model; |
517 | 546 | ||
518 | /* If this is on the PMU bus, don't try to translate it even | 547 | /* If this is on the PMU bus, don't try to translate it even |
519 | * if a ranges property exists. | 548 | * if a ranges property exists. |
@@ -548,7 +577,7 @@ static void __init build_device_resources(struct of_device *op, | |||
548 | struct of_bus *bus; | 577 | struct of_bus *bus; |
549 | int na, ns; | 578 | int na, ns; |
550 | int index, num_reg; | 579 | int index, num_reg; |
551 | void *preg; | 580 | const void *preg; |
552 | 581 | ||
553 | if (!parent) | 582 | if (!parent) |
554 | return; | 583 | return; |
@@ -578,7 +607,7 @@ static void __init build_device_resources(struct of_device *op, | |||
578 | for (index = 0; index < num_reg; index++) { | 607 | for (index = 0; index < num_reg; index++) { |
579 | struct resource *r = &op->resource[index]; | 608 | struct resource *r = &op->resource[index]; |
580 | u32 addr[OF_MAX_ADDR_CELLS]; | 609 | u32 addr[OF_MAX_ADDR_CELLS]; |
581 | u32 *reg = (preg + (index * ((na + ns) * 4))); | 610 | const u32 *reg = (preg + (index * ((na + ns) * 4))); |
582 | struct device_node *dp = op->node; | 611 | struct device_node *dp = op->node; |
583 | struct device_node *pp = p_op->node; | 612 | struct device_node *pp = p_op->node; |
584 | struct of_bus *pbus, *dbus; | 613 | struct of_bus *pbus, *dbus; |
@@ -643,14 +672,14 @@ static void __init build_device_resources(struct of_device *op, | |||
643 | 672 | ||
644 | static struct device_node * __init | 673 | static struct device_node * __init |
645 | apply_interrupt_map(struct device_node *dp, struct device_node *pp, | 674 | apply_interrupt_map(struct device_node *dp, struct device_node *pp, |
646 | u32 *imap, int imlen, u32 *imask, | 675 | const u32 *imap, int imlen, const u32 *imask, |
647 | unsigned int *irq_p) | 676 | unsigned int *irq_p) |
648 | { | 677 | { |
649 | struct device_node *cp; | 678 | struct device_node *cp; |
650 | unsigned int irq = *irq_p; | 679 | unsigned int irq = *irq_p; |
651 | struct of_bus *bus; | 680 | struct of_bus *bus; |
652 | phandle handle; | 681 | phandle handle; |
653 | u32 *reg; | 682 | const u32 *reg; |
654 | int na, num_reg, i; | 683 | int na, num_reg, i; |
655 | 684 | ||
656 | bus = of_match_bus(pp); | 685 | bus = of_match_bus(pp); |
@@ -705,7 +734,7 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp, | |||
705 | struct device_node *pp, | 734 | struct device_node *pp, |
706 | unsigned int irq) | 735 | unsigned int irq) |
707 | { | 736 | { |
708 | struct linux_prom_pci_registers *regs; | 737 | const struct linux_prom_pci_registers *regs; |
709 | unsigned int bus, devfn, slot, ret; | 738 | unsigned int bus, devfn, slot, ret; |
710 | 739 | ||
711 | if (irq < 1 || irq > 4) | 740 | if (irq < 1 || irq > 4) |
@@ -730,12 +759,6 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp, | |||
730 | * D: 2-bit slot number, derived from PCI device number as | 759 | * D: 2-bit slot number, derived from PCI device number as |
731 | * (dev - 1) for bus A, or (dev - 2) for bus B | 760 | * (dev - 1) for bus A, or (dev - 2) for bus B |
732 | * L: 2-bit line number | 761 | * L: 2-bit line number |
733 | * | ||
734 | * Actually, more "portable" way to calculate the funky | ||
735 | * slot number is to subtract pbm->pci_first_slot from the | ||
736 | * device number, and that's exactly what the pre-OF | ||
737 | * sparc64 code did, but we're building this stuff generically | ||
738 | * using the OBP tree, not in the PCI controller layer. | ||
739 | */ | 762 | */ |
740 | if (bus & 0x80) { | 763 | if (bus & 0x80) { |
741 | /* PBM-A */ | 764 | /* PBM-A */ |
@@ -794,7 +817,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op, | |||
794 | pp = dp->parent; | 817 | pp = dp->parent; |
795 | ip = NULL; | 818 | ip = NULL; |
796 | while (pp) { | 819 | while (pp) { |
797 | void *imap, *imsk; | 820 | const void *imap, *imsk; |
798 | int imlen; | 821 | int imlen; |
799 | 822 | ||
800 | imap = of_get_property(pp, "interrupt-map", &imlen); | 823 | imap = of_get_property(pp, "interrupt-map", &imlen); |
@@ -859,7 +882,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp, | |||
859 | struct device *parent) | 882 | struct device *parent) |
860 | { | 883 | { |
861 | struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); | 884 | struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); |
862 | unsigned int *irq; | 885 | const unsigned int *irq; |
863 | int len, i; | 886 | int len, i; |
864 | 887 | ||
865 | if (!op) | 888 | if (!op) |
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 12109886bb1e..023af41ad68d 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -1,9 +1,11 @@ | |||
1 | /* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $ | 1 | /* pci.c: UltraSparc PCI controller support. |
2 | * pci.c: UltraSparc PCI controller support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) |
5 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) | 5 | * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) |
6 | * | ||
7 | * OF tree based PCI bus probing taken from the PowerPC port | ||
8 | * with minor modifications, see there for credits. | ||
7 | */ | 9 | */ |
8 | 10 | ||
9 | #include <linux/module.h> | 11 | #include <linux/module.h> |
@@ -24,6 +26,9 @@ | |||
24 | #include <asm/ebus.h> | 26 | #include <asm/ebus.h> |
25 | #include <asm/isa.h> | 27 | #include <asm/isa.h> |
26 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
29 | #include <asm/apb.h> | ||
30 | |||
31 | #include "pci_impl.h" | ||
27 | 32 | ||
28 | unsigned long pci_memspace_mask = 0xffffffffUL; | 33 | unsigned long pci_memspace_mask = 0xffffffffUL; |
29 | 34 | ||
@@ -277,10 +282,10 @@ int __init pcic_present(void) | |||
277 | return pci_controller_scan(pci_is_controller); | 282 | return pci_controller_scan(pci_is_controller); |
278 | } | 283 | } |
279 | 284 | ||
280 | struct pci_iommu_ops *pci_iommu_ops; | 285 | const struct pci_iommu_ops *pci_iommu_ops; |
281 | EXPORT_SYMBOL(pci_iommu_ops); | 286 | EXPORT_SYMBOL(pci_iommu_ops); |
282 | 287 | ||
283 | extern struct pci_iommu_ops pci_sun4u_iommu_ops, | 288 | extern const struct pci_iommu_ops pci_sun4u_iommu_ops, |
284 | pci_sun4v_iommu_ops; | 289 | pci_sun4v_iommu_ops; |
285 | 290 | ||
286 | /* Find each controller in the system, attach and initialize | 291 | /* Find each controller in the system, attach and initialize |
@@ -300,6 +305,467 @@ static void __init pci_controller_probe(void) | |||
300 | pci_controller_scan(pci_controller_init); | 305 | pci_controller_scan(pci_controller_init); |
301 | } | 306 | } |
302 | 307 | ||
308 | static unsigned long pci_parse_of_flags(u32 addr0) | ||
309 | { | ||
310 | unsigned long flags = 0; | ||
311 | |||
312 | if (addr0 & 0x02000000) { | ||
313 | flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; | ||
314 | flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
315 | flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; | ||
316 | if (addr0 & 0x40000000) | ||
317 | flags |= IORESOURCE_PREFETCH | ||
318 | | PCI_BASE_ADDRESS_MEM_PREFETCH; | ||
319 | } else if (addr0 & 0x01000000) | ||
320 | flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; | ||
321 | return flags; | ||
322 | } | ||
323 | |||
324 | /* The of_device layer has translated all of the assigned-address properties | ||
325 | * into physical address resources, we only have to figure out the register | ||
326 | * mapping. | ||
327 | */ | ||
328 | static void pci_parse_of_addrs(struct of_device *op, | ||
329 | struct device_node *node, | ||
330 | struct pci_dev *dev) | ||
331 | { | ||
332 | struct resource *op_res; | ||
333 | const u32 *addrs; | ||
334 | int proplen; | ||
335 | |||
336 | addrs = of_get_property(node, "assigned-addresses", &proplen); | ||
337 | if (!addrs) | ||
338 | return; | ||
339 | printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); | ||
340 | op_res = &op->resource[0]; | ||
341 | for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { | ||
342 | struct resource *res; | ||
343 | unsigned long flags; | ||
344 | int i; | ||
345 | |||
346 | flags = pci_parse_of_flags(addrs[0]); | ||
347 | if (!flags) | ||
348 | continue; | ||
349 | i = addrs[0] & 0xff; | ||
350 | printk(" start: %lx, end: %lx, i: %x\n", | ||
351 | op_res->start, op_res->end, i); | ||
352 | |||
353 | if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { | ||
354 | res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; | ||
355 | } else if (i == dev->rom_base_reg) { | ||
356 | res = &dev->resource[PCI_ROM_RESOURCE]; | ||
357 | flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; | ||
358 | } else { | ||
359 | printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); | ||
360 | continue; | ||
361 | } | ||
362 | res->start = op_res->start; | ||
363 | res->end = op_res->end; | ||
364 | res->flags = flags; | ||
365 | res->name = pci_name(dev); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, | ||
370 | struct device_node *node, | ||
371 | struct pci_bus *bus, int devfn, | ||
372 | int host_controller) | ||
373 | { | ||
374 | struct dev_archdata *sd; | ||
375 | struct pci_dev *dev; | ||
376 | const char *type; | ||
377 | u32 class; | ||
378 | |||
379 | dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); | ||
380 | if (!dev) | ||
381 | return NULL; | ||
382 | |||
383 | sd = &dev->dev.archdata; | ||
384 | sd->iommu = pbm->iommu; | ||
385 | sd->stc = &pbm->stc; | ||
386 | sd->host_controller = pbm; | ||
387 | sd->prom_node = node; | ||
388 | sd->op = of_find_device_by_node(node); | ||
389 | sd->msi_num = 0xffffffff; | ||
390 | |||
391 | type = of_get_property(node, "device_type", NULL); | ||
392 | if (type == NULL) | ||
393 | type = ""; | ||
394 | |||
395 | printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n", | ||
396 | devfn, type, host_controller); | ||
397 | |||
398 | dev->bus = bus; | ||
399 | dev->sysdata = node; | ||
400 | dev->dev.parent = bus->bridge; | ||
401 | dev->dev.bus = &pci_bus_type; | ||
402 | dev->devfn = devfn; | ||
403 | dev->multifunction = 0; /* maybe a lie? */ | ||
404 | |||
405 | if (host_controller) { | ||
406 | dev->vendor = 0x108e; | ||
407 | dev->device = 0x8000; | ||
408 | dev->subsystem_vendor = 0x0000; | ||
409 | dev->subsystem_device = 0x0000; | ||
410 | dev->cfg_size = 256; | ||
411 | dev->class = PCI_CLASS_BRIDGE_HOST << 8; | ||
412 | sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), | ||
413 | 0x00, PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
414 | } else { | ||
415 | dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); | ||
416 | dev->device = of_getintprop_default(node, "device-id", 0xffff); | ||
417 | dev->subsystem_vendor = | ||
418 | of_getintprop_default(node, "subsystem-vendor-id", 0); | ||
419 | dev->subsystem_device = | ||
420 | of_getintprop_default(node, "subsystem-id", 0); | ||
421 | |||
422 | dev->cfg_size = pci_cfg_space_size(dev); | ||
423 | |||
424 | /* We can't actually use the firmware value, we have | ||
425 | * to read what is in the register right now. One | ||
426 | * reason is that in the case of IDE interfaces the | ||
427 | * firmware can sample the value before the the IDE | ||
428 | * interface is programmed into native mode. | ||
429 | */ | ||
430 | pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); | ||
431 | dev->class = class >> 8; | ||
432 | |||
433 | sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), | ||
434 | dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
435 | } | ||
436 | printk(" class: 0x%x device name: %s\n", | ||
437 | dev->class, pci_name(dev)); | ||
438 | |||
439 | dev->current_state = 4; /* unknown power state */ | ||
440 | dev->error_state = pci_channel_io_normal; | ||
441 | |||
442 | if (host_controller) { | ||
443 | dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; | ||
444 | dev->rom_base_reg = PCI_ROM_ADDRESS1; | ||
445 | dev->irq = PCI_IRQ_NONE; | ||
446 | } else { | ||
447 | if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { | ||
448 | /* a PCI-PCI bridge */ | ||
449 | dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; | ||
450 | dev->rom_base_reg = PCI_ROM_ADDRESS1; | ||
451 | } else if (!strcmp(type, "cardbus")) { | ||
452 | dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; | ||
453 | } else { | ||
454 | dev->hdr_type = PCI_HEADER_TYPE_NORMAL; | ||
455 | dev->rom_base_reg = PCI_ROM_ADDRESS; | ||
456 | |||
457 | dev->irq = sd->op->irqs[0]; | ||
458 | if (dev->irq == 0xffffffff) | ||
459 | dev->irq = PCI_IRQ_NONE; | ||
460 | } | ||
461 | } | ||
462 | pci_parse_of_addrs(sd->op, node, dev); | ||
463 | |||
464 | printk(" adding to system ...\n"); | ||
465 | |||
466 | pci_device_add(dev, bus); | ||
467 | |||
468 | return dev; | ||
469 | } | ||
470 | |||
471 | static void __init apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) | ||
472 | { | ||
473 | u32 idx, first, last; | ||
474 | |||
475 | first = 8; | ||
476 | last = 0; | ||
477 | for (idx = 0; idx < 8; idx++) { | ||
478 | if ((map & (1 << idx)) != 0) { | ||
479 | if (first > idx) | ||
480 | first = idx; | ||
481 | if (last < idx) | ||
482 | last = idx; | ||
483 | } | ||
484 | } | ||
485 | |||
486 | *first_p = first; | ||
487 | *last_p = last; | ||
488 | } | ||
489 | |||
490 | static void __init pci_resource_adjust(struct resource *res, | ||
491 | struct resource *root) | ||
492 | { | ||
493 | res->start += root->start; | ||
494 | res->end += root->start; | ||
495 | } | ||
496 | |||
497 | /* Cook up fake bus resources for SUNW,simba PCI bridges which lack | ||
498 | * a proper 'ranges' property. | ||
499 | */ | ||
500 | static void __init apb_fake_ranges(struct pci_dev *dev, | ||
501 | struct pci_bus *bus, | ||
502 | struct pci_pbm_info *pbm) | ||
503 | { | ||
504 | struct resource *res; | ||
505 | u32 first, last; | ||
506 | u8 map; | ||
507 | |||
508 | pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); | ||
509 | apb_calc_first_last(map, &first, &last); | ||
510 | res = bus->resource[0]; | ||
511 | res->start = (first << 21); | ||
512 | res->end = (last << 21) + ((1 << 21) - 1); | ||
513 | res->flags = IORESOURCE_IO; | ||
514 | pci_resource_adjust(res, &pbm->io_space); | ||
515 | |||
516 | pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); | ||
517 | apb_calc_first_last(map, &first, &last); | ||
518 | res = bus->resource[1]; | ||
519 | res->start = (first << 21); | ||
520 | res->end = (last << 21) + ((1 << 21) - 1); | ||
521 | res->flags = IORESOURCE_MEM; | ||
522 | pci_resource_adjust(res, &pbm->mem_space); | ||
523 | } | ||
524 | |||
525 | static void __init pci_of_scan_bus(struct pci_pbm_info *pbm, | ||
526 | struct device_node *node, | ||
527 | struct pci_bus *bus); | ||
528 | |||
529 | #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) | ||
530 | |||
531 | void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, | ||
532 | struct device_node *node, | ||
533 | struct pci_dev *dev) | ||
534 | { | ||
535 | struct pci_bus *bus; | ||
536 | const u32 *busrange, *ranges; | ||
537 | int len, i, simba; | ||
538 | struct resource *res; | ||
539 | unsigned int flags; | ||
540 | u64 size; | ||
541 | |||
542 | printk("of_scan_pci_bridge(%s)\n", node->full_name); | ||
543 | |||
544 | /* parse bus-range property */ | ||
545 | busrange = of_get_property(node, "bus-range", &len); | ||
546 | if (busrange == NULL || len != 8) { | ||
547 | printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", | ||
548 | node->full_name); | ||
549 | return; | ||
550 | } | ||
551 | ranges = of_get_property(node, "ranges", &len); | ||
552 | simba = 0; | ||
553 | if (ranges == NULL) { | ||
554 | const char *model = of_get_property(node, "model", NULL); | ||
555 | if (model && !strcmp(model, "SUNW,simba")) { | ||
556 | simba = 1; | ||
557 | } else { | ||
558 | printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", | ||
559 | node->full_name); | ||
560 | return; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | bus = pci_add_new_bus(dev->bus, dev, busrange[0]); | ||
565 | if (!bus) { | ||
566 | printk(KERN_ERR "Failed to create pci bus for %s\n", | ||
567 | node->full_name); | ||
568 | return; | ||
569 | } | ||
570 | |||
571 | bus->primary = dev->bus->number; | ||
572 | bus->subordinate = busrange[1]; | ||
573 | bus->bridge_ctl = 0; | ||
574 | |||
575 | /* parse ranges property, or cook one up by hand for Simba */ | ||
576 | /* PCI #address-cells == 3 and #size-cells == 2 always */ | ||
577 | res = &dev->resource[PCI_BRIDGE_RESOURCES]; | ||
578 | for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { | ||
579 | res->flags = 0; | ||
580 | bus->resource[i] = res; | ||
581 | ++res; | ||
582 | } | ||
583 | if (simba) { | ||
584 | apb_fake_ranges(dev, bus, pbm); | ||
585 | goto simba_cont; | ||
586 | } | ||
587 | i = 1; | ||
588 | for (; len >= 32; len -= 32, ranges += 8) { | ||
589 | struct resource *root; | ||
590 | |||
591 | flags = pci_parse_of_flags(ranges[0]); | ||
592 | size = GET_64BIT(ranges, 6); | ||
593 | if (flags == 0 || size == 0) | ||
594 | continue; | ||
595 | if (flags & IORESOURCE_IO) { | ||
596 | res = bus->resource[0]; | ||
597 | if (res->flags) { | ||
598 | printk(KERN_ERR "PCI: ignoring extra I/O range" | ||
599 | " for bridge %s\n", node->full_name); | ||
600 | continue; | ||
601 | } | ||
602 | root = &pbm->io_space; | ||
603 | } else { | ||
604 | if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { | ||
605 | printk(KERN_ERR "PCI: too many memory ranges" | ||
606 | " for bridge %s\n", node->full_name); | ||
607 | continue; | ||
608 | } | ||
609 | res = bus->resource[i]; | ||
610 | ++i; | ||
611 | root = &pbm->mem_space; | ||
612 | } | ||
613 | |||
614 | res->start = GET_64BIT(ranges, 1); | ||
615 | res->end = res->start + size - 1; | ||
616 | res->flags = flags; | ||
617 | |||
618 | /* Another way to implement this would be to add an of_device | ||
619 | * layer routine that can calculate a resource for a given | ||
620 | * range property value in a PCI device. | ||
621 | */ | ||
622 | pci_resource_adjust(res, root); | ||
623 | } | ||
624 | simba_cont: | ||
625 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), | ||
626 | bus->number); | ||
627 | printk(" bus name: %s\n", bus->name); | ||
628 | |||
629 | pci_of_scan_bus(pbm, node, bus); | ||
630 | } | ||
631 | |||
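The loop above consumes the bridge "ranges" property 32 bytes (eight cells) at a time: cell 0 carries the space/flags bits handed to pci_parse_of_flags(), cells 1-2 give the 64-bit child (bus) address used for res->start, and cells 6-7 give the 64-bit size. A standalone sketch of that layout with a hypothetical entry:

#include <stdio.h>
#include <stdint.h>

/* Sketch, not kernel code: one bridge "ranges" entry as consumed above,
 * eight 32-bit cells per entry. */
struct of_pci_range {
	uint32_t child_hi;      /* cell 0: space/flags, fed to pci_parse_of_flags() */
	uint32_t child_mid;     /* cells 1-2: 64-bit child (bus) address            */
	uint32_t child_lo;
	uint32_t parent_hi;     /* cells 3-5: parent-side address                   */
	uint32_t parent_mid;
	uint32_t parent_lo;
	uint32_t size_hi;       /* cells 6-7: 64-bit size                           */
	uint32_t size_lo;
};

int main(void)
{
	/* Hypothetical 32-bit MEM window: 64MB at bus address 0x02000000. */
	struct of_pci_range r = {
		.child_hi  = 0x82000000,
		.child_mid = 0x0, .child_lo = 0x02000000,
		.size_hi   = 0x0, .size_lo  = 0x04000000,
	};
	uint64_t start = ((uint64_t)r.child_mid << 32) | r.child_lo;
	uint64_t size  = ((uint64_t)r.size_hi  << 32) | r.size_lo;

	printf("start %#llx size %#llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}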
632 | static void __init pci_of_scan_bus(struct pci_pbm_info *pbm, | ||
633 | struct device_node *node, | ||
634 | struct pci_bus *bus) | ||
635 | { | ||
636 | struct device_node *child; | ||
637 | const u32 *reg; | ||
638 | int reglen, devfn; | ||
639 | struct pci_dev *dev; | ||
640 | |||
641 | printk("PCI: scan_bus[%s] bus no %d\n", | ||
642 | node->full_name, bus->number); | ||
643 | |||
644 | child = NULL; | ||
645 | while ((child = of_get_next_child(node, child)) != NULL) { | ||
646 | printk(" * %s\n", child->full_name); | ||
647 | reg = of_get_property(child, "reg", ®len); | ||
648 | if (reg == NULL || reglen < 20) | ||
649 | continue; | ||
650 | devfn = (reg[0] >> 8) & 0xff; | ||
651 | |||
652 | /* create a new pci_dev for this device */ | ||
653 | dev = of_create_pci_dev(pbm, child, bus, devfn, 0); | ||
654 | if (!dev) | ||
655 | continue; | ||
656 | printk("PCI: dev header type: %x\n", dev->hdr_type); | ||
657 | |||
658 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || | ||
659 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) | ||
660 | of_scan_pci_bridge(pbm, child, dev); | ||
661 | } | ||
662 | } | ||
663 | |||
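pci_of_scan_bus() above pulls devfn out of the first "reg" cell of each child node. Per the OF PCI bus binding, phys.hi is laid out as npt000ss bbbbbbbb dddddfff rrrrrrrr, so bits 8-15 already hold the devfn byte. A quick sketch of the same decode with a made-up cell:

#include <stdio.h>
#include <stdint.h>

/* Sketch: decode the first "reg" cell the way pci_of_scan_bus() does. */
int main(void)
{
	uint32_t phys_hi = 0x00011800;  /* hypothetical: bus 1, device 3, function 0 */
	unsigned int devfn = (phys_hi >> 8) & 0xff;

	printf("devfn %#x -> device %u function %u\n",
	       devfn, devfn >> 3, devfn & 7);
	return 0;
}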
664 | static ssize_t | ||
665 | show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) | ||
666 | { | ||
667 | struct pci_dev *pdev; | ||
668 | struct device_node *dp; | ||
669 | |||
670 | pdev = to_pci_dev(dev); | ||
671 | dp = pdev->dev.archdata.prom_node; | ||
672 | |||
673 | return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); | ||
674 | } | ||
675 | |||
676 | static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); | ||
677 | |||
678 | static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) | ||
679 | { | ||
680 | struct pci_dev *dev; | ||
681 | struct pci_bus *child_bus; | ||
682 | int err; | ||
683 | |||
684 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
685 | /* we don't really care if we can create this file or | ||
686 | * not, but we need to assign the result of the call | ||
687 | * or the world will fall under alien invasion and | ||
688 | * everybody will be frozen on a spaceship ready to be | ||
689 | * eaten on alpha centauri by some green and jelly | ||
690 | * humanoid. | ||
691 | */ | ||
692 | err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); | ||
693 | } | ||
694 | list_for_each_entry(child_bus, &bus->children, node) | ||
695 | pci_bus_register_of_sysfs(child_bus); | ||
696 | } | ||
697 | |||
698 | int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev, | ||
699 | unsigned int devfn, | ||
700 | int where, int size, | ||
701 | u32 *value) | ||
702 | { | ||
703 | static u8 fake_pci_config[] = { | ||
704 | 0x8e, 0x10, /* Vendor: 0x108e (Sun) */ | ||
705 | 0x00, 0x80, /* Device: 0x8000 (PBM) */ | ||
706 | 0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */ | ||
707 | 0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */ | ||
708 | 0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */ | ||
709 | 0x00, /* Cacheline: 0x00 */ | ||
710 | 0x40, /* Latency: 0x40 */ | ||
711 | 0x00, /* Header-Type: 0x00 normal */ | ||
712 | }; | ||
713 | |||
714 | *value = 0; | ||
715 | if (where >= 0 && where < sizeof(fake_pci_config) && | ||
716 | (where + size) >= 0 && | ||
717 | (where + size) < sizeof(fake_pci_config) && | ||
718 | size <= sizeof(u32)) { | ||
719 | while (size--) { | ||
720 | *value <<= 8; | ||
721 | *value |= fake_pci_config[where + size]; | ||
722 | } | ||
723 | } | ||
724 | |||
725 | return PCIBIOS_SUCCESSFUL; | ||
726 | } | ||
727 | |||
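The fake host-bridge config read above walks the byte table backwards so the result assembles little-endian, the same byte order a real config-space read would return. A user-space check of that loop (bounds checks dropped, first four table bytes copied from the array above):

#include <stdio.h>
#include <stdint.h>

static const uint8_t fake_pci_config[] = {
	0x8e, 0x10,             /* Vendor: 0x108e (Sun) */
	0x00, 0x80,             /* Device: 0x8000 (PBM) */
};

/* Mirror of the byte-assembly loop in pci_host_bridge_read_pci_cfg(). */
static uint32_t fake_read(int where, int size)
{
	uint32_t value = 0;

	while (size--) {
		value <<= 8;
		value |= fake_pci_config[where + size];
	}
	return value;
}

int main(void)
{
	printf("vendor %#06x device %#06x\n",
	       fake_read(0, 2), fake_read(2, 2));
	/* prints vendor 0x108e device 0x8000 */
	return 0;
}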
728 | int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev, | ||
729 | unsigned int devfn, | ||
730 | int where, int size, | ||
731 | u32 value) | ||
732 | { | ||
733 | return PCIBIOS_SUCCESSFUL; | ||
734 | } | ||
735 | |||
736 | struct pci_bus * __init pci_scan_one_pbm(struct pci_pbm_info *pbm) | ||
737 | { | ||
738 | struct pci_controller_info *p = pbm->parent; | ||
739 | struct device_node *node = pbm->prom_node; | ||
740 | struct pci_dev *host_pdev; | ||
741 | struct pci_bus *bus; | ||
742 | |||
743 | printk("PCI: Scanning PBM %s\n", node->full_name); | ||
744 | |||
745 | /* XXX parent device? XXX */ | ||
746 | bus = pci_create_bus(NULL, pbm->pci_first_busno, p->pci_ops, pbm); | ||
747 | if (!bus) { | ||
748 | printk(KERN_ERR "Failed to create bus for %s\n", | ||
749 | node->full_name); | ||
750 | return NULL; | ||
751 | } | ||
752 | bus->secondary = pbm->pci_first_busno; | ||
753 | bus->subordinate = pbm->pci_last_busno; | ||
754 | |||
755 | bus->resource[0] = &pbm->io_space; | ||
756 | bus->resource[1] = &pbm->mem_space; | ||
757 | |||
758 | /* Create the dummy host bridge and link it in. */ | ||
759 | host_pdev = of_create_pci_dev(pbm, node, bus, 0x00, 1); | ||
760 | bus->self = host_pdev; | ||
761 | |||
762 | pci_of_scan_bus(pbm, node, bus); | ||
763 | pci_bus_add_devices(bus); | ||
764 | pci_bus_register_of_sysfs(bus); | ||
765 | |||
766 | return bus; | ||
767 | } | ||
768 | |||
303 | static void __init pci_scan_each_controller_bus(void) | 769 | static void __init pci_scan_each_controller_bus(void) |
304 | { | 770 | { |
305 | struct pci_controller_info *p; | 771 | struct pci_controller_info *p; |
@@ -360,8 +826,33 @@ void pcibios_align_resource(void *data, struct resource *res, | |||
360 | { | 826 | { |
361 | } | 827 | } |
362 | 828 | ||
363 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | 829 | int pcibios_enable_device(struct pci_dev *dev, int mask) |
364 | { | 830 | { |
831 | u16 cmd, oldcmd; | ||
832 | int i; | ||
833 | |||
834 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
835 | oldcmd = cmd; | ||
836 | |||
837 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
838 | struct resource *res = &dev->resource[i]; | ||
839 | |||
840 | /* Only set up the requested stuff */ | ||
841 | if (!(mask & (1<<i))) | ||
842 | continue; | ||
843 | |||
844 | if (res->flags & IORESOURCE_IO) | ||
845 | cmd |= PCI_COMMAND_IO; | ||
846 | if (res->flags & IORESOURCE_MEM) | ||
847 | cmd |= PCI_COMMAND_MEMORY; | ||
848 | } | ||
849 | |||
850 | if (cmd != oldcmd) { | ||
851 | printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", | ||
852 | pci_name(dev), cmd); | ||
853 | /* Enable the appropriate bits in the PCI command register. */ | ||
854 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
855 | } | ||
365 | return 0; | 856 | return 0; |
366 | } | 857 | } |
367 | 858 | ||
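The new pcibios_enable_device() above only ORs PCI_COMMAND_IO / PCI_COMMAND_MEMORY into the command register for the resources selected by mask, where bit i of mask stands for dev->resource[i]. A short standalone sketch of the same mask-to-command mapping; the constants mirror the kernel values but are redefined locally, and the resource flags are made up:

#include <stdio.h>
#include <stdint.h>

#define IORESOURCE_IO      0x00000100
#define IORESOURCE_MEM     0x00000200
#define PCI_COMMAND_IO     0x1
#define PCI_COMMAND_MEMORY 0x2

/* Walk the resources named by `mask` and accumulate the command bits
 * they require, as the loop above does. */
static uint16_t wanted_cmd_bits(const uint32_t *res_flags, int nres, int mask)
{
	uint16_t cmd = 0;
	int i;

	for (i = 0; i < nres; i++) {
		if (!(mask & (1 << i)))
			continue;
		if (res_flags[i] & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res_flags[i] & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	return cmd;
}

int main(void)
{
	uint32_t flags[3] = { IORESOURCE_IO, IORESOURCE_MEM, IORESOURCE_MEM };

	/* Enable only resources 0 and 2: expect IO | MEMORY = 0x3. */
	printf("cmd bits %#x\n", wanted_cmd_bits(flags, 3, 0x5));
	return 0;
}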
@@ -380,7 +871,7 @@ void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region | |||
380 | else | 871 | else |
381 | root = &pbm->mem_space; | 872 | root = &pbm->mem_space; |
382 | 873 | ||
383 | pbm->parent->resource_adjust(pdev, &zero_res, root); | 874 | pci_resource_adjust(&zero_res, root); |
384 | 875 | ||
385 | region->start = res->start - zero_res.start; | 876 | region->start = res->start - zero_res.start; |
386 | region->end = res->end - zero_res.start; | 877 | region->end = res->end - zero_res.start; |
@@ -401,7 +892,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res, | |||
401 | else | 892 | else |
402 | root = &pbm->mem_space; | 893 | root = &pbm->mem_space; |
403 | 894 | ||
404 | pbm->parent->resource_adjust(pdev, res, root); | 895 | pci_resource_adjust(res, root); |
405 | } | 896 | } |
406 | EXPORT_SYMBOL(pcibios_bus_to_resource); | 897 | EXPORT_SYMBOL(pcibios_bus_to_resource); |
407 | 898 | ||
@@ -422,55 +913,17 @@ char * __devinit pcibios_setup(char *str) | |||
422 | static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, | 913 | static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, |
423 | enum pci_mmap_state mmap_state) | 914 | enum pci_mmap_state mmap_state) |
424 | { | 915 | { |
425 | struct pcidev_cookie *pcp = pdev->sysdata; | 916 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
426 | struct pci_pbm_info *pbm; | ||
427 | struct pci_controller_info *p; | 917 | struct pci_controller_info *p; |
428 | unsigned long space_size, user_offset, user_size; | 918 | unsigned long space_size, user_offset, user_size; |
429 | 919 | ||
430 | if (!pcp) | ||
431 | return -ENXIO; | ||
432 | pbm = pcp->pbm; | ||
433 | if (!pbm) | ||
434 | return -ENXIO; | ||
435 | |||
436 | p = pbm->parent; | 920 | p = pbm->parent; |
437 | if (p->pbms_same_domain) { | 921 | if (mmap_state == pci_mmap_io) { |
438 | unsigned long lowest, highest; | 922 | space_size = (pbm->io_space.end - |
439 | 923 | pbm->io_space.start) + 1; | |
440 | lowest = ~0UL; highest = 0UL; | ||
441 | if (mmap_state == pci_mmap_io) { | ||
442 | if (p->pbm_A.io_space.flags) { | ||
443 | lowest = p->pbm_A.io_space.start; | ||
444 | highest = p->pbm_A.io_space.end + 1; | ||
445 | } | ||
446 | if (p->pbm_B.io_space.flags) { | ||
447 | if (lowest > p->pbm_B.io_space.start) | ||
448 | lowest = p->pbm_B.io_space.start; | ||
449 | if (highest < p->pbm_B.io_space.end + 1) | ||
450 | highest = p->pbm_B.io_space.end + 1; | ||
451 | } | ||
452 | space_size = highest - lowest; | ||
453 | } else { | ||
454 | if (p->pbm_A.mem_space.flags) { | ||
455 | lowest = p->pbm_A.mem_space.start; | ||
456 | highest = p->pbm_A.mem_space.end + 1; | ||
457 | } | ||
458 | if (p->pbm_B.mem_space.flags) { | ||
459 | if (lowest > p->pbm_B.mem_space.start) | ||
460 | lowest = p->pbm_B.mem_space.start; | ||
461 | if (highest < p->pbm_B.mem_space.end + 1) | ||
462 | highest = p->pbm_B.mem_space.end + 1; | ||
463 | } | ||
464 | space_size = highest - lowest; | ||
465 | } | ||
466 | } else { | 924 | } else { |
467 | if (mmap_state == pci_mmap_io) { | 925 | space_size = (pbm->mem_space.end - |
468 | space_size = (pbm->io_space.end - | 926 | pbm->mem_space.start) + 1; |
469 | pbm->io_space.start) + 1; | ||
470 | } else { | ||
471 | space_size = (pbm->mem_space.end - | ||
472 | pbm->mem_space.start) + 1; | ||
473 | } | ||
474 | } | 927 | } |
475 | 928 | ||
476 | /* Make sure the request is in range. */ | 929 | /* Make sure the request is in range. */ |
@@ -481,31 +934,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc | |||
481 | (user_offset + user_size) > space_size) | 934 | (user_offset + user_size) > space_size) |
482 | return -EINVAL; | 935 | return -EINVAL; |
483 | 936 | ||
484 | if (p->pbms_same_domain) { | 937 | if (mmap_state == pci_mmap_io) { |
485 | unsigned long lowest = ~0UL; | 938 | vma->vm_pgoff = (pbm->io_space.start + |
486 | 939 | user_offset) >> PAGE_SHIFT; | |
487 | if (mmap_state == pci_mmap_io) { | ||
488 | if (p->pbm_A.io_space.flags) | ||
489 | lowest = p->pbm_A.io_space.start; | ||
490 | if (p->pbm_B.io_space.flags && | ||
491 | lowest > p->pbm_B.io_space.start) | ||
492 | lowest = p->pbm_B.io_space.start; | ||
493 | } else { | ||
494 | if (p->pbm_A.mem_space.flags) | ||
495 | lowest = p->pbm_A.mem_space.start; | ||
496 | if (p->pbm_B.mem_space.flags && | ||
497 | lowest > p->pbm_B.mem_space.start) | ||
498 | lowest = p->pbm_B.mem_space.start; | ||
499 | } | ||
500 | vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT; | ||
501 | } else { | 940 | } else { |
502 | if (mmap_state == pci_mmap_io) { | 941 | vma->vm_pgoff = (pbm->mem_space.start + |
503 | vma->vm_pgoff = (pbm->io_space.start + | 942 | user_offset) >> PAGE_SHIFT; |
504 | user_offset) >> PAGE_SHIFT; | ||
505 | } else { | ||
506 | vma->vm_pgoff = (pbm->mem_space.start + | ||
507 | user_offset) >> PAGE_SHIFT; | ||
508 | } | ||
509 | } | 943 | } |
510 | 944 | ||
511 | return 0; | 945 | return 0; |
@@ -639,9 +1073,8 @@ int pci_domain_nr(struct pci_bus *pbus) | |||
639 | struct pci_controller_info *p = pbm->parent; | 1073 | struct pci_controller_info *p = pbm->parent; |
640 | 1074 | ||
641 | ret = p->index; | 1075 | ret = p->index; |
642 | if (p->pbms_same_domain == 0) | 1076 | ret = ((ret << 1) + |
643 | ret = ((ret << 1) + | 1077 | ((pbm == &pbm->parent->pbm_B) ? 1 : 0)); |
644 | ((pbm == &pbm->parent->pbm_B) ? 1 : 0)); | ||
645 | } | 1078 | } |
646 | 1079 | ||
647 | return ret; | 1080 | return ret; |
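With pbms_same_domain gone, every PBM now gets its own PCI domain: the controller index is shifted left one bit and PBM B adds one, so controller 2 yields domain 4 for PBM A and domain 5 for PBM B. A one-function sketch of that numbering:

#include <stdio.h>

/* Sketch of the domain numbering above: one domain per PBM,
 * two PBMs (A and B) per controller. */
static int domain_nr(int controller_index, int is_pbm_b)
{
	return (controller_index << 1) + (is_pbm_b ? 1 : 0);
}

int main(void)
{
	printf("controller 2: PBM A -> %d, PBM B -> %d\n",
	       domain_nr(2, 0), domain_nr(2, 1));   /* prints 4 and 5 */
	return 0;
}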
@@ -651,8 +1084,7 @@ EXPORT_SYMBOL(pci_domain_nr); | |||
651 | #ifdef CONFIG_PCI_MSI | 1084 | #ifdef CONFIG_PCI_MSI |
652 | int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | 1085 | int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) |
653 | { | 1086 | { |
654 | struct pcidev_cookie *pcp = pdev->sysdata; | 1087 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
655 | struct pci_pbm_info *pbm = pcp->pbm; | ||
656 | struct pci_controller_info *p = pbm->parent; | 1088 | struct pci_controller_info *p = pbm->parent; |
657 | int virt_irq, err; | 1089 | int virt_irq, err; |
658 | 1090 | ||
@@ -670,8 +1102,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq) | |||
670 | { | 1102 | { |
671 | struct msi_desc *entry = get_irq_msi(virt_irq); | 1103 | struct msi_desc *entry = get_irq_msi(virt_irq); |
672 | struct pci_dev *pdev = entry->dev; | 1104 | struct pci_dev *pdev = entry->dev; |
673 | struct pcidev_cookie *pcp = pdev->sysdata; | 1105 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
674 | struct pci_pbm_info *pbm = pcp->pbm; | ||
675 | struct pci_controller_info *p = pbm->parent; | 1106 | struct pci_controller_info *p = pbm->parent; |
676 | 1107 | ||
677 | if (!pbm->msi_num || !p->setup_msi_irq) | 1108 | if (!pbm->msi_num || !p->setup_msi_irq) |
@@ -683,9 +1114,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq) | |||
683 | 1114 | ||
684 | struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) | 1115 | struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) |
685 | { | 1116 | { |
686 | struct pcidev_cookie *pc = pdev->sysdata; | 1117 | return pdev->dev.archdata.prom_node; |
687 | |||
688 | return pc->op->node; | ||
689 | } | 1118 | } |
690 | EXPORT_SYMBOL(pci_device_to_OF_node); | 1119 | EXPORT_SYMBOL(pci_device_to_OF_node); |
691 | 1120 | ||
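Throughout this patch the old pcidev_cookie hanging off pdev->sysdata is replaced by fields in pdev->dev.archdata: the hunks above read .prom_node and .host_controller, and the pci_iommu.c hunks further down read .iommu and .stc. The sketch below only infers a shape consistent with those accesses; the real definition lives in the sparc64 asm headers, which are not part of these hunks.

/* Inferred shape only, assembled from the archdata accesses in this patch. */
struct iommu;
struct strbuf;
struct pci_pbm_info;
struct device_node;

struct dev_archdata_sketch {
	struct iommu		*iommu;            /* used by the pci_4u_* DMA ops */
	struct strbuf		*stc;              /* streaming cache state        */
	struct pci_pbm_info	*host_controller;  /* owning PBM                   */
	struct device_node	*prom_node;        /* OBP node, see obppath above  */
};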
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 5a92cb90ebe0..1e6aeedf43c4 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $ | 1 | /* pci_common.c: PCI controller common support. |
2 | * pci_common.c: PCI controller common support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <linux/string.h> | 6 | #include <linux/string.h> |
@@ -16,748 +15,137 @@ | |||
16 | 15 | ||
17 | #include "pci_impl.h" | 16 | #include "pci_impl.h" |
18 | 17 | ||
19 | /* Fix self device of BUS and hook it into BUS->self. | 18 | static void pci_register_legacy_regions(struct resource *io_res, |
20 | * The pci_scan_bus does not do this for the host bridge. | 19 | struct resource *mem_res) |
21 | */ | ||
22 | void __init pci_fixup_host_bridge_self(struct pci_bus *pbus) | ||
23 | { | ||
24 | struct pci_dev *pdev; | ||
25 | |||
26 | list_for_each_entry(pdev, &pbus->devices, bus_list) { | ||
27 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) { | ||
28 | pbus->self = pdev; | ||
29 | return; | ||
30 | } | ||
31 | } | ||
32 | |||
33 | prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n"); | ||
34 | prom_halt(); | ||
35 | } | ||
36 | |||
37 | /* Find the OBP PROM device tree node for a PCI device. */ | ||
38 | static struct device_node * __init | ||
39 | find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev, | ||
40 | struct device_node *bus_node, | ||
41 | struct linux_prom_pci_registers **pregs, | ||
42 | int *nregs) | ||
43 | { | 20 | { |
44 | struct device_node *dp; | 21 | struct resource *p; |
45 | |||
46 | *nregs = 0; | ||
47 | |||
48 | /* | ||
49 | * Return the PBM's PROM node in case we are it's PCI device, | ||
50 | * as the PBM's reg property is different to standard PCI reg | ||
51 | * properties. We would delete this device entry otherwise, | ||
52 | * which confuses XFree86's device probing... | ||
53 | */ | ||
54 | if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) && | ||
55 | (pdev->vendor == PCI_VENDOR_ID_SUN) && | ||
56 | (pdev->device == PCI_DEVICE_ID_SUN_PBM || | ||
57 | pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || | ||
58 | pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || | ||
59 | pdev->device == PCI_DEVICE_ID_SUN_SABRE || | ||
60 | pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) | ||
61 | return bus_node; | ||
62 | |||
63 | dp = bus_node->child; | ||
64 | while (dp) { | ||
65 | struct linux_prom_pci_registers *regs; | ||
66 | struct property *prop; | ||
67 | int len; | ||
68 | |||
69 | prop = of_find_property(dp, "reg", &len); | ||
70 | if (!prop) | ||
71 | goto do_next_sibling; | ||
72 | |||
73 | regs = prop->value; | ||
74 | if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) { | ||
75 | *pregs = regs; | ||
76 | *nregs = len / sizeof(struct linux_prom_pci_registers); | ||
77 | return dp; | ||
78 | } | ||
79 | |||
80 | do_next_sibling: | ||
81 | dp = dp->sibling; | ||
82 | } | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | 22 | ||
87 | /* Older versions of OBP on PCI systems encode 64-bit MEM | 23 | /* VGA Video RAM. */ |
88 | * space assignments incorrectly, this fixes them up. We also | 24 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
89 | * take the opportunity here to hide other kinds of bogus | 25 | if (!p) |
90 | * assignments. | ||
91 | */ | ||
92 | static void __init fixup_obp_assignments(struct pci_dev *pdev, | ||
93 | struct pcidev_cookie *pcp) | ||
94 | { | ||
95 | int i; | ||
96 | |||
97 | if (pdev->vendor == PCI_VENDOR_ID_AL && | ||
98 | (pdev->device == PCI_DEVICE_ID_AL_M7101 || | ||
99 | pdev->device == PCI_DEVICE_ID_AL_M1533)) { | ||
100 | int i; | ||
101 | |||
102 | /* Zap all of the normal resources, they are | ||
103 | * meaningless and generate bogus resource collision | ||
104 | * messages. This is OpenBoot's ill-fated attempt to | ||
105 | * represent the implicit resources that these devices | ||
106 | * have. | ||
107 | */ | ||
108 | pcp->num_prom_assignments = 0; | ||
109 | for (i = 0; i < 6; i++) { | ||
110 | pdev->resource[i].start = | ||
111 | pdev->resource[i].end = | ||
112 | pdev->resource[i].flags = 0; | ||
113 | } | ||
114 | pdev->resource[PCI_ROM_RESOURCE].start = | ||
115 | pdev->resource[PCI_ROM_RESOURCE].end = | ||
116 | pdev->resource[PCI_ROM_RESOURCE].flags = 0; | ||
117 | return; | 26 | return; |
118 | } | ||
119 | |||
120 | for (i = 0; i < pcp->num_prom_assignments; i++) { | ||
121 | struct linux_prom_pci_registers *ap; | ||
122 | int space; | ||
123 | 27 | ||
124 | ap = &pcp->prom_assignments[i]; | 28 | p->name = "Video RAM area"; |
125 | space = ap->phys_hi >> 24; | 29 | p->start = mem_res->start + 0xa0000UL; |
126 | if ((space & 0x3) == 2 && | 30 | p->end = p->start + 0x1ffffUL; |
127 | (space & 0x4) != 0) { | 31 | p->flags = IORESOURCE_BUSY; |
128 | ap->phys_hi &= ~(0x7 << 24); | 32 | request_resource(mem_res, p); |
129 | ap->phys_hi |= 0x3 << 24; | ||
130 | } | ||
131 | } | ||
132 | } | ||
133 | |||
134 | static ssize_t | ||
135 | show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) | ||
136 | { | ||
137 | struct pci_dev *pdev; | ||
138 | struct pcidev_cookie *sysdata; | ||
139 | |||
140 | pdev = to_pci_dev(dev); | ||
141 | sysdata = pdev->sysdata; | ||
142 | |||
143 | return snprintf (buf, PAGE_SIZE, "%s\n", sysdata->prom_node->full_name); | ||
144 | } | ||
145 | |||
146 | static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); | ||
147 | 33 | ||
148 | /* Fill in the PCI device cookie sysdata for the given | 34 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
149 | * PCI device. This cookie is the means by which one | 35 | if (!p) |
150 | * can get to OBP and PCI controller specific information | ||
151 | * for a PCI device. | ||
152 | */ | ||
153 | static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm, | ||
154 | struct pci_dev *pdev, | ||
155 | struct device_node *bus_node) | ||
156 | { | ||
157 | struct linux_prom_pci_registers *pregs = NULL; | ||
158 | struct pcidev_cookie *pcp; | ||
159 | struct device_node *dp; | ||
160 | struct property *prop; | ||
161 | int nregs, len, err; | ||
162 | |||
163 | dp = find_device_prom_node(pbm, pdev, bus_node, | ||
164 | &pregs, &nregs); | ||
165 | if (!dp) { | ||
166 | /* If it is not in the OBP device tree then | ||
167 | * there must be a damn good reason for it. | ||
168 | * | ||
169 | * So what we do is delete the device from the | ||
170 | * PCI device tree completely. This scenario | ||
171 | * is seen, for example, on CP1500 for the | ||
172 | * second EBUS/HappyMeal pair if the external | ||
173 | * connector for it is not present. | ||
174 | */ | ||
175 | pci_remove_bus_device(pdev); | ||
176 | return; | 36 | return; |
177 | } | ||
178 | |||
179 | pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC); | ||
180 | if (pcp == NULL) { | ||
181 | prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n"); | ||
182 | prom_halt(); | ||
183 | } | ||
184 | pcp->pbm = pbm; | ||
185 | pcp->prom_node = dp; | ||
186 | pcp->op = of_find_device_by_node(dp); | ||
187 | memcpy(pcp->prom_regs, pregs, | ||
188 | nregs * sizeof(struct linux_prom_pci_registers)); | ||
189 | pcp->num_prom_regs = nregs; | ||
190 | |||
191 | /* We can't have the pcidev_cookie assignments be just | ||
192 | * direct pointers into the property value, since they | ||
193 | * are potentially modified by the probing process. | ||
194 | */ | ||
195 | prop = of_find_property(dp, "assigned-addresses", &len); | ||
196 | if (!prop) { | ||
197 | pcp->num_prom_assignments = 0; | ||
198 | } else { | ||
199 | memcpy(pcp->prom_assignments, prop->value, len); | ||
200 | pcp->num_prom_assignments = | ||
201 | (len / sizeof(pcp->prom_assignments[0])); | ||
202 | } | ||
203 | |||
204 | if (strcmp(dp->name, "ebus") == 0) { | ||
205 | struct linux_prom_ebus_ranges *erng; | ||
206 | int iter; | ||
207 | |||
208 | /* EBUS is special... */ | ||
209 | prop = of_find_property(dp, "ranges", &len); | ||
210 | if (!prop) { | ||
211 | prom_printf("EBUS: Fatal error, no range property\n"); | ||
212 | prom_halt(); | ||
213 | } | ||
214 | erng = prop->value; | ||
215 | len = (len / sizeof(erng[0])); | ||
216 | for (iter = 0; iter < len; iter++) { | ||
217 | struct linux_prom_ebus_ranges *ep = &erng[iter]; | ||
218 | struct linux_prom_pci_registers *ap; | ||
219 | |||
220 | ap = &pcp->prom_assignments[iter]; | ||
221 | |||
222 | ap->phys_hi = ep->parent_phys_hi; | ||
223 | ap->phys_mid = ep->parent_phys_mid; | ||
224 | ap->phys_lo = ep->parent_phys_lo; | ||
225 | ap->size_hi = 0; | ||
226 | ap->size_lo = ep->size; | ||
227 | } | ||
228 | pcp->num_prom_assignments = len; | ||
229 | } | ||
230 | |||
231 | fixup_obp_assignments(pdev, pcp); | ||
232 | |||
233 | pdev->sysdata = pcp; | ||
234 | |||
235 | /* we don't really care if we can create this file or not, | ||
236 | * but we need to assign the result of the call or the world will fall | ||
237 | * under alien invasion and everybody will be frozen on a spaceship | ||
238 | * ready to be eaten on alpha centauri by some green and jelly humanoid. | ||
239 | */ | ||
240 | err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_obppath.attr); | ||
241 | } | ||
242 | |||
243 | void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus, | ||
244 | struct pci_pbm_info *pbm, | ||
245 | struct device_node *dp) | ||
246 | { | ||
247 | struct pci_dev *pdev, *pdev_next; | ||
248 | struct pci_bus *this_pbus, *pbus_next; | ||
249 | |||
250 | /* This must be _safe because the cookie fillin | ||
251 | routine can delete devices from the tree. */ | ||
252 | list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list) | ||
253 | pdev_cookie_fillin(pbm, pdev, dp); | ||
254 | |||
255 | list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) { | ||
256 | struct pcidev_cookie *pcp = this_pbus->self->sysdata; | ||
257 | |||
258 | pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node); | ||
259 | } | ||
260 | } | ||
261 | 37 | ||
262 | static void __init bad_assignment(struct pci_dev *pdev, | 38 | p->name = "System ROM"; |
263 | struct linux_prom_pci_registers *ap, | 39 | p->start = mem_res->start + 0xf0000UL; |
264 | struct resource *res, | 40 | p->end = p->start + 0xffffUL; |
265 | int do_prom_halt) | 41 | p->flags = IORESOURCE_BUSY; |
266 | { | 42 | request_resource(mem_res, p); |
267 | prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n", | ||
268 | pdev->bus->number, pdev->devfn); | ||
269 | if (ap) | ||
270 | prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n", | ||
271 | ap->phys_hi, ap->phys_mid, ap->phys_lo, | ||
272 | ap->size_hi, ap->size_lo); | ||
273 | if (res) | ||
274 | prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n", | ||
275 | res->start, res->end, res->flags); | ||
276 | if (do_prom_halt) | ||
277 | prom_halt(); | ||
278 | } | ||
279 | |||
280 | static struct resource * | ||
281 | __init get_root_resource(struct linux_prom_pci_registers *ap, | ||
282 | struct pci_pbm_info *pbm) | ||
283 | { | ||
284 | int space = (ap->phys_hi >> 24) & 3; | ||
285 | |||
286 | switch (space) { | ||
287 | case 0: | ||
288 | /* Configuration space, silently ignore it. */ | ||
289 | return NULL; | ||
290 | |||
291 | case 1: | ||
292 | /* 16-bit IO space */ | ||
293 | return &pbm->io_space; | ||
294 | |||
295 | case 2: | ||
296 | /* 32-bit MEM space */ | ||
297 | return &pbm->mem_space; | ||
298 | |||
299 | case 3: | ||
300 | /* 64-bit MEM space, these are allocated out of | ||
301 | * the 32-bit mem_space range for the PBM, ie. | ||
302 | * we just zero out the upper 32-bits. | ||
303 | */ | ||
304 | return &pbm->mem_space; | ||
305 | |||
306 | default: | ||
307 | printk("PCI: What is resource space %x?\n", space); | ||
308 | return NULL; | ||
309 | }; | ||
310 | } | ||
311 | |||
312 | static struct resource * | ||
313 | __init get_device_resource(struct linux_prom_pci_registers *ap, | ||
314 | struct pci_dev *pdev) | ||
315 | { | ||
316 | struct resource *res; | ||
317 | int breg = (ap->phys_hi & 0xff); | ||
318 | |||
319 | switch (breg) { | ||
320 | case PCI_ROM_ADDRESS: | ||
321 | /* Unfortunately I have seen several cases where | ||
322 | * buggy FCODE uses a space value of '1' (I/O space) | ||
323 | * in the register property for the ROM address | ||
324 | * so disable this sanity check for now. | ||
325 | */ | ||
326 | #if 0 | ||
327 | { | ||
328 | int space = (ap->phys_hi >> 24) & 3; | ||
329 | |||
330 | /* It had better be MEM space. */ | ||
331 | if (space != 2) | ||
332 | bad_assignment(pdev, ap, NULL, 0); | ||
333 | } | ||
334 | #endif | ||
335 | res = &pdev->resource[PCI_ROM_RESOURCE]; | ||
336 | break; | ||
337 | |||
338 | case PCI_BASE_ADDRESS_0: | ||
339 | case PCI_BASE_ADDRESS_1: | ||
340 | case PCI_BASE_ADDRESS_2: | ||
341 | case PCI_BASE_ADDRESS_3: | ||
342 | case PCI_BASE_ADDRESS_4: | ||
343 | case PCI_BASE_ADDRESS_5: | ||
344 | res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4]; | ||
345 | break; | ||
346 | |||
347 | default: | ||
348 | bad_assignment(pdev, ap, NULL, 0); | ||
349 | res = NULL; | ||
350 | break; | ||
351 | }; | ||
352 | |||
353 | return res; | ||
354 | } | ||
355 | |||
356 | static void __init pdev_record_assignments(struct pci_pbm_info *pbm, | ||
357 | struct pci_dev *pdev) | ||
358 | { | ||
359 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
360 | int i; | ||
361 | |||
362 | for (i = 0; i < pcp->num_prom_assignments; i++) { | ||
363 | struct linux_prom_pci_registers *ap; | ||
364 | struct resource *root, *res; | ||
365 | |||
366 | /* The format of this property is specified in | ||
367 | * the PCI Bus Binding to IEEE1275-1994. | ||
368 | */ | ||
369 | ap = &pcp->prom_assignments[i]; | ||
370 | root = get_root_resource(ap, pbm); | ||
371 | res = get_device_resource(ap, pdev); | ||
372 | if (root == NULL || res == NULL || | ||
373 | res->flags == 0) | ||
374 | continue; | ||
375 | |||
376 | /* Ok we know which resource this PROM assignment is | ||
377 | * for, sanity check it. | ||
378 | */ | ||
379 | if ((res->start & 0xffffffffUL) != ap->phys_lo) | ||
380 | bad_assignment(pdev, ap, res, 1); | ||
381 | |||
382 | /* If it is a 64-bit MEM space assignment, verify that | ||
383 | * the resource is too and that the upper 32-bits match. | ||
384 | */ | ||
385 | if (((ap->phys_hi >> 24) & 3) == 3) { | ||
386 | if (((res->flags & IORESOURCE_MEM) == 0) || | ||
387 | ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
388 | != PCI_BASE_ADDRESS_MEM_TYPE_64)) | ||
389 | bad_assignment(pdev, ap, res, 1); | ||
390 | if ((res->start >> 32) != ap->phys_mid) | ||
391 | bad_assignment(pdev, ap, res, 1); | ||
392 | |||
393 | /* PBM cannot generate cpu initiated PIOs | ||
394 | * to the full 64-bit space. Therefore the | ||
395 | * upper 32-bits better be zero. If it is | ||
396 | * not, just skip it and we will assign it | ||
397 | * properly ourselves. | ||
398 | */ | ||
399 | if ((res->start >> 32) != 0UL) { | ||
400 | printk(KERN_ERR "PCI: OBP assigns out of range MEM address " | ||
401 | "%016lx for region %ld on device %s\n", | ||
402 | res->start, (res - &pdev->resource[0]), pci_name(pdev)); | ||
403 | continue; | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /* Adjust the resource into the physical address space | ||
408 | * of this PBM. | ||
409 | */ | ||
410 | pbm->parent->resource_adjust(pdev, res, root); | ||
411 | |||
412 | if (request_resource(root, res) < 0) { | ||
413 | int rnum; | ||
414 | |||
415 | /* OK, there is some conflict. But this is fine | ||
416 | * since we'll reassign it in the fixup pass. | ||
417 | * | ||
418 | * Do not print the warning for ROM resources | ||
419 | * as such a conflict is quite common and | ||
420 | * harmless as the ROM bar is disabled. | ||
421 | */ | ||
422 | rnum = (res - &pdev->resource[0]); | ||
423 | if (rnum != PCI_ROM_RESOURCE) | ||
424 | printk(KERN_ERR "PCI: Resource collision, " | ||
425 | "region %d " | ||
426 | "[%016lx:%016lx] of device %s\n", | ||
427 | rnum, | ||
428 | res->start, res->end, | ||
429 | pci_name(pdev)); | ||
430 | } | ||
431 | } | ||
432 | } | ||
433 | |||
434 | void __init pci_record_assignments(struct pci_pbm_info *pbm, | ||
435 | struct pci_bus *pbus) | ||
436 | { | ||
437 | struct pci_dev *dev; | ||
438 | struct pci_bus *bus; | ||
439 | 43 | ||
440 | list_for_each_entry(dev, &pbus->devices, bus_list) | 44 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
441 | pdev_record_assignments(pbm, dev); | 45 | if (!p) |
46 | return; | ||
442 | 47 | ||
443 | list_for_each_entry(bus, &pbus->children, node) | 48 | p->name = "Video ROM"; |
444 | pci_record_assignments(pbm, bus); | 49 | p->start = mem_res->start + 0xc0000UL; |
50 | p->end = p->start + 0x7fffUL; | ||
51 | p->flags = IORESOURCE_BUSY; | ||
52 | request_resource(mem_res, p); | ||
445 | } | 53 | } |
446 | 54 | ||
447 | /* Return non-zero if PDEV has implicit I/O resources even | 55 | static void pci_register_iommu_region(struct pci_pbm_info *pbm) |
448 | * though it may not have an I/O base address register | ||
449 | * active. | ||
450 | */ | ||
451 | static int __init has_implicit_io(struct pci_dev *pdev) | ||
452 | { | 56 | { |
453 | int class = pdev->class >> 8; | 57 | const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL); |
454 | 58 | ||
455 | if (class == PCI_CLASS_NOT_DEFINED || | 59 | if (vdma) { |
456 | class == PCI_CLASS_NOT_DEFINED_VGA || | 60 | struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL); |
457 | class == PCI_CLASS_STORAGE_IDE || | ||
458 | (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) | ||
459 | return 1; | ||
460 | 61 | ||
461 | return 0; | 62 | if (!rp) { |
462 | } | 63 | prom_printf("Cannot allocate IOMMU resource.\n"); |
463 | |||
464 | static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm, | ||
465 | struct pci_dev *pdev) | ||
466 | { | ||
467 | u32 reg; | ||
468 | u16 cmd; | ||
469 | int i, io_seen, mem_seen; | ||
470 | |||
471 | io_seen = mem_seen = 0; | ||
472 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
473 | struct resource *root, *res; | ||
474 | unsigned long size, min, max, align; | ||
475 | |||
476 | res = &pdev->resource[i]; | ||
477 | |||
478 | if (res->flags & IORESOURCE_IO) | ||
479 | io_seen++; | ||
480 | else if (res->flags & IORESOURCE_MEM) | ||
481 | mem_seen++; | ||
482 | |||
483 | /* If it is already assigned or the resource does | ||
484 | * not exist, there is nothing to do. | ||
485 | */ | ||
486 | if (res->parent != NULL || res->flags == 0UL) | ||
487 | continue; | ||
488 | |||
489 | /* Determine the root we allocate from. */ | ||
490 | if (res->flags & IORESOURCE_IO) { | ||
491 | root = &pbm->io_space; | ||
492 | min = root->start + 0x400UL; | ||
493 | max = root->end; | ||
494 | } else { | ||
495 | root = &pbm->mem_space; | ||
496 | min = root->start; | ||
497 | max = min + 0x80000000UL; | ||
498 | } | ||
499 | |||
500 | size = res->end - res->start; | ||
501 | align = size + 1; | ||
502 | if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) { | ||
503 | /* uh oh */ | ||
504 | prom_printf("PCI: Failed to allocate resource %d for %s\n", | ||
505 | i, pci_name(pdev)); | ||
506 | prom_halt(); | 64 | prom_halt(); |
507 | } | 65 | } |
508 | 66 | rp->name = "IOMMU"; | |
509 | /* Update PCI config space. */ | 67 | rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; |
510 | pbm->parent->base_address_update(pdev, i); | 68 | rp->end = rp->start + (unsigned long) vdma[1] - 1UL; |
511 | } | 69 | rp->flags = IORESOURCE_BUSY; |
512 | 70 | request_resource(&pbm->mem_space, rp); | |
513 | /* Special case, disable the ROM. Several devices | ||
514 | * act funny (ie. do not respond to memory space writes) | ||
515 | * when it is left enabled. A good example are Qlogic,ISP | ||
516 | * adapters. | ||
517 | */ | ||
518 | pci_read_config_dword(pdev, PCI_ROM_ADDRESS, ®); | ||
519 | reg &= ~PCI_ROM_ADDRESS_ENABLE; | ||
520 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg); | ||
521 | |||
522 | /* If we saw I/O or MEM resources, enable appropriate | ||
523 | * bits in PCI command register. | ||
524 | */ | ||
525 | if (io_seen || mem_seen) { | ||
526 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
527 | if (io_seen || has_implicit_io(pdev)) | ||
528 | cmd |= PCI_COMMAND_IO; | ||
529 | if (mem_seen) | ||
530 | cmd |= PCI_COMMAND_MEMORY; | ||
531 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | ||
532 | } | ||
533 | |||
534 | /* If this is a PCI bridge or an IDE controller, | ||
535 | * enable bus mastering. In the former case also | ||
536 | * set the cache line size correctly. | ||
537 | */ | ||
538 | if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) || | ||
539 | (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) && | ||
540 | ((pdev->class & 0x80) != 0))) { | ||
541 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
542 | cmd |= PCI_COMMAND_MASTER; | ||
543 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | ||
544 | |||
545 | if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) | ||
546 | pci_write_config_byte(pdev, | ||
547 | PCI_CACHE_LINE_SIZE, | ||
548 | (64 / sizeof(u32))); | ||
549 | } | 71 | } |
550 | } | 72 | } |
551 | 73 | ||
552 | void __init pci_assign_unassigned(struct pci_pbm_info *pbm, | 74 | void pci_determine_mem_io_space(struct pci_pbm_info *pbm) |
553 | struct pci_bus *pbus) | ||
554 | { | 75 | { |
555 | struct pci_dev *dev; | 76 | const struct linux_prom_pci_ranges *pbm_ranges; |
556 | struct pci_bus *bus; | 77 | int i, saw_mem, saw_io; |
557 | 78 | int num_pbm_ranges; | |
558 | list_for_each_entry(dev, &pbus->devices, bus_list) | ||
559 | pdev_assign_unassigned(pbm, dev); | ||
560 | 79 | ||
561 | list_for_each_entry(bus, &pbus->children, node) | 80 | saw_mem = saw_io = 0; |
562 | pci_assign_unassigned(pbm, bus); | 81 | pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); |
563 | } | 82 | num_pbm_ranges = i / sizeof(*pbm_ranges); |
564 | 83 | ||
565 | static void __init pdev_fixup_irq(struct pci_dev *pdev) | 84 | for (i = 0; i < num_pbm_ranges; i++) { |
566 | { | 85 | const struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; |
567 | struct pcidev_cookie *pcp = pdev->sysdata; | 86 | unsigned long a; |
568 | struct of_device *op = pcp->op; | 87 | u32 parent_phys_hi, parent_phys_lo; |
88 | int type; | ||
569 | 89 | ||
570 | if (op->irqs[0] == 0xffffffff) { | 90 | parent_phys_hi = pr->parent_phys_hi; |
571 | pdev->irq = PCI_IRQ_NONE; | 91 | parent_phys_lo = pr->parent_phys_lo; |
572 | return; | 92 | if (tlb_type == hypervisor) |
573 | } | 93 | parent_phys_hi &= 0x0fffffff; |
574 | 94 | ||
575 | pdev->irq = op->irqs[0]; | 95 | type = (pr->child_phys_hi >> 24) & 0x3; |
96 | a = (((unsigned long)parent_phys_hi << 32UL) | | ||
97 | ((unsigned long)parent_phys_lo << 0UL)); | ||
576 | 98 | ||
577 | pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, | 99 | switch (type) { |
578 | pdev->irq & PCI_IRQ_INO); | 100 | case 0: |
579 | } | 101 | /* PCI config space, 16MB */ |
580 | 102 | pbm->config_space = a; | |
581 | void __init pci_fixup_irq(struct pci_pbm_info *pbm, | 103 | break; |
582 | struct pci_bus *pbus) | ||
583 | { | ||
584 | struct pci_dev *dev; | ||
585 | struct pci_bus *bus; | ||
586 | |||
587 | list_for_each_entry(dev, &pbus->devices, bus_list) | ||
588 | pdev_fixup_irq(dev); | ||
589 | |||
590 | list_for_each_entry(bus, &pbus->children, node) | ||
591 | pci_fixup_irq(pbm, bus); | ||
592 | } | ||
593 | |||
594 | static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz) | ||
595 | { | ||
596 | u16 cmd; | ||
597 | u8 hdr_type, min_gnt, ltimer; | ||
598 | |||
599 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
600 | cmd |= PCI_COMMAND_MASTER; | ||
601 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | ||
602 | |||
603 | /* Read it back, if the mastering bit did not | ||
604 | * get set, the device does not support bus | ||
605 | * mastering so we have nothing to do here. | ||
606 | */ | ||
607 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
608 | if ((cmd & PCI_COMMAND_MASTER) == 0) | ||
609 | return; | ||
610 | |||
611 | /* Set correct cache line size, 64-byte on all | ||
612 | * Sparc64 PCI systems. Note that the value is | ||
613 | * measured in 32-bit words. | ||
614 | */ | ||
615 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, | ||
616 | 64 / sizeof(u32)); | ||
617 | |||
618 | pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type); | ||
619 | hdr_type &= ~0x80; | ||
620 | if (hdr_type != PCI_HEADER_TYPE_NORMAL) | ||
621 | return; | ||
622 | |||
623 | /* If the latency timer is already programmed with a non-zero | ||
624 | * value, assume whoever set it (OBP or whoever) knows what | ||
625 | * they are doing. | ||
626 | */ | ||
627 | pci_read_config_byte(pdev, PCI_LATENCY_TIMER, <imer); | ||
628 | if (ltimer != 0) | ||
629 | return; | ||
630 | |||
631 | /* XXX Since I'm tipping off the min grant value to | ||
632 | * XXX choose a suitable latency timer value, I also | ||
633 | * XXX considered making use of the max latency value | ||
634 | * XXX as well. Unfortunately I've seen too many bogusly | ||
635 | * XXX low settings for it to the point where it lacks | ||
636 | * XXX any usefulness. In one case, an ethernet card | ||
637 | * XXX claimed a min grant of 10 and a max latency of 5. | ||
638 | * XXX Now, if I had two such cards on the same bus I | ||
639 | * XXX could not set the desired burst period (calculated | ||
640 | * XXX from min grant) without violating the max latency | ||
641 | * XXX bound. Duh... | ||
642 | * XXX | ||
643 | * XXX I blame dumb PC bios implementors for stuff like | ||
644 | * XXX this, most of them don't even try to do something | ||
645 | * XXX sensible with latency timer values and just set some | ||
646 | * XXX default value (usually 32) into every device. | ||
647 | */ | ||
648 | |||
649 | pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt); | ||
650 | |||
651 | if (min_gnt == 0) { | ||
652 | /* If no min_gnt setting then use a default | ||
653 | * value. | ||
654 | */ | ||
655 | if (is_66mhz) | ||
656 | ltimer = 16; | ||
657 | else | ||
658 | ltimer = 32; | ||
659 | } else { | ||
660 | int shift_factor; | ||
661 | |||
662 | if (is_66mhz) | ||
663 | shift_factor = 2; | ||
664 | else | ||
665 | shift_factor = 3; | ||
666 | |||
667 | /* Use a default value when the min_gnt value | ||
668 | * is erroneously high. | ||
669 | */ | ||
670 | if (((unsigned int) min_gnt << shift_factor) > 512 || | ||
671 | ((min_gnt << shift_factor) & 0xff) == 0) { | ||
672 | ltimer = 8 << shift_factor; | ||
673 | } else { | ||
674 | ltimer = min_gnt << shift_factor; | ||
675 | } | ||
676 | } | ||
677 | 104 | ||
678 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer); | 105 | case 1: |
679 | } | 106 | /* 16-bit IO space, 16MB */ |
107 | pbm->io_space.start = a; | ||
108 | pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); | ||
109 | pbm->io_space.flags = IORESOURCE_IO; | ||
110 | saw_io = 1; | ||
111 | break; | ||
680 | 112 | ||
681 | void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, | 113 | case 2: |
682 | struct pci_bus *pbus) | 114 | /* 32-bit MEM space, 2GB */ |
683 | { | 115 | pbm->mem_space.start = a; |
684 | struct pci_dev *pdev; | 116 | pbm->mem_space.end = a + (0x80000000UL - 1UL); |
685 | int all_are_66mhz; | 117 | pbm->mem_space.flags = IORESOURCE_MEM; |
686 | u16 status; | 118 | saw_mem = 1; |
119 | break; | ||
687 | 120 | ||
688 | if (pbm->is_66mhz_capable == 0) { | 121 | case 3: |
689 | all_are_66mhz = 0; | 122 | /* XXX 64-bit MEM handling XXX */ |
690 | goto out; | ||
691 | } | ||
692 | 123 | ||
693 | all_are_66mhz = 1; | 124 | default: |
694 | list_for_each_entry(pdev, &pbus->devices, bus_list) { | ||
695 | pci_read_config_word(pdev, PCI_STATUS, &status); | ||
696 | if (!(status & PCI_STATUS_66MHZ)) { | ||
697 | all_are_66mhz = 0; | ||
698 | break; | 125 | break; |
699 | } | 126 | }; |
700 | } | 127 | } |
701 | out: | ||
702 | pbm->all_devs_66mhz = all_are_66mhz; | ||
703 | |||
704 | printk("PCI%d(PBM%c): Bus running at %dMHz\n", | ||
705 | pbm->parent->index, | ||
706 | (pbm == &pbm->parent->pbm_A) ? 'A' : 'B', | ||
707 | (all_are_66mhz ? 66 : 33)); | ||
708 | } | ||
709 | |||
710 | void pci_setup_busmastering(struct pci_pbm_info *pbm, | ||
711 | struct pci_bus *pbus) | ||
712 | { | ||
713 | struct pci_dev *dev; | ||
714 | struct pci_bus *bus; | ||
715 | int is_66mhz; | ||
716 | |||
717 | is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz; | ||
718 | |||
719 | list_for_each_entry(dev, &pbus->devices, bus_list) | ||
720 | pdev_setup_busmastering(dev, is_66mhz); | ||
721 | |||
722 | list_for_each_entry(bus, &pbus->children, node) | ||
723 | pci_setup_busmastering(pbm, bus); | ||
724 | } | ||
725 | |||
726 | void pci_register_legacy_regions(struct resource *io_res, | ||
727 | struct resource *mem_res) | ||
728 | { | ||
729 | struct resource *p; | ||
730 | |||
731 | /* VGA Video RAM. */ | ||
732 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
733 | if (!p) | ||
734 | return; | ||
735 | 128 | ||
736 | p->name = "Video RAM area"; | 129 | if (!saw_io || !saw_mem) { |
737 | p->start = mem_res->start + 0xa0000UL; | 130 | prom_printf("%s: Fatal error, missing %s PBM range.\n", |
738 | p->end = p->start + 0x1ffffUL; | 131 | pbm->name, |
739 | p->flags = IORESOURCE_BUSY; | 132 | (!saw_io ? "IO" : "MEM")); |
740 | request_resource(mem_res, p); | 133 | prom_halt(); |
134 | } | ||
741 | 135 | ||
742 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 136 | printk("%s: PCI IO[%lx] MEM[%lx]\n", |
743 | if (!p) | 137 | pbm->name, |
744 | return; | 138 | pbm->io_space.start, |
139 | pbm->mem_space.start); | ||
745 | 140 | ||
746 | p->name = "System ROM"; | 141 | pbm->io_space.name = pbm->mem_space.name = pbm->name; |
747 | p->start = mem_res->start + 0xf0000UL; | ||
748 | p->end = p->start + 0xffffUL; | ||
749 | p->flags = IORESOURCE_BUSY; | ||
750 | request_resource(mem_res, p); | ||
751 | 142 | ||
752 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 143 | request_resource(&ioport_resource, &pbm->io_space); |
753 | if (!p) | 144 | request_resource(&iomem_resource, &pbm->mem_space); |
754 | return; | ||
755 | 145 | ||
756 | p->name = "Video ROM"; | 146 | pci_register_legacy_regions(&pbm->io_space, |
757 | p->start = mem_res->start + 0xc0000UL; | 147 | &pbm->mem_space); |
758 | p->end = p->start + 0x7fffUL; | 148 | pci_register_iommu_region(pbm); |
759 | p->flags = IORESOURCE_BUSY; | ||
760 | request_resource(mem_res, p); | ||
761 | } | 149 | } |
762 | 150 | ||
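The new pci_determine_mem_io_space() above classifies each PBM "ranges" entry by bits 24-25 of the child phys.hi (0 = config space, 1 = I/O, 2 = 32-bit MEM, 3 = 64-bit MEM) and rebuilds the parent CPU base from the two parent-address cells. A standalone sketch of that decode step; the cell values are hypothetical:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the per-range decode in pci_determine_mem_io_space(). */
int main(void)
{
	uint32_t child_phys_hi  = 0x02000000;   /* type 2: 32-bit MEM */
	uint32_t parent_phys_hi = 0x000007ff;
	uint32_t parent_phys_lo = 0xe0000000;

	int type = (child_phys_hi >> 24) & 0x3;
	uint64_t a = ((uint64_t)parent_phys_hi << 32) |
		     (uint64_t)parent_phys_lo;

	printf("type %d, CPU base %#llx\n", type, (unsigned long long)a);
	/* a 32-bit MEM range would then span a .. a + 0x80000000 - 1 */
	return 0;
}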
763 | /* Generic helper routines for PCI error reporting. */ | 151 | /* Generic helper routines for PCI error reporting. */ |
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h index 971e2bea30b4..1208583fcb83 100644 --- a/arch/sparc64/kernel/pci_impl.h +++ b/arch/sparc64/kernel/pci_impl.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $ | 1 | /* pci_impl.h: Helper definitions for PCI controller support. |
2 | * pci_impl.h: Helper definitions for PCI controller support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef PCI_IMPL_H | 6 | #ifndef PCI_IMPL_H |
@@ -13,26 +12,22 @@ | |||
13 | #include <asm/prom.h> | 12 | #include <asm/prom.h> |
14 | 13 | ||
15 | extern struct pci_controller_info *pci_controller_root; | 14 | extern struct pci_controller_info *pci_controller_root; |
15 | extern unsigned long pci_memspace_mask; | ||
16 | 16 | ||
17 | extern int pci_num_controllers; | 17 | extern int pci_num_controllers; |
18 | 18 | ||
19 | /* PCI bus scanning and fixup support. */ | 19 | /* PCI bus scanning and fixup support. */ |
20 | extern void pci_fixup_host_bridge_self(struct pci_bus *pbus); | 20 | extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm); |
21 | extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus, | 21 | extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm); |
22 | struct pci_pbm_info *pbm, | 22 | |
23 | struct device_node *prom_node); | 23 | extern int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev, |
24 | extern void pci_record_assignments(struct pci_pbm_info *pbm, | 24 | unsigned int devfn, |
25 | struct pci_bus *pbus); | 25 | int where, int size, |
26 | extern void pci_assign_unassigned(struct pci_pbm_info *pbm, | 26 | u32 *value); |
27 | struct pci_bus *pbus); | 27 | extern int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev, |
28 | extern void pci_fixup_irq(struct pci_pbm_info *pbm, | 28 | unsigned int devfn, |
29 | struct pci_bus *pbus); | 29 | int where, int size, |
30 | extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, | 30 | u32 value); |
31 | struct pci_bus *pbus); | ||
32 | extern void pci_setup_busmastering(struct pci_pbm_info *pbm, | ||
33 | struct pci_bus *pbus); | ||
34 | extern void pci_register_legacy_regions(struct resource *io_res, | ||
35 | struct resource *mem_res); | ||
36 | 31 | ||
37 | /* Error reporting support. */ | 32 | /* Error reporting support. */ |
38 | extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); | 33 | extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); |
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 7aca0f33f885..66712772f494 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $ | 1 | /* pci_iommu.c: UltraSparc PCI controller IOM/STC support. |
2 | * pci_iommu.c: UltraSparc PCI controller IOM/STC support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) | 4 | * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) |
6 | */ | 5 | */ |
7 | 6 | ||
@@ -36,7 +35,7 @@ | |||
36 | "i" (ASI_PHYS_BYPASS_EC_E)) | 35 | "i" (ASI_PHYS_BYPASS_EC_E)) |
37 | 36 | ||
38 | /* Must be invoked under the IOMMU lock. */ | 37 | /* Must be invoked under the IOMMU lock. */ |
39 | static void __iommu_flushall(struct pci_iommu *iommu) | 38 | static void __iommu_flushall(struct iommu *iommu) |
40 | { | 39 | { |
41 | unsigned long tag; | 40 | unsigned long tag; |
42 | int entry; | 41 | int entry; |
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu) | |||
64 | #define IOPTE_IS_DUMMY(iommu, iopte) \ | 63 | #define IOPTE_IS_DUMMY(iommu, iopte) \ |
65 | ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) | 64 | ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) |
66 | 65 | ||
67 | static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) | 66 | static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) |
68 | { | 67 | { |
69 | unsigned long val = iopte_val(*iopte); | 68 | unsigned long val = iopte_val(*iopte); |
70 | 69 | ||
@@ -75,9 +74,9 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) | |||
75 | } | 74 | } |
76 | 75 | ||
77 | /* Based largely upon the ppc64 iommu allocator. */ | 76 | /* Based largely upon the ppc64 iommu allocator. */ |
78 | static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages) | 77 | static long pci_arena_alloc(struct iommu *iommu, unsigned long npages) |
79 | { | 78 | { |
80 | struct pci_iommu_arena *arena = &iommu->arena; | 79 | struct iommu_arena *arena = &iommu->arena; |
81 | unsigned long n, i, start, end, limit; | 80 | unsigned long n, i, start, end, limit; |
82 | int pass; | 81 | int pass; |
83 | 82 | ||
@@ -116,7 +115,7 @@ again: | |||
116 | return n; | 115 | return n; |
117 | } | 116 | } |
118 | 117 | ||
119 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | 118 | static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) |
120 | { | 119 | { |
121 | unsigned long i; | 120 | unsigned long i; |
122 | 121 | ||
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un | |||
124 | __clear_bit(i, arena->map); | 123 | __clear_bit(i, arena->map); |
125 | } | 124 | } |
126 | 125 | ||
127 | void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) | 126 | void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) |
128 | { | 127 | { |
129 | unsigned long i, tsbbase, order, sz, num_tsb_entries; | 128 | unsigned long i, tsbbase, order, sz, num_tsb_entries; |
130 | 129 | ||
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, | |||
170 | iopte_make_dummy(iommu, &iommu->page_table[i]); | 169 | iopte_make_dummy(iommu, &iommu->page_table[i]); |
171 | } | 170 | } |
172 | 171 | ||
173 | static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages) | 172 | static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages) |
174 | { | 173 | { |
175 | long entry; | 174 | long entry; |
176 | 175 | ||
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage | |||
181 | return iommu->page_table + entry; | 180 | return iommu->page_table + entry; |
182 | } | 181 | } |
183 | 182 | ||
184 | static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages) | 183 | static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages) |
185 | { | 184 | { |
186 | pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); | 185 | pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); |
187 | } | 186 | } |
188 | 187 | ||
189 | static int iommu_alloc_ctx(struct pci_iommu *iommu) | 188 | static int iommu_alloc_ctx(struct iommu *iommu) |
190 | { | 189 | { |
191 | int lowest = iommu->ctx_lowest_free; | 190 | int lowest = iommu->ctx_lowest_free; |
192 | int sz = IOMMU_NUM_CTXS - lowest; | 191 | int sz = IOMMU_NUM_CTXS - lowest; |
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu) | |||
205 | return n; | 204 | return n; |
206 | } | 205 | } |
207 | 206 | ||
208 | static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) | 207 | static inline void iommu_free_ctx(struct iommu *iommu, int ctx) |
209 | { | 208 | { |
210 | if (likely(ctx)) { | 209 | if (likely(ctx)) { |
211 | __clear_bit(ctx, iommu->ctx_bitmap); | 210 | __clear_bit(ctx, iommu->ctx_bitmap); |
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) | |||
220 | */ | 219 | */ |
221 | static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) | 220 | static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) |
222 | { | 221 | { |
223 | struct pcidev_cookie *pcp; | 222 | struct iommu *iommu; |
224 | struct pci_iommu *iommu; | ||
225 | iopte_t *iopte; | 223 | iopte_t *iopte; |
226 | unsigned long flags, order, first_page; | 224 | unsigned long flags, order, first_page; |
227 | void *ret; | 225 | void *ret; |
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr | |||
237 | return NULL; | 235 | return NULL; |
238 | memset((char *)first_page, 0, PAGE_SIZE << order); | 236 | memset((char *)first_page, 0, PAGE_SIZE << order); |
239 | 237 | ||
240 | pcp = pdev->sysdata; | 238 | iommu = pdev->dev.archdata.iommu; |
241 | iommu = pcp->pbm->iommu; | ||
242 | 239 | ||
243 | spin_lock_irqsave(&iommu->lock, flags); | 240 | spin_lock_irqsave(&iommu->lock, flags); |
244 | iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); | 241 | iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); |
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr | |||
268 | /* Free and unmap a consistent DMA translation. */ | 265 | /* Free and unmap a consistent DMA translation. */ |
269 | static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | 266 | static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) |
270 | { | 267 | { |
271 | struct pcidev_cookie *pcp; | 268 | struct iommu *iommu; |
272 | struct pci_iommu *iommu; | ||
273 | iopte_t *iopte; | 269 | iopte_t *iopte; |
274 | unsigned long flags, order, npages; | 270 | unsigned long flags, order, npages; |
275 | 271 | ||
276 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 272 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
277 | pcp = pdev->sysdata; | 273 | iommu = pdev->dev.archdata.iommu; |
278 | iommu = pcp->pbm->iommu; | ||
279 | iopte = iommu->page_table + | 274 | iopte = iommu->page_table + |
280 | ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | 275 | ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
281 | 276 | ||
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, | |||
295 | */ | 290 | */ |
296 | static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | 291 | static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) |
297 | { | 292 | { |
298 | struct pcidev_cookie *pcp; | 293 | struct iommu *iommu; |
299 | struct pci_iommu *iommu; | 294 | struct strbuf *strbuf; |
300 | struct pci_strbuf *strbuf; | ||
301 | iopte_t *base; | 295 | iopte_t *base; |
302 | unsigned long flags, npages, oaddr; | 296 | unsigned long flags, npages, oaddr; |
303 | unsigned long i, base_paddr, ctx; | 297 | unsigned long i, base_paddr, ctx; |
304 | u32 bus_addr, ret; | 298 | u32 bus_addr, ret; |
305 | unsigned long iopte_protection; | 299 | unsigned long iopte_protection; |
306 | 300 | ||
307 | pcp = pdev->sysdata; | 301 | iommu = pdev->dev.archdata.iommu; |
308 | iommu = pcp->pbm->iommu; | 302 | strbuf = pdev->dev.archdata.stc; |
309 | strbuf = &pcp->pbm->stc; | ||
310 | 303 | ||
311 | if (unlikely(direction == PCI_DMA_NONE)) | 304 | if (unlikely(direction == PCI_DMA_NONE)) |
312 | goto bad_no_ctx; | 305 | goto bad_no_ctx; |
@@ -349,7 +342,7 @@ bad_no_ctx: | |||
349 | return PCI_DMA_ERROR_CODE; | 342 | return PCI_DMA_ERROR_CODE; |
350 | } | 343 | } |
351 | 344 | ||
352 | static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction) | 345 | static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction) |
353 | { | 346 | { |
354 | int limit; | 347 | int limit; |
355 | 348 | ||
@@ -416,9 +409,8 @@ do_flush_sync: | |||
416 | /* Unmap a single streaming mode DMA translation. */ | 409 | /* Unmap a single streaming mode DMA translation. */ |
417 | static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 410 | static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
418 | { | 411 | { |
419 | struct pcidev_cookie *pcp; | 412 | struct iommu *iommu; |
420 | struct pci_iommu *iommu; | 413 | struct strbuf *strbuf; |
421 | struct pci_strbuf *strbuf; | ||
422 | iopte_t *base; | 414 | iopte_t *base; |
423 | unsigned long flags, npages, ctx, i; | 415 | unsigned long flags, npages, ctx, i; |
424 | 416 | ||
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ | |||
428 | return; | 420 | return; |
429 | } | 421 | } |
430 | 422 | ||
431 | pcp = pdev->sysdata; | 423 | iommu = pdev->dev.archdata.iommu; |
432 | iommu = pcp->pbm->iommu; | 424 | strbuf = pdev->dev.archdata.stc; |
433 | strbuf = &pcp->pbm->stc; | ||
434 | 425 | ||
435 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | 426 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); |
436 | npages >>= IO_PAGE_SHIFT; | 427 | npages >>= IO_PAGE_SHIFT; |
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
549 | */ | 540 | */ |
550 | static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 541 | static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
551 | { | 542 | { |
552 | struct pcidev_cookie *pcp; | 543 | struct iommu *iommu; |
553 | struct pci_iommu *iommu; | 544 | struct strbuf *strbuf; |
554 | struct pci_strbuf *strbuf; | ||
555 | unsigned long flags, ctx, npages, iopte_protection; | 545 | unsigned long flags, ctx, npages, iopte_protection; |
556 | iopte_t *base; | 546 | iopte_t *base; |
557 | u32 dma_base; | 547 | u32 dma_base; |
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n | |||
570 | return 1; | 560 | return 1; |
571 | } | 561 | } |
572 | 562 | ||
573 | pcp = pdev->sysdata; | 563 | iommu = pdev->dev.archdata.iommu; |
574 | iommu = pcp->pbm->iommu; | 564 | strbuf = pdev->dev.archdata.stc; |
575 | strbuf = &pcp->pbm->stc; | ||
576 | 565 | ||
577 | if (unlikely(direction == PCI_DMA_NONE)) | 566 | if (unlikely(direction == PCI_DMA_NONE)) |
578 | goto bad_no_ctx; | 567 | goto bad_no_ctx; |
@@ -636,9 +625,8 @@ bad_no_ctx: | |||
636 | /* Unmap a set of streaming mode DMA translations. */ | 625 | /* Unmap a set of streaming mode DMA translations. */ |
637 | static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 626 | static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
638 | { | 627 | { |
639 | struct pcidev_cookie *pcp; | 628 | struct iommu *iommu; |
640 | struct pci_iommu *iommu; | 629 | struct strbuf *strbuf; |
641 | struct pci_strbuf *strbuf; | ||
642 | iopte_t *base; | 630 | iopte_t *base; |
643 | unsigned long flags, ctx, i, npages; | 631 | unsigned long flags, ctx, i, npages; |
644 | u32 bus_addr; | 632 | u32 bus_addr; |
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in | |||
648 | WARN_ON(1); | 636 | WARN_ON(1); |
649 | } | 637 | } |
650 | 638 | ||
651 | pcp = pdev->sysdata; | 639 | iommu = pdev->dev.archdata.iommu; |
652 | iommu = pcp->pbm->iommu; | 640 | strbuf = pdev->dev.archdata.stc; |
653 | strbuf = &pcp->pbm->stc; | ||
654 | 641 | ||
655 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | 642 | bus_addr = sglist->dma_address & IO_PAGE_MASK; |
656 | 643 | ||
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in | |||
696 | */ | 683 | */ |
697 | static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 684 | static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
698 | { | 685 | { |
699 | struct pcidev_cookie *pcp; | 686 | struct iommu *iommu; |
700 | struct pci_iommu *iommu; | 687 | struct strbuf *strbuf; |
701 | struct pci_strbuf *strbuf; | ||
702 | unsigned long flags, ctx, npages; | 688 | unsigned long flags, ctx, npages; |
703 | 689 | ||
704 | pcp = pdev->sysdata; | 690 | iommu = pdev->dev.archdata.iommu; |
705 | iommu = pcp->pbm->iommu; | 691 | strbuf = pdev->dev.archdata.stc; |
706 | strbuf = &pcp->pbm->stc; | ||
707 | 692 | ||
708 | if (!strbuf->strbuf_enabled) | 693 | if (!strbuf->strbuf_enabled) |
709 | return; | 694 | return; |
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_ | |||
736 | */ | 721 | */ |
737 | static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 722 | static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
738 | { | 723 | { |
739 | struct pcidev_cookie *pcp; | 724 | struct iommu *iommu; |
740 | struct pci_iommu *iommu; | 725 | struct strbuf *strbuf; |
741 | struct pci_strbuf *strbuf; | ||
742 | unsigned long flags, ctx, npages, i; | 726 | unsigned long flags, ctx, npages, i; |
743 | u32 bus_addr; | 727 | u32 bus_addr; |
744 | 728 | ||
745 | pcp = pdev->sysdata; | 729 | iommu = pdev->dev.archdata.iommu; |
746 | iommu = pcp->pbm->iommu; | 730 | strbuf = pdev->dev.archdata.stc; |
747 | strbuf = &pcp->pbm->stc; | ||
748 | 731 | ||
749 | if (!strbuf->strbuf_enabled) | 732 | if (!strbuf->strbuf_enabled) |
750 | return; | 733 | return; |
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist | |||
775 | spin_unlock_irqrestore(&iommu->lock, flags); | 758 | spin_unlock_irqrestore(&iommu->lock, flags); |
776 | } | 759 | } |
777 | 760 | ||
778 | struct pci_iommu_ops pci_sun4u_iommu_ops = { | 761 | const struct pci_iommu_ops pci_sun4u_iommu_ops = { |
779 | .alloc_consistent = pci_4u_alloc_consistent, | 762 | .alloc_consistent = pci_4u_alloc_consistent, |
780 | .free_consistent = pci_4u_free_consistent, | 763 | .free_consistent = pci_4u_free_consistent, |
781 | .map_single = pci_4u_map_single, | 764 | .map_single = pci_4u_map_single, |
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) | |||
809 | 792 | ||
810 | int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) | 793 | int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) |
811 | { | 794 | { |
812 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
813 | u64 dma_addr_mask; | 795 | u64 dma_addr_mask; |
814 | 796 | ||
815 | if (pdev == NULL) { | 797 | if (pdev == NULL) { |
816 | dma_addr_mask = 0xffffffff; | 798 | dma_addr_mask = 0xffffffff; |
817 | } else { | 799 | } else { |
818 | struct pci_iommu *iommu = pcp->pbm->iommu; | 800 | struct iommu *iommu = pdev->dev.archdata.iommu; |
819 | 801 | ||
820 | dma_addr_mask = iommu->dma_addr_mask; | 802 | dma_addr_mask = iommu->dma_addr_mask; |
821 | 803 | ||
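The pci_iommu.c hunks above all make the same substitution: instead of chasing pdev->sysdata to a pcidev_cookie and on to the PBM, the DMA routines now read the iommu and strbuf pointers directly from pdev->dev.archdata. Below is a minimal standalone sketch of that lookup change; it is ordinary userspace C with simplified stand-in structs, not the real kernel definitions.

/* Sketch only: models the cookie-vs-archdata lookup from the diff above.
 * Struct names mirror the diff, fields are simplified stand-ins. */
#include <stdio.h>

struct iommu  { unsigned long page_table_map_base; };
struct strbuf { int strbuf_enabled; };

/* Old style: every DMA routine went pdev->sysdata -> cookie -> pbm. */
struct pci_pbm_info  { struct iommu *iommu; struct strbuf stc; };
struct pcidev_cookie { struct pci_pbm_info *pbm; };

/* New style: the device carries the pointers directly in archdata. */
struct dev_archdata  { struct iommu *iommu; struct strbuf *stc; };
struct device        { struct dev_archdata archdata; };
struct pci_dev       { void *sysdata; struct device dev; };

static struct iommu *iommu_old(struct pci_dev *pdev)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        return pcp->pbm->iommu;
}

static struct iommu *iommu_new(struct pci_dev *pdev)
{
        return pdev->dev.archdata.iommu;
}

int main(void)
{
        struct iommu im = { .page_table_map_base = 0x10000 };
        struct pci_pbm_info pbm = { .iommu = &im };
        struct pcidev_cookie pcp = { .pbm = &pbm };
        struct pci_dev pdev = {
                .sysdata = &pcp,
                .dev.archdata.iommu = &im,
                .dev.archdata.stc = &pbm.stc,
        };

        /* Both paths resolve to the same iommu; the new one skips a hop. */
        printf("old=%p new=%p\n",
               (void *)iommu_old(&pdev), (void *)iommu_new(&pdev));
        return 0;
}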
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index fda5db223d96..253d40ec2245 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $ | 1 | /* pci_psycho.c: PSYCHO/U2P specific PCI controller support. |
2 | * pci_psycho.c: PSYCHO/U2P specific PCI controller support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) | 3 | * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) | 5 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) |
7 | */ | 6 | */ |
@@ -119,6 +118,10 @@ static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
119 | u16 tmp16; | 118 | u16 tmp16; |
120 | u8 tmp8; | 119 | u8 tmp8; |
121 | 120 | ||
121 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
122 | return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, | ||
123 | size, value); | ||
124 | |||
122 | switch (size) { | 125 | switch (size) { |
123 | case 1: | 126 | case 1: |
124 | *value = 0xff; | 127 | *value = 0xff; |
@@ -172,6 +175,9 @@ static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
172 | unsigned char bus = bus_dev->number; | 175 | unsigned char bus = bus_dev->number; |
173 | u32 *addr; | 176 | u32 *addr; |
174 | 177 | ||
178 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
179 | return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, | ||
180 | size, value); | ||
175 | addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where); | 181 | addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where); |
176 | if (!addr) | 182 | if (!addr) |
177 | return PCIBIOS_SUCCESSFUL; | 183 | return PCIBIOS_SUCCESSFUL; |
@@ -263,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p, | |||
263 | struct pci_pbm_info *pbm, | 269 | struct pci_pbm_info *pbm, |
264 | int is_pbm_a) | 270 | int is_pbm_a) |
265 | { | 271 | { |
266 | struct pci_strbuf *strbuf = &pbm->stc; | 272 | struct strbuf *strbuf = &pbm->stc; |
267 | unsigned long regbase = p->pbm_A.controller_regs; | 273 | unsigned long regbase = p->pbm_A.controller_regs; |
268 | unsigned long err_base, tag_base, line_base; | 274 | unsigned long err_base, tag_base, line_base; |
269 | u64 control; | 275 | u64 control; |
@@ -412,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p, | |||
412 | unsigned long afar, | 418 | unsigned long afar, |
413 | enum psycho_error_type type) | 419 | enum psycho_error_type type) |
414 | { | 420 | { |
415 | struct pci_iommu *iommu = p->pbm_A.iommu; | 421 | struct iommu *iommu = p->pbm_A.iommu; |
416 | unsigned long iommu_tag[16]; | 422 | unsigned long iommu_tag[16]; |
417 | unsigned long iommu_data[16]; | 423 | unsigned long iommu_data[16]; |
418 | unsigned long flags; | 424 | unsigned long flags; |
@@ -895,59 +901,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p) | |||
895 | } | 901 | } |
896 | 902 | ||
897 | /* PSYCHO boot time probing and initialization. */ | 903 | /* PSYCHO boot time probing and initialization. */ |
898 | static void psycho_resource_adjust(struct pci_dev *pdev, | ||
899 | struct resource *res, | ||
900 | struct resource *root) | ||
901 | { | ||
902 | res->start += root->start; | ||
903 | res->end += root->start; | ||
904 | } | ||
905 | |||
906 | static void psycho_base_address_update(struct pci_dev *pdev, int resource) | ||
907 | { | ||
908 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
909 | struct pci_pbm_info *pbm = pcp->pbm; | ||
910 | struct resource *res, *root; | ||
911 | u32 reg; | ||
912 | int where, size, is_64bit; | ||
913 | |||
914 | res = &pdev->resource[resource]; | ||
915 | if (resource < 6) { | ||
916 | where = PCI_BASE_ADDRESS_0 + (resource * 4); | ||
917 | } else if (resource == PCI_ROM_RESOURCE) { | ||
918 | where = pdev->rom_base_reg; | ||
919 | } else { | ||
920 | /* Somebody might have asked allocation of a non-standard resource */ | ||
921 | return; | ||
922 | } | ||
923 | |||
924 | is_64bit = 0; | ||
925 | if (res->flags & IORESOURCE_IO) | ||
926 | root = &pbm->io_space; | ||
927 | else { | ||
928 | root = &pbm->mem_space; | ||
929 | if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
930 | == PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
931 | is_64bit = 1; | ||
932 | } | ||
933 | |||
934 | size = res->end - res->start; | ||
935 | pci_read_config_dword(pdev, where, ®); | ||
936 | reg = ((reg & size) | | ||
937 | (((u32)(res->start - root->start)) & ~size)); | ||
938 | if (resource == PCI_ROM_RESOURCE) { | ||
939 | reg |= PCI_ROM_ADDRESS_ENABLE; | ||
940 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
941 | } | ||
942 | pci_write_config_dword(pdev, where, reg); | ||
943 | |||
944 | /* This knows that the upper 32-bits of the address | ||
945 | * must be zero. Our PCI common layer enforces this. | ||
946 | */ | ||
947 | if (is_64bit) | ||
948 | pci_write_config_dword(pdev, where + 4, 0); | ||
949 | } | ||
950 | |||
951 | static void pbm_config_busmastering(struct pci_pbm_info *pbm) | 904 | static void pbm_config_busmastering(struct pci_pbm_info *pbm) |
952 | { | 905 | { |
953 | u8 *addr; | 906 | u8 *addr; |
@@ -968,28 +921,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
968 | static void pbm_scan_bus(struct pci_controller_info *p, | 921 | static void pbm_scan_bus(struct pci_controller_info *p, |
969 | struct pci_pbm_info *pbm) | 922 | struct pci_pbm_info *pbm) |
970 | { | 923 | { |
971 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | 924 | pbm->pci_bus = pci_scan_one_pbm(pbm); |
972 | |||
973 | if (!cookie) { | ||
974 | prom_printf("PSYCHO: Critical allocation failure.\n"); | ||
975 | prom_halt(); | ||
976 | } | ||
977 | |||
978 | /* All we care about is the PBM. */ | ||
979 | cookie->pbm = pbm; | ||
980 | |||
981 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | ||
982 | p->pci_ops, | ||
983 | pbm); | ||
984 | pci_fixup_host_bridge_self(pbm->pci_bus); | ||
985 | pbm->pci_bus->self->sysdata = cookie; | ||
986 | |||
987 | pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); | ||
988 | pci_record_assignments(pbm, pbm->pci_bus); | ||
989 | pci_assign_unassigned(pbm, pbm->pci_bus); | ||
990 | pci_fixup_irq(pbm, pbm->pci_bus); | ||
991 | pci_determine_66mhz_disposition(pbm, pbm->pci_bus); | ||
992 | pci_setup_busmastering(pbm, pbm->pci_bus); | ||
993 | } | 925 | } |
994 | 926 | ||
995 | static void psycho_scan_bus(struct pci_controller_info *p) | 927 | static void psycho_scan_bus(struct pci_controller_info *p) |
@@ -1009,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p) | |||
1009 | 941 | ||
1010 | static void psycho_iommu_init(struct pci_controller_info *p) | 942 | static void psycho_iommu_init(struct pci_controller_info *p) |
1011 | { | 943 | { |
1012 | struct pci_iommu *iommu = p->pbm_A.iommu; | 944 | struct iommu *iommu = p->pbm_A.iommu; |
1013 | unsigned long i; | 945 | unsigned long i; |
1014 | u64 control; | 946 | u64 control; |
1015 | 947 | ||
@@ -1094,19 +1026,6 @@ static void psycho_controller_hwinit(struct pci_controller_info *p) | |||
1094 | psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp); | 1026 | psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp); |
1095 | } | 1027 | } |
1096 | 1028 | ||
1097 | static void pbm_register_toplevel_resources(struct pci_controller_info *p, | ||
1098 | struct pci_pbm_info *pbm) | ||
1099 | { | ||
1100 | char *name = pbm->name; | ||
1101 | |||
1102 | pbm->io_space.name = pbm->mem_space.name = name; | ||
1103 | |||
1104 | request_resource(&ioport_resource, &pbm->io_space); | ||
1105 | request_resource(&iomem_resource, &pbm->mem_space); | ||
1106 | pci_register_legacy_regions(&pbm->io_space, | ||
1107 | &pbm->mem_space); | ||
1108 | } | ||
1109 | |||
1110 | static void psycho_pbm_strbuf_init(struct pci_controller_info *p, | 1029 | static void psycho_pbm_strbuf_init(struct pci_controller_info *p, |
1111 | struct pci_pbm_info *pbm, | 1030 | struct pci_pbm_info *pbm, |
1112 | int is_pbm_a) | 1031 | int is_pbm_a) |
@@ -1172,19 +1091,11 @@ static void psycho_pbm_init(struct pci_controller_info *p, | |||
1172 | unsigned int *busrange; | 1091 | unsigned int *busrange; |
1173 | struct property *prop; | 1092 | struct property *prop; |
1174 | struct pci_pbm_info *pbm; | 1093 | struct pci_pbm_info *pbm; |
1175 | int len; | ||
1176 | 1094 | ||
1177 | if (is_pbm_a) { | 1095 | if (is_pbm_a) |
1178 | pbm = &p->pbm_A; | 1096 | pbm = &p->pbm_A; |
1179 | pbm->pci_first_slot = 1; | 1097 | else |
1180 | pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A; | ||
1181 | pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A; | ||
1182 | } else { | ||
1183 | pbm = &p->pbm_B; | 1098 | pbm = &p->pbm_B; |
1184 | pbm->pci_first_slot = 2; | ||
1185 | pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B; | ||
1186 | pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B; | ||
1187 | } | ||
1188 | 1099 | ||
1189 | pbm->chip_type = PBM_CHIP_TYPE_PSYCHO; | 1100 | pbm->chip_type = PBM_CHIP_TYPE_PSYCHO; |
1190 | pbm->chip_version = 0; | 1101 | pbm->chip_version = 0; |
@@ -1196,41 +1107,15 @@ static void psycho_pbm_init(struct pci_controller_info *p, | |||
1196 | if (prop) | 1107 | if (prop) |
1197 | pbm->chip_revision = *(int *) prop->value; | 1108 | pbm->chip_revision = *(int *) prop->value; |
1198 | 1109 | ||
1199 | pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE; | ||
1200 | pbm->io_space.flags = IORESOURCE_IO; | ||
1201 | pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE; | ||
1202 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
1203 | |||
1204 | pbm->parent = p; | 1110 | pbm->parent = p; |
1205 | pbm->prom_node = dp; | 1111 | pbm->prom_node = dp; |
1206 | pbm->name = dp->full_name; | 1112 | pbm->name = dp->full_name; |
1207 | 1113 | ||
1208 | pbm_register_toplevel_resources(p, pbm); | ||
1209 | |||
1210 | printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", | 1114 | printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", |
1211 | pbm->name, | 1115 | pbm->name, |
1212 | pbm->chip_version, pbm->chip_revision); | 1116 | pbm->chip_version, pbm->chip_revision); |
1213 | 1117 | ||
1214 | prop = of_find_property(dp, "ranges", &len); | 1118 | pci_determine_mem_io_space(pbm); |
1215 | if (prop) { | ||
1216 | pbm->pbm_ranges = prop->value; | ||
1217 | pbm->num_pbm_ranges = | ||
1218 | (len / sizeof(struct linux_prom_pci_ranges)); | ||
1219 | } else { | ||
1220 | pbm->num_pbm_ranges = 0; | ||
1221 | } | ||
1222 | |||
1223 | prop = of_find_property(dp, "interrupt-map", &len); | ||
1224 | if (prop) { | ||
1225 | pbm->pbm_intmap = prop->value; | ||
1226 | pbm->num_pbm_intmap = | ||
1227 | (len / sizeof(struct linux_prom_pci_intmap)); | ||
1228 | |||
1229 | prop = of_find_property(dp, "interrupt-map-mask", NULL); | ||
1230 | pbm->pbm_intmask = prop->value; | ||
1231 | } else { | ||
1232 | pbm->num_pbm_intmap = 0; | ||
1233 | } | ||
1234 | 1119 | ||
1235 | prop = of_find_property(dp, "bus-range", NULL); | 1120 | prop = of_find_property(dp, "bus-range", NULL); |
1236 | busrange = prop->value; | 1121 | busrange = prop->value; |
@@ -1246,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name) | |||
1246 | { | 1131 | { |
1247 | struct linux_prom64_registers *pr_regs; | 1132 | struct linux_prom64_registers *pr_regs; |
1248 | struct pci_controller_info *p; | 1133 | struct pci_controller_info *p; |
1249 | struct pci_iommu *iommu; | 1134 | struct iommu *iommu; |
1250 | struct property *prop; | 1135 | struct property *prop; |
1251 | u32 upa_portid; | 1136 | u32 upa_portid; |
1252 | int is_pbm_a; | 1137 | int is_pbm_a; |
@@ -1269,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name) | |||
1269 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1154 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1270 | prom_halt(); | 1155 | prom_halt(); |
1271 | } | 1156 | } |
1272 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1157 | iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); |
1273 | if (!iommu) { | 1158 | if (!iommu) { |
1274 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); | 1159 | prom_printf("PSYCHO: Fatal memory allocation error.\n"); |
1275 | prom_halt(); | 1160 | prom_halt(); |
@@ -1282,10 +1167,7 @@ void psycho_init(struct device_node *dp, char *model_name) | |||
1282 | p->pbm_A.portid = upa_portid; | 1167 | p->pbm_A.portid = upa_portid; |
1283 | p->pbm_B.portid = upa_portid; | 1168 | p->pbm_B.portid = upa_portid; |
1284 | p->index = pci_num_controllers++; | 1169 | p->index = pci_num_controllers++; |
1285 | p->pbms_same_domain = 0; | ||
1286 | p->scan_bus = psycho_scan_bus; | 1170 | p->scan_bus = psycho_scan_bus; |
1287 | p->base_address_update = psycho_base_address_update; | ||
1288 | p->resource_adjust = psycho_resource_adjust; | ||
1289 | p->pci_ops = &psycho_ops; | 1171 | p->pci_ops = &psycho_ops; |
1290 | 1172 | ||
1291 | prop = of_find_property(dp, "reg", NULL); | 1173 | prop = of_find_property(dp, "reg", NULL); |
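The psycho_read_pci_cfg()/psycho_write_pci_cfg() hunks above add one early check: a config access to devfn 0x00 on the PBM's own root bus is handed to pci_host_bridge_read_pci_cfg()/pci_host_bridge_write_pci_cfg() instead of the hardware path. The standalone sketch below shows only that dispatch shape; the helper names and register values are illustrative, not the real kernel functions.

/* Sketch only: root-bus devfn 0x00 goes to a host-bridge handler,
 * everything else to the normal hardware config path. */
#include <stdio.h>

struct pci_bus      { int number; };
struct pci_pbm_info { struct pci_bus *pci_bus; };

static int host_bridge_read(unsigned int devfn, int where, unsigned int *value)
{
        *value = 0xdeadbeef;   /* synthesized host-bridge register */
        return 0;
}

static int hw_config_read(unsigned int devfn, int where, unsigned int *value)
{
        *value = 0x12345678;   /* stands in for a real config-space access */
        return 0;
}

static int cfg_read(struct pci_pbm_info *pbm, struct pci_bus *bus,
                    unsigned int devfn, int where, unsigned int *value)
{
        if (bus == pbm->pci_bus && devfn == 0x00)
                return host_bridge_read(devfn, where, value);
        return hw_config_read(devfn, where, value);
}

int main(void)
{
        struct pci_bus root = { .number = 0 }, child = { .number = 1 };
        struct pci_pbm_info pbm = { .pci_bus = &root };
        unsigned int v;

        cfg_read(&pbm, &root, 0x00, 0, &v);   /* host-bridge path */
        printf("root devfn 0x00: %08x\n", v);
        cfg_read(&pbm, &child, 0x08, 0, &v);  /* hardware path */
        printf("child device:   %08x\n", v);
        return 0;
}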
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 94bb681f2323..397862fbd9e1 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $ | 1 | /* pci_sabre.c: Sabre specific PCI controller support. |
2 | * pci_sabre.c: Sabre specific PCI controller support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) | 3 | * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) | 5 | * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) |
7 | */ | 6 | */ |
@@ -254,9 +253,6 @@ static int __sabre_out_of_range(struct pci_pbm_info *pbm, | |||
254 | return 0; | 253 | return 0; |
255 | 254 | ||
256 | return ((pbm->parent == 0) || | 255 | return ((pbm->parent == 0) || |
257 | ((pbm == &pbm->parent->pbm_B) && | ||
258 | (bus == pbm->pci_first_busno) && | ||
259 | PCI_SLOT(devfn) > 8) || | ||
260 | ((pbm == &pbm->parent->pbm_A) && | 256 | ((pbm == &pbm->parent->pbm_A) && |
261 | (bus == pbm->pci_first_busno) && | 257 | (bus == pbm->pci_first_busno) && |
262 | PCI_SLOT(devfn) > 8)); | 258 | PCI_SLOT(devfn) > 8)); |
@@ -322,6 +318,12 @@ static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
322 | static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn, | 318 | static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn, |
323 | int where, int size, u32 *value) | 319 | int where, int size, u32 *value) |
324 | { | 320 | { |
321 | struct pci_pbm_info *pbm = bus->sysdata; | ||
322 | |||
323 | if (bus == pbm->pci_bus && devfn == 0x00) | ||
324 | return pci_host_bridge_read_pci_cfg(bus, devfn, where, | ||
325 | size, value); | ||
326 | |||
325 | if (!bus->number && sabre_out_of_range(devfn)) { | 327 | if (!bus->number && sabre_out_of_range(devfn)) { |
326 | switch (size) { | 328 | switch (size) { |
327 | case 1: | 329 | case 1: |
@@ -438,6 +440,12 @@ static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
438 | static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn, | 440 | static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn, |
439 | int where, int size, u32 value) | 441 | int where, int size, u32 value) |
440 | { | 442 | { |
443 | struct pci_pbm_info *pbm = bus->sysdata; | ||
444 | |||
445 | if (bus == pbm->pci_bus && devfn == 0x00) | ||
446 | return pci_host_bridge_write_pci_cfg(bus, devfn, where, | ||
447 | size, value); | ||
448 | |||
441 | if (bus->number) | 449 | if (bus->number) |
442 | return __sabre_write_pci_cfg(bus, devfn, where, size, value); | 450 | return __sabre_write_pci_cfg(bus, devfn, where, size, value); |
443 | 451 | ||
@@ -490,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p, | |||
490 | unsigned long afsr, | 498 | unsigned long afsr, |
491 | unsigned long afar) | 499 | unsigned long afar) |
492 | { | 500 | { |
493 | struct pci_iommu *iommu = p->pbm_A.iommu; | 501 | struct iommu *iommu = p->pbm_A.iommu; |
494 | unsigned long iommu_tag[16]; | 502 | unsigned long iommu_tag[16]; |
495 | unsigned long iommu_data[16]; | 503 | unsigned long iommu_data[16]; |
496 | unsigned long flags; | 504 | unsigned long flags; |
@@ -710,8 +718,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p) | |||
710 | p->index); | 718 | p->index); |
711 | ret = IRQ_HANDLED; | 719 | ret = IRQ_HANDLED; |
712 | } | 720 | } |
713 | pci_read_config_word(sabre_root_bus->self, | 721 | pci_bus_read_config_word(sabre_root_bus, 0, |
714 | PCI_STATUS, &stat); | 722 | PCI_STATUS, &stat); |
715 | if (stat & (PCI_STATUS_PARITY | | 723 | if (stat & (PCI_STATUS_PARITY | |
716 | PCI_STATUS_SIG_TARGET_ABORT | | 724 | PCI_STATUS_SIG_TARGET_ABORT | |
717 | PCI_STATUS_REC_TARGET_ABORT | | 725 | PCI_STATUS_REC_TARGET_ABORT | |
@@ -719,8 +727,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p) | |||
719 | PCI_STATUS_SIG_SYSTEM_ERROR)) { | 727 | PCI_STATUS_SIG_SYSTEM_ERROR)) { |
720 | printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n", | 728 | printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n", |
721 | p->index, stat); | 729 | p->index, stat); |
722 | pci_write_config_word(sabre_root_bus->self, | 730 | pci_bus_write_config_word(sabre_root_bus, 0, |
723 | PCI_STATUS, 0xffff); | 731 | PCI_STATUS, 0xffff); |
724 | ret = IRQ_HANDLED; | 732 | ret = IRQ_HANDLED; |
725 | } | 733 | } |
726 | return ret; | 734 | return ret; |
@@ -800,12 +808,10 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id) | |||
800 | if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) { | 808 | if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) { |
801 | sabre_check_iommu_error(p, afsr, afar); | 809 | sabre_check_iommu_error(p, afsr, afar); |
802 | pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus); | 810 | pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus); |
803 | pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus); | ||
804 | } | 811 | } |
805 | if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) { | 812 | if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) |
806 | pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus); | 813 | pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus); |
807 | pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus); | 814 | |
808 | } | ||
809 | /* For excessive retries, SABRE/PBM will abort the device | 815 | /* For excessive retries, SABRE/PBM will abort the device |
810 | * and there is no way to specifically check for excessive | 816 | * and there is no way to specifically check for excessive |
811 | * retries in the config space status registers. So what | 817 | * retries in the config space status registers. So what |
@@ -813,10 +819,8 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id) | |||
813 | * abort events. | 819 | * abort events. |
814 | */ | 820 | */ |
815 | 821 | ||
816 | if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) { | 822 | if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) |
817 | pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus); | 823 | pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus); |
818 | pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus); | ||
819 | } | ||
820 | 824 | ||
821 | return IRQ_HANDLED; | 825 | return IRQ_HANDLED; |
822 | } | 826 | } |
@@ -869,144 +873,52 @@ static void sabre_register_error_handlers(struct pci_controller_info *p) | |||
869 | sabre_write(base + SABRE_PCICTRL, tmp); | 873 | sabre_write(base + SABRE_PCICTRL, tmp); |
870 | } | 874 | } |
871 | 875 | ||
872 | static void sabre_resource_adjust(struct pci_dev *pdev, | ||
873 | struct resource *res, | ||
874 | struct resource *root) | ||
875 | { | ||
876 | struct pci_pbm_info *pbm = pdev->bus->sysdata; | ||
877 | unsigned long base; | ||
878 | |||
879 | if (res->flags & IORESOURCE_IO) | ||
880 | base = pbm->controller_regs + SABRE_IOSPACE; | ||
881 | else | ||
882 | base = pbm->controller_regs + SABRE_MEMSPACE; | ||
883 | |||
884 | res->start += base; | ||
885 | res->end += base; | ||
886 | } | ||
887 | |||
888 | static void sabre_base_address_update(struct pci_dev *pdev, int resource) | ||
889 | { | ||
890 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
891 | struct pci_pbm_info *pbm = pcp->pbm; | ||
892 | struct resource *res; | ||
893 | unsigned long base; | ||
894 | u32 reg; | ||
895 | int where, size, is_64bit; | ||
896 | |||
897 | res = &pdev->resource[resource]; | ||
898 | if (resource < 6) { | ||
899 | where = PCI_BASE_ADDRESS_0 + (resource * 4); | ||
900 | } else if (resource == PCI_ROM_RESOURCE) { | ||
901 | where = pdev->rom_base_reg; | ||
902 | } else { | ||
903 | /* Somebody might have asked allocation of a non-standard resource */ | ||
904 | return; | ||
905 | } | ||
906 | |||
907 | is_64bit = 0; | ||
908 | if (res->flags & IORESOURCE_IO) | ||
909 | base = pbm->controller_regs + SABRE_IOSPACE; | ||
910 | else { | ||
911 | base = pbm->controller_regs + SABRE_MEMSPACE; | ||
912 | if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
913 | == PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
914 | is_64bit = 1; | ||
915 | } | ||
916 | |||
917 | size = res->end - res->start; | ||
918 | pci_read_config_dword(pdev, where, ®); | ||
919 | reg = ((reg & size) | | ||
920 | (((u32)(res->start - base)) & ~size)); | ||
921 | if (resource == PCI_ROM_RESOURCE) { | ||
922 | reg |= PCI_ROM_ADDRESS_ENABLE; | ||
923 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
924 | } | ||
925 | pci_write_config_dword(pdev, where, reg); | ||
926 | |||
927 | /* This knows that the upper 32-bits of the address | ||
928 | * must be zero. Our PCI common layer enforces this. | ||
929 | */ | ||
930 | if (is_64bit) | ||
931 | pci_write_config_dword(pdev, where + 4, 0); | ||
932 | } | ||
933 | |||
934 | static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) | 876 | static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) |
935 | { | 877 | { |
936 | struct pci_dev *pdev; | 878 | struct pci_dev *pdev; |
937 | 879 | ||
938 | list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { | 880 | list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { |
939 | |||
940 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | 881 | if (pdev->vendor == PCI_VENDOR_ID_SUN && |
941 | pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { | 882 | pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { |
942 | u32 word32; | ||
943 | u16 word16; | 883 | u16 word16; |
944 | 884 | ||
945 | sabre_read_pci_cfg(pdev->bus, pdev->devfn, | 885 | pci_read_config_word(pdev, PCI_COMMAND, &word16); |
946 | PCI_COMMAND, 2, &word32); | ||
947 | word16 = (u16) word32; | ||
948 | word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | | 886 | word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | |
949 | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | | 887 | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | |
950 | PCI_COMMAND_IO; | 888 | PCI_COMMAND_IO; |
951 | word32 = (u32) word16; | 889 | pci_write_config_word(pdev, PCI_COMMAND, word16); |
952 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | ||
953 | PCI_COMMAND, 2, word32); | ||
954 | 890 | ||
955 | /* Status register bits are "write 1 to clear". */ | 891 | /* Status register bits are "write 1 to clear". */ |
956 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | 892 | pci_write_config_word(pdev, PCI_STATUS, 0xffff); |
957 | PCI_STATUS, 2, 0xffff); | 893 | pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff); |
958 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | ||
959 | PCI_SEC_STATUS, 2, 0xffff); | ||
960 | 894 | ||
961 | /* Use a primary/secondary latency timer value | 895 |
962 | * of 64. | 896 | * of 64. |
963 | */ | 897 | */ |
964 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | 898 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); |
965 | PCI_LATENCY_TIMER, 1, 64); | 899 | pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64); |
966 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | ||
967 | PCI_SEC_LATENCY_TIMER, 1, 64); | ||
968 | 900 | ||
969 | /* Enable reporting/forwarding of master aborts, | 901 | /* Enable reporting/forwarding of master aborts, |
970 | * parity, and SERR. | 902 | * parity, and SERR. |
971 | */ | 903 | */ |
972 | sabre_write_pci_cfg(pdev->bus, pdev->devfn, | 904 | pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL, |
973 | PCI_BRIDGE_CONTROL, 1, | 905 | (PCI_BRIDGE_CTL_PARITY | |
974 | (PCI_BRIDGE_CTL_PARITY | | 906 | PCI_BRIDGE_CTL_SERR | |
975 | PCI_BRIDGE_CTL_SERR | | 907 | PCI_BRIDGE_CTL_MASTER_ABORT)); |
976 | PCI_BRIDGE_CTL_MASTER_ABORT)); | ||
977 | } | 908 | } |
978 | } | 909 | } |
979 | } | 910 | } |
980 | 911 | ||
981 | static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) | ||
982 | { | ||
983 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | ||
984 | |||
985 | if (!cookie) { | ||
986 | prom_printf("SABRE: Critical allocation failure.\n"); | ||
987 | prom_halt(); | ||
988 | } | ||
989 | |||
990 | /* All we care about is the PBM. */ | ||
991 | cookie->pbm = pbm; | ||
992 | |||
993 | return cookie; | ||
994 | } | ||
995 | |||
996 | static void sabre_scan_bus(struct pci_controller_info *p) | 912 | static void sabre_scan_bus(struct pci_controller_info *p) |
997 | { | 913 | { |
998 | static int once; | 914 | static int once; |
999 | struct pci_bus *sabre_bus, *pbus; | 915 | struct pci_bus *pbus; |
1000 | struct pci_pbm_info *pbm; | ||
1001 | struct pcidev_cookie *cookie; | ||
1002 | int sabres_scanned; | ||
1003 | 916 | ||
1004 | /* The APB bridge speaks to the Sabre host PCI bridge | 917 | /* The APB bridge speaks to the Sabre host PCI bridge |
1005 | * at 66Mhz, but the front side of APB runs at 33Mhz | 918 | * at 66Mhz, but the front side of APB runs at 33Mhz |
1006 | * for both segments. | 919 | * for both segments. |
1007 | */ | 920 | */ |
1008 | p->pbm_A.is_66mhz_capable = 0; | 921 | p->pbm_A.is_66mhz_capable = 0; |
1009 | p->pbm_B.is_66mhz_capable = 0; | ||
1010 | 922 | ||
1011 | /* This driver has not been verified to handle | 923 | /* This driver has not been verified to handle |
1012 | * multiple SABREs yet, so trap this. | 924 | * multiple SABREs yet, so trap this. |
@@ -1020,56 +932,13 @@ static void sabre_scan_bus(struct pci_controller_info *p) | |||
1020 | } | 932 | } |
1021 | once++; | 933 | once++; |
1022 | 934 | ||
1023 | cookie = alloc_bridge_cookie(&p->pbm_A); | 935 | pbus = pci_scan_one_pbm(&p->pbm_A); |
1024 | 936 | if (!pbus) | |
1025 | sabre_bus = pci_scan_bus(p->pci_first_busno, | 937 | return; |
1026 | p->pci_ops, | ||
1027 | &p->pbm_A); | ||
1028 | pci_fixup_host_bridge_self(sabre_bus); | ||
1029 | sabre_bus->self->sysdata = cookie; | ||
1030 | |||
1031 | sabre_root_bus = sabre_bus; | ||
1032 | |||
1033 | apb_init(p, sabre_bus); | ||
1034 | |||
1035 | sabres_scanned = 0; | ||
1036 | |||
1037 | list_for_each_entry(pbus, &sabre_bus->children, node) { | ||
1038 | |||
1039 | if (pbus->number == p->pbm_A.pci_first_busno) { | ||
1040 | pbm = &p->pbm_A; | ||
1041 | } else if (pbus->number == p->pbm_B.pci_first_busno) { | ||
1042 | pbm = &p->pbm_B; | ||
1043 | } else | ||
1044 | continue; | ||
1045 | |||
1046 | cookie = alloc_bridge_cookie(pbm); | ||
1047 | pbus->self->sysdata = cookie; | ||
1048 | |||
1049 | sabres_scanned++; | ||
1050 | 938 | ||
1051 | pbus->sysdata = pbm; | 939 | sabre_root_bus = pbus; |
1052 | pbm->pci_bus = pbus; | ||
1053 | pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node); | ||
1054 | pci_record_assignments(pbm, pbus); | ||
1055 | pci_assign_unassigned(pbm, pbus); | ||
1056 | pci_fixup_irq(pbm, pbus); | ||
1057 | pci_determine_66mhz_disposition(pbm, pbus); | ||
1058 | pci_setup_busmastering(pbm, pbus); | ||
1059 | } | ||
1060 | 940 | ||
1061 | if (!sabres_scanned) { | 941 | apb_init(p, pbus); |
1062 | /* Hummingbird, no APBs. */ | ||
1063 | pbm = &p->pbm_A; | ||
1064 | sabre_bus->sysdata = pbm; | ||
1065 | pbm->pci_bus = sabre_bus; | ||
1066 | pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node); | ||
1067 | pci_record_assignments(pbm, sabre_bus); | ||
1068 | pci_assign_unassigned(pbm, sabre_bus); | ||
1069 | pci_fixup_irq(pbm, sabre_bus); | ||
1070 | pci_determine_66mhz_disposition(pbm, sabre_bus); | ||
1071 | pci_setup_busmastering(pbm, sabre_bus); | ||
1072 | } | ||
1073 | 942 | ||
1074 | sabre_register_error_handlers(p); | 943 | sabre_register_error_handlers(p); |
1075 | } | 944 | } |
@@ -1078,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p, | |||
1078 | int tsbsize, unsigned long dvma_offset, | 947 | int tsbsize, unsigned long dvma_offset, |
1079 | u32 dma_mask) | 948 | u32 dma_mask) |
1080 | { | 949 | { |
1081 | struct pci_iommu *iommu = p->pbm_A.iommu; | 950 | struct iommu *iommu = p->pbm_A.iommu; |
1082 | unsigned long i; | 951 | unsigned long i; |
1083 | u64 control; | 952 | u64 control; |
1084 | 953 | ||
@@ -1126,224 +995,31 @@ static void sabre_iommu_init(struct pci_controller_info *p, | |||
1126 | sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); | 995 | sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); |
1127 | } | 996 | } |
1128 | 997 | ||
1129 | static void pbm_register_toplevel_resources(struct pci_controller_info *p, | 998 | static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp) |
1130 | struct pci_pbm_info *pbm) | ||
1131 | { | ||
1132 | char *name = pbm->name; | ||
1133 | unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE; | ||
1134 | unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE; | ||
1135 | unsigned int devfn; | ||
1136 | unsigned long first, last, i; | ||
1137 | u8 *addr, map; | ||
1138 | |||
1139 | sprintf(name, "SABRE%d PBM%c", | ||
1140 | p->index, | ||
1141 | (pbm == &p->pbm_A ? 'A' : 'B')); | ||
1142 | pbm->io_space.name = pbm->mem_space.name = name; | ||
1143 | |||
1144 | devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1); | ||
1145 | addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP); | ||
1146 | map = 0; | ||
1147 | pci_config_read8(addr, &map); | ||
1148 | |||
1149 | first = 8; | ||
1150 | last = 0; | ||
1151 | for (i = 0; i < 8; i++) { | ||
1152 | if ((map & (1 << i)) != 0) { | ||
1153 | if (first > i) | ||
1154 | first = i; | ||
1155 | if (last < i) | ||
1156 | last = i; | ||
1157 | } | ||
1158 | } | ||
1159 | pbm->io_space.start = ibase + (first << 21UL); | ||
1160 | pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1); | ||
1161 | pbm->io_space.flags = IORESOURCE_IO; | ||
1162 | |||
1163 | addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP); | ||
1164 | map = 0; | ||
1165 | pci_config_read8(addr, &map); | ||
1166 | |||
1167 | first = 8; | ||
1168 | last = 0; | ||
1169 | for (i = 0; i < 8; i++) { | ||
1170 | if ((map & (1 << i)) != 0) { | ||
1171 | if (first > i) | ||
1172 | first = i; | ||
1173 | if (last < i) | ||
1174 | last = i; | ||
1175 | } | ||
1176 | } | ||
1177 | pbm->mem_space.start = mbase + (first << 29UL); | ||
1178 | pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1); | ||
1179 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
1180 | |||
1181 | if (request_resource(&ioport_resource, &pbm->io_space) < 0) { | ||
1182 | prom_printf("Cannot register PBM-%c's IO space.\n", | ||
1183 | (pbm == &p->pbm_A ? 'A' : 'B')); | ||
1184 | prom_halt(); | ||
1185 | } | ||
1186 | if (request_resource(&iomem_resource, &pbm->mem_space) < 0) { | ||
1187 | prom_printf("Cannot register PBM-%c's MEM space.\n", | ||
1188 | (pbm == &p->pbm_A ? 'A' : 'B')); | ||
1189 | prom_halt(); | ||
1190 | } | ||
1191 | |||
1192 | /* Register legacy regions if this PBM covers that area. */ | ||
1193 | if (pbm->io_space.start == ibase && | ||
1194 | pbm->mem_space.start == mbase) | ||
1195 | pci_register_legacy_regions(&pbm->io_space, | ||
1196 | &pbm->mem_space); | ||
1197 | } | ||
1198 | |||
1199 | static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_start, u32 dma_end) | ||
1200 | { | 999 | { |
1201 | struct pci_pbm_info *pbm; | 1000 | struct pci_pbm_info *pbm; |
1202 | struct device_node *node; | ||
1203 | struct property *prop; | ||
1204 | u32 *busrange; | ||
1205 | int len, simbas_found; | ||
1206 | |||
1207 | simbas_found = 0; | ||
1208 | node = dp->child; | ||
1209 | while (node != NULL) { | ||
1210 | if (strcmp(node->name, "pci")) | ||
1211 | goto next_pci; | ||
1212 | |||
1213 | prop = of_find_property(node, "model", NULL); | ||
1214 | if (!prop || strncmp(prop->value, "SUNW,simba", prop->length)) | ||
1215 | goto next_pci; | ||
1216 | |||
1217 | simbas_found++; | ||
1218 | |||
1219 | prop = of_find_property(node, "bus-range", NULL); | ||
1220 | busrange = prop->value; | ||
1221 | if (busrange[0] == 1) | ||
1222 | pbm = &p->pbm_B; | ||
1223 | else | ||
1224 | pbm = &p->pbm_A; | ||
1225 | |||
1226 | pbm->name = node->full_name; | ||
1227 | printk("%s: SABRE PCI Bus Module\n", pbm->name); | ||
1228 | |||
1229 | pbm->chip_type = PBM_CHIP_TYPE_SABRE; | ||
1230 | pbm->parent = p; | ||
1231 | pbm->prom_node = node; | ||
1232 | pbm->pci_first_slot = 1; | ||
1233 | pbm->pci_first_busno = busrange[0]; | ||
1234 | pbm->pci_last_busno = busrange[1]; | ||
1235 | |||
1236 | prop = of_find_property(node, "ranges", &len); | ||
1237 | if (prop) { | ||
1238 | pbm->pbm_ranges = prop->value; | ||
1239 | pbm->num_pbm_ranges = | ||
1240 | (len / sizeof(struct linux_prom_pci_ranges)); | ||
1241 | } else { | ||
1242 | pbm->num_pbm_ranges = 0; | ||
1243 | } | ||
1244 | 1001 | ||
1245 | prop = of_find_property(node, "interrupt-map", &len); | 1002 | pbm = &p->pbm_A; |
1246 | if (prop) { | 1003 | pbm->name = dp->full_name; |
1247 | pbm->pbm_intmap = prop->value; | 1004 | printk("%s: SABRE PCI Bus Module\n", pbm->name); |
1248 | pbm->num_pbm_intmap = | ||
1249 | (len / sizeof(struct linux_prom_pci_intmap)); | ||
1250 | |||
1251 | prop = of_find_property(node, "interrupt-map-mask", | ||
1252 | NULL); | ||
1253 | pbm->pbm_intmask = prop->value; | ||
1254 | } else { | ||
1255 | pbm->num_pbm_intmap = 0; | ||
1256 | } | ||
1257 | 1005 | ||
1258 | pbm_register_toplevel_resources(p, pbm); | 1006 | pbm->chip_type = PBM_CHIP_TYPE_SABRE; |
1259 | 1007 | pbm->parent = p; | |
1260 | next_pci: | 1008 | pbm->prom_node = dp; |
1261 | node = node->sibling; | 1009 | pbm->pci_first_busno = p->pci_first_busno; |
1262 | } | 1010 | pbm->pci_last_busno = p->pci_last_busno; |
1263 | if (simbas_found == 0) { | ||
1264 | struct resource *rp; | ||
1265 | 1011 | ||
1266 | /* No APBs underneath, probably this is a hummingbird | 1012 | pci_determine_mem_io_space(pbm); |
1267 | * system. | ||
1268 | */ | ||
1269 | pbm = &p->pbm_A; | ||
1270 | pbm->parent = p; | ||
1271 | pbm->prom_node = dp; | ||
1272 | pbm->pci_first_busno = p->pci_first_busno; | ||
1273 | pbm->pci_last_busno = p->pci_last_busno; | ||
1274 | |||
1275 | prop = of_find_property(dp, "ranges", &len); | ||
1276 | if (prop) { | ||
1277 | pbm->pbm_ranges = prop->value; | ||
1278 | pbm->num_pbm_ranges = | ||
1279 | (len / sizeof(struct linux_prom_pci_ranges)); | ||
1280 | } else { | ||
1281 | pbm->num_pbm_ranges = 0; | ||
1282 | } | ||
1283 | |||
1284 | prop = of_find_property(dp, "interrupt-map", &len); | ||
1285 | if (prop) { | ||
1286 | pbm->pbm_intmap = prop->value; | ||
1287 | pbm->num_pbm_intmap = | ||
1288 | (len / sizeof(struct linux_prom_pci_intmap)); | ||
1289 | |||
1290 | prop = of_find_property(dp, "interrupt-map-mask", | ||
1291 | NULL); | ||
1292 | pbm->pbm_intmask = prop->value; | ||
1293 | } else { | ||
1294 | pbm->num_pbm_intmap = 0; | ||
1295 | } | ||
1296 | |||
1297 | pbm->name = dp->full_name; | ||
1298 | printk("%s: SABRE PCI Bus Module\n", pbm->name); | ||
1299 | |||
1300 | pbm->io_space.name = pbm->mem_space.name = pbm->name; | ||
1301 | |||
1302 | /* Hack up top-level resources. */ | ||
1303 | pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE; | ||
1304 | pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL; | ||
1305 | pbm->io_space.flags = IORESOURCE_IO; | ||
1306 | |||
1307 | pbm->mem_space.start = | ||
1308 | (p->pbm_A.controller_regs + SABRE_MEMSPACE); | ||
1309 | pbm->mem_space.end = | ||
1310 | (pbm->mem_space.start + ((1UL << 32UL) - 1UL)); | ||
1311 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
1312 | |||
1313 | if (request_resource(&ioport_resource, &pbm->io_space) < 0) { | ||
1314 | prom_printf("Cannot register Hummingbird's IO space.\n"); | ||
1315 | prom_halt(); | ||
1316 | } | ||
1317 | if (request_resource(&iomem_resource, &pbm->mem_space) < 0) { | ||
1318 | prom_printf("Cannot register Hummingbird's MEM space.\n"); | ||
1319 | prom_halt(); | ||
1320 | } | ||
1321 | |||
1322 | rp = kmalloc(sizeof(*rp), GFP_KERNEL); | ||
1323 | if (!rp) { | ||
1324 | prom_printf("Cannot allocate IOMMU resource.\n"); | ||
1325 | prom_halt(); | ||
1326 | } | ||
1327 | rp->name = "IOMMU"; | ||
1328 | rp->start = pbm->mem_space.start + (unsigned long) dma_start; | ||
1329 | rp->end = pbm->mem_space.start + (unsigned long) dma_end - 1UL; | ||
1330 | rp->flags = IORESOURCE_BUSY; | ||
1331 | request_resource(&pbm->mem_space, rp); | ||
1332 | |||
1333 | pci_register_legacy_regions(&pbm->io_space, | ||
1334 | &pbm->mem_space); | ||
1335 | } | ||
1336 | } | 1013 | } |
1337 | 1014 | ||
1338 | void sabre_init(struct device_node *dp, char *model_name) | 1015 | void sabre_init(struct device_node *dp, char *model_name) |
1339 | { | 1016 | { |
1340 | struct linux_prom64_registers *pr_regs; | 1017 | const struct linux_prom64_registers *pr_regs; |
1341 | struct pci_controller_info *p; | 1018 | struct pci_controller_info *p; |
1342 | struct pci_iommu *iommu; | 1019 | struct iommu *iommu; |
1343 | struct property *prop; | ||
1344 | int tsbsize; | 1020 | int tsbsize; |
1345 | u32 *busrange; | 1021 | const u32 *busrange; |
1346 | u32 *vdma; | 1022 | const u32 *vdma; |
1347 | u32 upa_portid, dma_mask; | 1023 | u32 upa_portid, dma_mask; |
1348 | u64 clear_irq; | 1024 | u64 clear_irq; |
1349 | 1025 | ||
@@ -1351,13 +1027,9 @@ void sabre_init(struct device_node *dp, char *model_name) | |||
1351 | if (!strcmp(model_name, "pci108e,a001")) | 1027 | if (!strcmp(model_name, "pci108e,a001")) |
1352 | hummingbird_p = 1; | 1028 | hummingbird_p = 1; |
1353 | else if (!strcmp(model_name, "SUNW,sabre")) { | 1029 | else if (!strcmp(model_name, "SUNW,sabre")) { |
1354 | prop = of_find_property(dp, "compatible", NULL); | 1030 | const char *compat = of_get_property(dp, "compatible", NULL); |
1355 | if (prop) { | 1031 | if (compat && !strcmp(compat, "pci108e,a001")) |
1356 | const char *compat = prop->value; | 1032 | hummingbird_p = 1; |
1357 | |||
1358 | if (!strcmp(compat, "pci108e,a001")) | ||
1359 | hummingbird_p = 1; | ||
1360 | } | ||
1361 | if (!hummingbird_p) { | 1033 | if (!hummingbird_p) { |
1362 | struct device_node *dp; | 1034 | struct device_node *dp; |
1363 | 1035 | ||
@@ -1381,37 +1053,28 @@ void sabre_init(struct device_node *dp, char *model_name) | |||
1381 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); | 1053 | prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); |
1382 | prom_halt(); | 1054 | prom_halt(); |
1383 | } | 1055 | } |
1384 | p->pbm_A.iommu = p->pbm_B.iommu = iommu; | 1056 | p->pbm_A.iommu = iommu; |
1385 | 1057 | ||
1386 | upa_portid = 0xff; | 1058 | upa_portid = of_getintprop_default(dp, "upa-portid", 0xff); |
1387 | prop = of_find_property(dp, "upa-portid", NULL); | ||
1388 | if (prop) | ||
1389 | upa_portid = *(u32 *) prop->value; | ||
1390 | 1059 | ||
1391 | p->next = pci_controller_root; | 1060 | p->next = pci_controller_root; |
1392 | pci_controller_root = p; | 1061 | pci_controller_root = p; |
1393 | 1062 | ||
1394 | p->pbm_A.portid = upa_portid; | 1063 | p->pbm_A.portid = upa_portid; |
1395 | p->pbm_B.portid = upa_portid; | ||
1396 | p->index = pci_num_controllers++; | 1064 | p->index = pci_num_controllers++; |
1397 | p->pbms_same_domain = 1; | ||
1398 | p->scan_bus = sabre_scan_bus; | 1065 | p->scan_bus = sabre_scan_bus; |
1399 | p->base_address_update = sabre_base_address_update; | ||
1400 | p->resource_adjust = sabre_resource_adjust; | ||
1401 | p->pci_ops = &sabre_ops; | 1066 | p->pci_ops = &sabre_ops; |
1402 | 1067 | ||
1403 | /* | 1068 | /* |
1404 | * Map in SABRE register set and report the presence of this SABRE. | 1069 | * Map in SABRE register set and report the presence of this SABRE. |
1405 | */ | 1070 | */ |
1406 | 1071 | ||
1407 | prop = of_find_property(dp, "reg", NULL); | 1072 | pr_regs = of_get_property(dp, "reg", NULL); |
1408 | pr_regs = prop->value; | ||
1409 | 1073 | ||
1410 | /* | 1074 | /* |
1411 | * First REG in property is base of entire SABRE register space. | 1075 | * First REG in property is base of entire SABRE register space. |
1412 | */ | 1076 | */ |
1413 | p->pbm_A.controller_regs = pr_regs[0].phys_addr; | 1077 | p->pbm_A.controller_regs = pr_regs[0].phys_addr; |
1414 | p->pbm_B.controller_regs = pr_regs[0].phys_addr; | ||
1415 | 1078 | ||
1416 | /* Clear interrupts */ | 1079 | /* Clear interrupts */ |
1417 | 1080 | ||
@@ -1429,11 +1092,10 @@ void sabre_init(struct device_node *dp, char *model_name) | |||
1429 | SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN)); | 1092 | SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN)); |
1430 | 1093 | ||
1431 | /* Now map in PCI config space for entire SABRE. */ | 1094 | /* Now map in PCI config space for entire SABRE. */ |
1432 | p->pbm_A.config_space = p->pbm_B.config_space = | 1095 | p->pbm_A.config_space = |
1433 | (p->pbm_A.controller_regs + SABRE_CONFIGSPACE); | 1096 | (p->pbm_A.controller_regs + SABRE_CONFIGSPACE); |
1434 | 1097 | ||
1435 | prop = of_find_property(dp, "virtual-dma", NULL); | 1098 | vdma = of_get_property(dp, "virtual-dma", NULL); |
1436 | vdma = prop->value; | ||
1437 | 1099 | ||
1438 | dma_mask = vdma[0]; | 1100 | dma_mask = vdma[0]; |
1439 | switch(vdma[1]) { | 1101 | switch(vdma[1]) { |
@@ -1457,13 +1119,12 @@ void sabre_init(struct device_node *dp, char *model_name) | |||
1457 | 1119 | ||
1458 | sabre_iommu_init(p, tsbsize, vdma[0], dma_mask); | 1120 | sabre_iommu_init(p, tsbsize, vdma[0], dma_mask); |
1459 | 1121 | ||
1460 | prop = of_find_property(dp, "bus-range", NULL); | 1122 | busrange = of_get_property(dp, "bus-range", NULL); |
1461 | busrange = prop->value; | ||
1462 | p->pci_first_busno = busrange[0]; | 1123 | p->pci_first_busno = busrange[0]; |
1463 | p->pci_last_busno = busrange[1]; | 1124 | p->pci_last_busno = busrange[1]; |
1464 | 1125 | ||
1465 | /* | 1126 | /* |
1466 | * Look for APB underneath. | 1127 | * Look for APB underneath. |
1467 | */ | 1128 | */ |
1468 | sabre_pbm_init(p, dp, vdma[0], vdma[0] + vdma[1]); | 1129 | sabre_pbm_init(p, dp); |
1469 | } | 1130 | } |
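sabre_init() above drops the of_find_property() + prop->value dance in favor of of_get_property() and of_getintprop_default(), which hand back the value (or a caller-supplied default) directly. The sketch below models that helper pattern with simplified stand-in types; it is not the real of_*() implementation.

/* Sketch only: a get-property helper returning the value or NULL, plus an
 * integer variant with a default, so callers avoid struct property. */
#include <stdio.h>
#include <string.h>

struct property {
        const char *name;
        const void *value;
        int length;
        struct property *next;
};
struct device_node { struct property *properties; };

static const void *get_property(const struct device_node *dp,
                                const char *name, int *lenp)
{
        struct property *pp;

        for (pp = dp->properties; pp; pp = pp->next) {
                if (!strcmp(pp->name, name)) {
                        if (lenp)
                                *lenp = pp->length;
                        return pp->value;
                }
        }
        return NULL;
}

static int getintprop_default(const struct device_node *dp,
                              const char *name, int def)
{
        const int *val = get_property(dp, name, NULL);
        return val ? *val : def;
}

int main(void)
{
        int portid = 7;
        struct property p = {
                .name = "upa-portid", .value = &portid, .length = sizeof(portid),
        };
        struct device_node dp = { .properties = &p };

        /* One call replaces of_find_property() + NULL check + prop->value. */
        printf("upa-portid = %d\n", getintprop_default(&dp, "upa-portid", 0xff));
        printf("missing    = %d\n", getintprop_default(&dp, "bus-range", 0xff));
        return 0;
}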
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 66911b126aed..91a7385e5d32 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $ | 1 | /* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. |
2 | * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. | ||
3 | * | 2 | * |
4 | * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 2001, 2002, 2003, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
@@ -126,6 +125,9 @@ static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
126 | u16 tmp16; | 125 | u16 tmp16; |
127 | u8 tmp8; | 126 | u8 tmp8; |
128 | 127 | ||
128 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
129 | return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, | ||
130 | size, value); | ||
129 | switch (size) { | 131 | switch (size) { |
130 | case 1: | 132 | case 1: |
131 | *value = 0xff; | 133 | *value = 0xff; |
@@ -179,6 +181,9 @@ static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
179 | unsigned char bus = bus_dev->number; | 181 | unsigned char bus = bus_dev->number; |
180 | u32 *addr; | 182 | u32 *addr; |
181 | 183 | ||
184 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
185 | return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, | ||
186 | size, value); | ||
182 | addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where); | 187 | addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where); |
183 | if (!addr) | 188 | if (!addr) |
184 | return PCIBIOS_SUCCESSFUL; | 189 | return PCIBIOS_SUCCESSFUL; |
@@ -274,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino) | |||
274 | static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, | 279 | static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, |
275 | enum schizo_error_type type) | 280 | enum schizo_error_type type) |
276 | { | 281 | { |
277 | struct pci_strbuf *strbuf = &pbm->stc; | 282 | struct strbuf *strbuf = &pbm->stc; |
278 | unsigned long regbase = pbm->pbm_regs; | 283 | unsigned long regbase = pbm->pbm_regs; |
279 | unsigned long err_base, tag_base, line_base; | 284 | unsigned long err_base, tag_base, line_base; |
280 | u64 control; | 285 | u64 control; |
@@ -382,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, | |||
382 | static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, | 387 | static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, |
383 | enum schizo_error_type type) | 388 | enum schizo_error_type type) |
384 | { | 389 | { |
385 | struct pci_iommu *iommu = pbm->iommu; | 390 | struct iommu *iommu = pbm->iommu; |
386 | unsigned long iommu_tag[16]; | 391 | unsigned long iommu_tag[16]; |
387 | unsigned long iommu_data[16]; | 392 | unsigned long iommu_data[16]; |
388 | unsigned long flags; | 393 | unsigned long flags; |
@@ -1229,42 +1234,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) | |||
1229 | pci_config_write8(addr, 64); | 1234 | pci_config_write8(addr, 64); |
1230 | } | 1235 | } |
1231 | 1236 | ||
1232 | static void pbm_scan_bus(struct pci_controller_info *p, | 1237 | static void schizo_scan_bus(struct pci_controller_info *p) |
1233 | struct pci_pbm_info *pbm) | ||
1234 | { | ||
1235 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | ||
1236 | |||
1237 | if (!cookie) { | ||
1238 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | ||
1239 | prom_halt(); | ||
1240 | } | ||
1241 | |||
1242 | /* All we care about is the PBM. */ | ||
1243 | cookie->pbm = pbm; | ||
1244 | |||
1245 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, | ||
1246 | p->pci_ops, | ||
1247 | pbm); | ||
1248 | pci_fixup_host_bridge_self(pbm->pci_bus); | ||
1249 | pbm->pci_bus->self->sysdata = cookie; | ||
1250 | |||
1251 | pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); | ||
1252 | pci_record_assignments(pbm, pbm->pci_bus); | ||
1253 | pci_assign_unassigned(pbm, pbm->pci_bus); | ||
1254 | pci_fixup_irq(pbm, pbm->pci_bus); | ||
1255 | pci_determine_66mhz_disposition(pbm, pbm->pci_bus); | ||
1256 | pci_setup_busmastering(pbm, pbm->pci_bus); | ||
1257 | } | ||
1258 | |||
1259 | static void __schizo_scan_bus(struct pci_controller_info *p, | ||
1260 | int chip_type) | ||
1261 | { | 1238 | { |
1262 | if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) { | ||
1263 | printk("PCI: Only one PCI bus module of controller found.\n"); | ||
1264 | printk("PCI: Ignoring entire controller.\n"); | ||
1265 | return; | ||
1266 | } | ||
1267 | |||
1268 | pbm_config_busmastering(&p->pbm_B); | 1239 | pbm_config_busmastering(&p->pbm_B); |
1269 | p->pbm_B.is_66mhz_capable = | 1240 | p->pbm_B.is_66mhz_capable = |
1270 | (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL) | 1241 | (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL) |
@@ -1273,154 +1244,19 @@ static void __schizo_scan_bus(struct pci_controller_info *p, | |||
1273 | p->pbm_A.is_66mhz_capable = | 1244 | p->pbm_A.is_66mhz_capable = |
1274 | (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL) | 1245 | (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL) |
1275 | != NULL); | 1246 | != NULL); |
1276 | pbm_scan_bus(p, &p->pbm_B); | 1247 | |
1277 | pbm_scan_bus(p, &p->pbm_A); | 1248 | p->pbm_B.pci_bus = pci_scan_one_pbm(&p->pbm_B); |
1249 | p->pbm_A.pci_bus = pci_scan_one_pbm(&p->pbm_A); | ||
1278 | 1250 | ||
1279 | /* After the PCI bus scan is complete, we can register | 1251 | /* After the PCI bus scan is complete, we can register |
1280 | * the error interrupt handlers. | 1252 | * the error interrupt handlers. |
1281 | */ | 1253 | */ |
1282 | if (chip_type == PBM_CHIP_TYPE_TOMATILLO) | 1254 | if (p->pbm_B.chip_type == PBM_CHIP_TYPE_TOMATILLO) |
1283 | tomatillo_register_error_handlers(p); | 1255 | tomatillo_register_error_handlers(p); |
1284 | else | 1256 | else |
1285 | schizo_register_error_handlers(p); | 1257 | schizo_register_error_handlers(p); |
1286 | } | 1258 | } |
1287 | 1259 | ||
1288 | static void schizo_scan_bus(struct pci_controller_info *p) | ||
1289 | { | ||
1290 | __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO); | ||
1291 | } | ||
1292 | |||
1293 | static void tomatillo_scan_bus(struct pci_controller_info *p) | ||
1294 | { | ||
1295 | __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO); | ||
1296 | } | ||
1297 | |||
1298 | static void schizo_base_address_update(struct pci_dev *pdev, int resource) | ||
1299 | { | ||
1300 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
1301 | struct pci_pbm_info *pbm = pcp->pbm; | ||
1302 | struct resource *res, *root; | ||
1303 | u32 reg; | ||
1304 | int where, size, is_64bit; | ||
1305 | |||
1306 | res = &pdev->resource[resource]; | ||
1307 | if (resource < 6) { | ||
1308 | where = PCI_BASE_ADDRESS_0 + (resource * 4); | ||
1309 | } else if (resource == PCI_ROM_RESOURCE) { | ||
1310 | where = pdev->rom_base_reg; | ||
1311 | } else { | ||
1312 | /* Somebody might have asked allocation of a non-standard resource */ | ||
1313 | return; | ||
1314 | } | ||
1315 | |||
1316 | is_64bit = 0; | ||
1317 | if (res->flags & IORESOURCE_IO) | ||
1318 | root = &pbm->io_space; | ||
1319 | else { | ||
1320 | root = &pbm->mem_space; | ||
1321 | if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
1322 | == PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
1323 | is_64bit = 1; | ||
1324 | } | ||
1325 | |||
1326 | size = res->end - res->start; | ||
1327 | pci_read_config_dword(pdev, where, ®); | ||
1328 | reg = ((reg & size) | | ||
1329 | (((u32)(res->start - root->start)) & ~size)); | ||
1330 | if (resource == PCI_ROM_RESOURCE) { | ||
1331 | reg |= PCI_ROM_ADDRESS_ENABLE; | ||
1332 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
1333 | } | ||
1334 | pci_write_config_dword(pdev, where, reg); | ||
1335 | |||
1336 | /* This knows that the upper 32-bits of the address | ||
1337 | * must be zero. Our PCI common layer enforces this. | ||
1338 | */ | ||
1339 | if (is_64bit) | ||
1340 | pci_write_config_dword(pdev, where + 4, 0); | ||
1341 | } | ||
1342 | |||
1343 | static void schizo_resource_adjust(struct pci_dev *pdev, | ||
1344 | struct resource *res, | ||
1345 | struct resource *root) | ||
1346 | { | ||
1347 | res->start += root->start; | ||
1348 | res->end += root->start; | ||
1349 | } | ||
1350 | |||
1351 | /* Use ranges property to determine where PCI MEM, I/O, and Config | ||
1352 | * space are for this PCI bus module. | ||
1353 | */ | ||
1354 | static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm) | ||
1355 | { | ||
1356 | int i, saw_cfg, saw_mem, saw_io; | ||
1357 | |||
1358 | saw_cfg = saw_mem = saw_io = 0; | ||
1359 | for (i = 0; i < pbm->num_pbm_ranges; i++) { | ||
1360 | struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; | ||
1361 | unsigned long a; | ||
1362 | int type; | ||
1363 | |||
1364 | type = (pr->child_phys_hi >> 24) & 0x3; | ||
1365 | a = (((unsigned long)pr->parent_phys_hi << 32UL) | | ||
1366 | ((unsigned long)pr->parent_phys_lo << 0UL)); | ||
1367 | |||
1368 | switch (type) { | ||
1369 | case 0: | ||
1370 | /* PCI config space, 16MB */ | ||
1371 | pbm->config_space = a; | ||
1372 | saw_cfg = 1; | ||
1373 | break; | ||
1374 | |||
1375 | case 1: | ||
1376 | /* 16-bit IO space, 16MB */ | ||
1377 | pbm->io_space.start = a; | ||
1378 | pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); | ||
1379 | pbm->io_space.flags = IORESOURCE_IO; | ||
1380 | saw_io = 1; | ||
1381 | break; | ||
1382 | |||
1383 | case 2: | ||
1384 | /* 32-bit MEM space, 2GB */ | ||
1385 | pbm->mem_space.start = a; | ||
1386 | pbm->mem_space.end = a + (0x80000000UL - 1UL); | ||
1387 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
1388 | saw_mem = 1; | ||
1389 | break; | ||
1390 | |||
1391 | default: | ||
1392 | break; | ||
1393 | }; | ||
1394 | } | ||
1395 | |||
1396 | if (!saw_cfg || !saw_io || !saw_mem) { | ||
1397 | prom_printf("%s: Fatal error, missing %s PBM range.\n", | ||
1398 | pbm->name, | ||
1399 | ((!saw_cfg ? | ||
1400 | "CFG" : | ||
1401 | (!saw_io ? | ||
1402 | "IO" : "MEM")))); | ||
1403 | prom_halt(); | ||
1404 | } | ||
1405 | |||
1406 | printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n", | ||
1407 | pbm->name, | ||
1408 | pbm->config_space, | ||
1409 | pbm->io_space.start, | ||
1410 | pbm->mem_space.start); | ||
1411 | } | ||
1412 | |||
1413 | static void pbm_register_toplevel_resources(struct pci_controller_info *p, | ||
1414 | struct pci_pbm_info *pbm) | ||
1415 | { | ||
1416 | pbm->io_space.name = pbm->mem_space.name = pbm->name; | ||
1417 | |||
1418 | request_resource(&ioport_resource, &pbm->io_space); | ||
1419 | request_resource(&iomem_resource, &pbm->mem_space); | ||
1420 | pci_register_legacy_regions(&pbm->io_space, | ||
1421 | &pbm->mem_space); | ||
1422 | } | ||
1423 | |||
1424 | #define SCHIZO_STRBUF_CONTROL (0x02800UL) | 1260 | #define SCHIZO_STRBUF_CONTROL (0x02800UL) |
1425 | #define SCHIZO_STRBUF_FLUSH (0x02808UL) | 1261 | #define SCHIZO_STRBUF_FLUSH (0x02808UL) |
1426 | #define SCHIZO_STRBUF_FSYNC (0x02810UL) | 1262 | #define SCHIZO_STRBUF_FSYNC (0x02810UL) |
@@ -1472,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) | |||
1472 | 1308 | ||
1473 | static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) | 1309 | static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) |
1474 | { | 1310 | { |
1475 | struct pci_iommu *iommu = pbm->iommu; | 1311 | struct iommu *iommu = pbm->iommu; |
1476 | unsigned long i, tagbase, database; | 1312 | unsigned long i, tagbase, database; |
1477 | struct property *prop; | 1313 | struct property *prop; |
1478 | u32 vdma[2], dma_mask; | 1314 | u32 vdma[2], dma_mask; |
@@ -1654,14 +1490,12 @@ static void schizo_pbm_init(struct pci_controller_info *p, | |||
1654 | struct device_node *dp, u32 portid, | 1490 | struct device_node *dp, u32 portid, |
1655 | int chip_type) | 1491 | int chip_type) |
1656 | { | 1492 | { |
1657 | struct linux_prom64_registers *regs; | 1493 | const struct linux_prom64_registers *regs; |
1658 | struct property *prop; | 1494 | const unsigned int *busrange; |
1659 | unsigned int *busrange; | ||
1660 | struct pci_pbm_info *pbm; | 1495 | struct pci_pbm_info *pbm; |
1661 | const char *chipset_name; | 1496 | const char *chipset_name; |
1662 | u32 *ino_bitmap; | 1497 | const u32 *ino_bitmap; |
1663 | int is_pbm_a; | 1498 | int is_pbm_a; |
1664 | int len; | ||
1665 | 1499 | ||
1666 | switch (chip_type) { | 1500 | switch (chip_type) { |
1667 | case PBM_CHIP_TYPE_TOMATILLO: | 1501 | case PBM_CHIP_TYPE_TOMATILLO: |
@@ -1689,11 +1523,9 @@ static void schizo_pbm_init(struct pci_controller_info *p, | |||
1689 | * 3) PBM PCI config space | 1523 | * 3) PBM PCI config space |
1690 | * 4) Ichip regs | 1524 | * 4) Ichip regs |
1691 | */ | 1525 | */ |
1692 | prop = of_find_property(dp, "reg", NULL); | 1526 | regs = of_get_property(dp, "reg", NULL); |
1693 | regs = prop->value; | ||
1694 | 1527 | ||
1695 | is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); | 1528 | is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); |
1696 | |||
1697 | if (is_pbm_a) | 1529 | if (is_pbm_a) |
1698 | pbm = &p->pbm_A; | 1530 | pbm = &p->pbm_A; |
1699 | else | 1531 | else |
@@ -1702,17 +1534,10 @@ static void schizo_pbm_init(struct pci_controller_info *p, | |||
1702 | pbm->portid = portid; | 1534 | pbm->portid = portid; |
1703 | pbm->parent = p; | 1535 | pbm->parent = p; |
1704 | pbm->prom_node = dp; | 1536 | pbm->prom_node = dp; |
1705 | pbm->pci_first_slot = 1; | ||
1706 | 1537 | ||
1707 | pbm->chip_type = chip_type; | 1538 | pbm->chip_type = chip_type; |
1708 | pbm->chip_version = 0; | 1539 | pbm->chip_version = of_getintprop_default(dp, "version#", 0); |
1709 | prop = of_find_property(dp, "version#", NULL); | 1540 | pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0); |
1710 | if (prop) | ||
1711 | pbm->chip_version = *(int *) prop->value; | ||
1712 | pbm->chip_revision = 0; | ||
1713 | prop = of_find_property(dp, "module-revision#", NULL); | ||
1714 | if (prop) | ||
1715 | pbm->chip_revision = *(int *) prop->value; | ||
1716 | 1541 | ||
1717 | pbm->pbm_regs = regs[0].phys_addr; | 1542 | pbm->pbm_regs = regs[0].phys_addr; |
1718 | pbm->controller_regs = regs[1].phys_addr - 0x10000UL; | 1543 | pbm->controller_regs = regs[1].phys_addr - 0x10000UL; |
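Editor's note on the hunk above: the open-coded of_find_property()/pointer-dereference pairs are folded into of_getintprop_default(), which returns the property's integer value or a caller-supplied default in one step. The following is a minimal userspace sketch of that idea only; the toy property table and helper are illustrative, not the kernel's implementation.

#include <stdio.h>
#include <string.h>

/* Toy property table standing in for an OF device node. */
struct prop { const char *name; int value; };

static const struct prop props[] = {
        { "version#", 7 },
};

/* Same shape as of_getintprop_default(): the property's integer value
 * if present, otherwise the supplied default. */
static int getintprop_default(const char *name, int def)
{
        size_t i;

        for (i = 0; i < sizeof(props) / sizeof(props[0]); i++)
                if (!strcmp(props[i].name, name))
                        return props[i].value;
        return def;
}

int main(void)
{
        int chip_version  = getintprop_default("version#", 0);
        int chip_revision = getintprop_default("module-revision#", 0);

        /* "module-revision#" is absent above, so the default 0 is used. */
        printf("ver[%x:%x]\n", chip_version, chip_revision);
        return 0;
}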
@@ -1723,40 +1548,18 @@ static void schizo_pbm_init(struct pci_controller_info *p, | |||
1723 | pbm->name = dp->full_name; | 1548 | pbm->name = dp->full_name; |
1724 | 1549 | ||
1725 | printk("%s: %s PCI Bus Module ver[%x:%x]\n", | 1550 | printk("%s: %s PCI Bus Module ver[%x:%x]\n", |
1726 | pbm->name, | 1551 | pbm->name, chipset_name, |
1727 | (chip_type == PBM_CHIP_TYPE_TOMATILLO ? | ||
1728 | "TOMATILLO" : "SCHIZO"), | ||
1729 | pbm->chip_version, pbm->chip_revision); | 1552 | pbm->chip_version, pbm->chip_revision); |
1730 | 1553 | ||
1731 | schizo_pbm_hw_init(pbm); | 1554 | schizo_pbm_hw_init(pbm); |
1732 | 1555 | ||
1733 | prop = of_find_property(dp, "ranges", &len); | 1556 | pci_determine_mem_io_space(pbm); |
1734 | pbm->pbm_ranges = prop->value; | ||
1735 | pbm->num_pbm_ranges = | ||
1736 | (len / sizeof(struct linux_prom_pci_ranges)); | ||
1737 | 1557 | ||
1738 | schizo_determine_mem_io_space(pbm); | 1558 | ino_bitmap = of_get_property(dp, "ino-bitmap", NULL); |
1739 | pbm_register_toplevel_resources(p, pbm); | ||
1740 | |||
1741 | prop = of_find_property(dp, "interrupt-map", &len); | ||
1742 | if (prop) { | ||
1743 | pbm->pbm_intmap = prop->value; | ||
1744 | pbm->num_pbm_intmap = | ||
1745 | (len / sizeof(struct linux_prom_pci_intmap)); | ||
1746 | |||
1747 | prop = of_find_property(dp, "interrupt-map-mask", NULL); | ||
1748 | pbm->pbm_intmask = prop->value; | ||
1749 | } else { | ||
1750 | pbm->num_pbm_intmap = 0; | ||
1751 | } | ||
1752 | |||
1753 | prop = of_find_property(dp, "ino-bitmap", NULL); | ||
1754 | ino_bitmap = prop->value; | ||
1755 | pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) | | 1559 | pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) | |
1756 | ((u64)ino_bitmap[0] << 0UL)); | 1560 | ((u64)ino_bitmap[0] << 0UL)); |
1757 | 1561 | ||
1758 | prop = of_find_property(dp, "bus-range", NULL); | 1562 | busrange = of_get_property(dp, "bus-range", NULL); |
1759 | busrange = prop->value; | ||
1760 | pbm->pci_first_busno = busrange[0]; | 1563 | pbm->pci_first_busno = busrange[0]; |
1761 | pbm->pci_last_busno = busrange[1]; | 1564 | pbm->pci_last_busno = busrange[1]; |
1762 | 1565 | ||
@@ -1777,15 +1580,10 @@ static inline int portid_compare(u32 x, u32 y, int chip_type) | |||
1777 | static void __schizo_init(struct device_node *dp, char *model_name, int chip_type) | 1580 | static void __schizo_init(struct device_node *dp, char *model_name, int chip_type) |
1778 | { | 1581 | { |
1779 | struct pci_controller_info *p; | 1582 | struct pci_controller_info *p; |
1780 | struct pci_iommu *iommu; | 1583 | struct iommu *iommu; |
1781 | struct property *prop; | ||
1782 | int is_pbm_a; | ||
1783 | u32 portid; | 1584 | u32 portid; |
1784 | 1585 | ||
1785 | portid = 0xff; | 1586 | portid = of_getintprop_default(dp, "portid", 0xff); |
1786 | prop = of_find_property(dp, "portid", NULL); | ||
1787 | if (prop) | ||
1788 | portid = *(u32 *) prop->value; | ||
1789 | 1587 | ||
1790 | for (p = pci_controller_root; p; p = p->next) { | 1588 | for (p = pci_controller_root; p; p = p->next) { |
1791 | struct pci_pbm_info *pbm; | 1589 | struct pci_pbm_info *pbm; |
@@ -1798,48 +1596,43 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ | |||
1798 | &p->pbm_B); | 1596 | &p->pbm_B); |
1799 | 1597 | ||
1800 | if (portid_compare(pbm->portid, portid, chip_type)) { | 1598 | if (portid_compare(pbm->portid, portid, chip_type)) { |
1801 | is_pbm_a = (p->pbm_A.prom_node == NULL); | ||
1802 | schizo_pbm_init(p, dp, portid, chip_type); | 1599 | schizo_pbm_init(p, dp, portid, chip_type); |
1803 | return; | 1600 | return; |
1804 | } | 1601 | } |
1805 | } | 1602 | } |
1806 | 1603 | ||
1807 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); | 1604 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
1808 | if (!p) { | 1605 | if (!p) |
1809 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 1606 | goto memfail; |
1810 | prom_halt(); | 1607 | |
1811 | } | 1608 | iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); |
1609 | if (!iommu) | ||
1610 | goto memfail; | ||
1812 | 1611 | ||
1813 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | ||
1814 | if (!iommu) { | ||
1815 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | ||
1816 | prom_halt(); | ||
1817 | } | ||
1818 | p->pbm_A.iommu = iommu; | 1612 | p->pbm_A.iommu = iommu; |
1819 | 1613 | ||
1820 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1614 | iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); |
1821 | if (!iommu) { | 1615 | if (!iommu) |
1822 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | 1616 | goto memfail; |
1823 | prom_halt(); | 1617 | |
1824 | } | ||
1825 | p->pbm_B.iommu = iommu; | 1618 | p->pbm_B.iommu = iommu; |
1826 | 1619 | ||
1827 | p->next = pci_controller_root; | 1620 | p->next = pci_controller_root; |
1828 | pci_controller_root = p; | 1621 | pci_controller_root = p; |
1829 | 1622 | ||
1830 | p->index = pci_num_controllers++; | 1623 | p->index = pci_num_controllers++; |
1831 | p->pbms_same_domain = 0; | 1624 | p->scan_bus = schizo_scan_bus; |
1832 | p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ? | ||
1833 | tomatillo_scan_bus : | ||
1834 | schizo_scan_bus); | ||
1835 | p->base_address_update = schizo_base_address_update; | ||
1836 | p->resource_adjust = schizo_resource_adjust; | ||
1837 | p->pci_ops = &schizo_ops; | 1625 | p->pci_ops = &schizo_ops; |
1838 | 1626 | ||
1839 | /* Like PSYCHO we have a 2GB aligned area for memory space. */ | 1627 | /* Like PSYCHO we have a 2GB aligned area for memory space. */ |
1840 | pci_memspace_mask = 0x7fffffffUL; | 1628 | pci_memspace_mask = 0x7fffffffUL; |
1841 | 1629 | ||
1842 | schizo_pbm_init(p, dp, portid, chip_type); | 1630 | schizo_pbm_init(p, dp, portid, chip_type); |
1631 | return; | ||
1632 | |||
1633 | memfail: | ||
1634 | prom_printf("SCHIZO: Fatal memory allocation error.\n"); | ||
1635 | prom_halt(); | ||
1843 | } | 1636 | } |
1844 | 1637 | ||
1845 | void schizo_init(struct device_node *dp, char *model_name) | 1638 | void schizo_init(struct device_node *dp, char *model_name) |
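Editor's note: replacing the duplicated prom_printf()/prom_halt() blocks in __schizo_init() with one memfail: label is the usual goto-based single-exit pattern: each allocation bails to one failure point instead of repeating the message. Below is a hedged, self-contained sketch of the same structure; the resource names are invented and the sketch frees rather than halting.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the controller and iommu allocations. */
static void *alloc_or_null(size_t sz) { return calloc(1, sz); }

static int bring_up_controller(void)
{
        void *p = NULL, *iommu_a = NULL, *iommu_b = NULL;

        p = alloc_or_null(128);
        if (!p)
                goto memfail;

        iommu_a = alloc_or_null(64);
        if (!iommu_a)
                goto memfail;

        iommu_b = alloc_or_null(64);
        if (!iommu_b)
                goto memfail;

        printf("controller initialized\n");
        return 0;

memfail:
        /* One failure path: report once, release whatever was obtained. */
        fprintf(stderr, "Fatal memory allocation error.\n");
        free(iommu_b);
        free(iommu_a);
        free(p);
        return -1;
}

int main(void) { return bring_up_controller() ? 1 : 0; }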
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index ec22cd61ec8c..94295c219329 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. | 1 | /* pci_sun4v.c: SUN4V specific PCI controller support. |
2 | * | 2 | * |
3 | * Copyright (C) 2006 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) | 30 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) |
31 | 31 | ||
32 | struct pci_iommu_batch { | 32 | struct iommu_batch { |
33 | struct pci_dev *pdev; /* Device mapping is for. */ | 33 | struct pci_dev *pdev; /* Device mapping is for. */ |
34 | unsigned long prot; /* IOMMU page protections */ | 34 | unsigned long prot; /* IOMMU page protections */ |
35 | unsigned long entry; /* Index into IOTSB. */ | 35 | unsigned long entry; /* Index into IOTSB. */ |
@@ -37,12 +37,12 @@ struct pci_iommu_batch { | |||
37 | unsigned long npages; /* Number of pages in list. */ | 37 | unsigned long npages; /* Number of pages in list. */ |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); | 40 | static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch); |
41 | 41 | ||
42 | /* Interrupts must be disabled. */ | 42 | /* Interrupts must be disabled. */ |
43 | static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) | 43 | static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) |
44 | { | 44 | { |
45 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | 45 | struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); |
46 | 46 | ||
47 | p->pdev = pdev; | 47 | p->pdev = pdev; |
48 | p->prot = prot; | 48 | p->prot = prot; |
@@ -51,10 +51,10 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro | |||
51 | } | 51 | } |
52 | 52 | ||
53 | /* Interrupts must be disabled. */ | 53 | /* Interrupts must be disabled. */ |
54 | static long pci_iommu_batch_flush(struct pci_iommu_batch *p) | 54 | static long pci_iommu_batch_flush(struct iommu_batch *p) |
55 | { | 55 | { |
56 | struct pcidev_cookie *pcp = p->pdev->sysdata; | 56 | struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller; |
57 | unsigned long devhandle = pcp->pbm->devhandle; | 57 | unsigned long devhandle = pbm->devhandle; |
58 | unsigned long prot = p->prot; | 58 | unsigned long prot = p->prot; |
59 | unsigned long entry = p->entry; | 59 | unsigned long entry = p->entry; |
60 | u64 *pglist = p->pglist; | 60 | u64 *pglist = p->pglist; |
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p) | |||
89 | /* Interrupts must be disabled. */ | 89 | /* Interrupts must be disabled. */ |
90 | static inline long pci_iommu_batch_add(u64 phys_page) | 90 | static inline long pci_iommu_batch_add(u64 phys_page) |
91 | { | 91 | { |
92 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | 92 | struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); |
93 | 93 | ||
94 | BUG_ON(p->npages >= PGLIST_NENTS); | 94 | BUG_ON(p->npages >= PGLIST_NENTS); |
95 | 95 | ||
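Editor's note: the iommu_batch hunks accumulate physical pages in a small per-CPU list and only issue the expensive mapping call when the list fills or the caller ends the batch. The sketch below models that accumulate-then-flush flow in plain C; the flush is a printf standing in for the real mapping hypercall, and a single static batch ignores the per-CPU aspect.

#include <stdio.h>

#define PGLIST_NENTS 8   /* kernel uses PAGE_SIZE / sizeof(u64) */

struct iommu_batch {
        unsigned long entry;                  /* first IOTSB index of batch */
        unsigned long npages;                 /* pages accumulated so far   */
        unsigned long pglist[PGLIST_NENTS];   /* physical page addresses    */
};

static struct iommu_batch batch;

static void batch_flush(void)
{
        /* Real code makes one mapping call covering npages entries. */
        printf("flush: map %lu pages starting at entry %lu\n",
               batch.npages, batch.entry);
        batch.entry += batch.npages;
        batch.npages = 0;
}

static void batch_start(unsigned long entry)
{
        batch.entry  = entry;
        batch.npages = 0;
}

static void batch_add(unsigned long phys_page)
{
        batch.pglist[batch.npages++] = phys_page;
        if (batch.npages >= PGLIST_NENTS)   /* list full: flush eagerly */
                batch_flush();
}

static void batch_end(void)
{
        if (batch.npages)
                batch_flush();
}

int main(void)
{
        unsigned long pfn;

        batch_start(100);
        for (pfn = 0; pfn < 19; pfn++)
                batch_add(pfn << 13);   /* 8K IO pages, as an example */
        batch_end();
        return 0;
}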
@@ -103,14 +103,14 @@ static inline long pci_iommu_batch_add(u64 phys_page) | |||
103 | /* Interrupts must be disabled. */ | 103 | /* Interrupts must be disabled. */ |
104 | static inline long pci_iommu_batch_end(void) | 104 | static inline long pci_iommu_batch_end(void) |
105 | { | 105 | { |
106 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | 106 | struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); |
107 | 107 | ||
108 | BUG_ON(p->npages >= PGLIST_NENTS); | 108 | BUG_ON(p->npages >= PGLIST_NENTS); |
109 | 109 | ||
110 | return pci_iommu_batch_flush(p); | 110 | return pci_iommu_batch_flush(p); |
111 | } | 111 | } |
112 | 112 | ||
113 | static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) | 113 | static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages) |
114 | { | 114 | { |
115 | unsigned long n, i, start, end, limit; | 115 | unsigned long n, i, start, end, limit; |
116 | int pass; | 116 | int pass; |
@@ -149,7 +149,7 @@ again: | |||
149 | return n; | 149 | return n; |
150 | } | 150 | } |
151 | 151 | ||
152 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | 152 | static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) |
153 | { | 153 | { |
154 | unsigned long i; | 154 | unsigned long i; |
155 | 155 | ||
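Editor's note: pci_arena_alloc()/pci_arena_free() now operate on the generic struct iommu_arena: a map of IOTSB entries plus a rotating hint, with one wrap back to the start of the map before giving up. The compact userspace model below captures that search order; it uses one byte per entry instead of a bitmap and omits locking, so it is a sketch rather than the kernel's allocator.

#include <stdio.h>
#include <string.h>

#define ARENA_LIMIT 64

struct iommu_arena {
        unsigned char map[ARENA_LIMIT];   /* one byte per entry for clarity */
        unsigned int hint;                /* where the next search starts   */
        unsigned int limit;
};

static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
        unsigned int start = arena->hint;
        unsigned long n, i;
        int pass = 0;

again:
        for (n = start; n + npages <= arena->limit; n++) {
                for (i = 0; i < npages; i++)
                        if (arena->map[n + i])
                                break;
                if (i == npages) {              /* found a free run */
                        memset(&arena->map[n], 1, npages);
                        arena->hint = n + npages;
                        return (long)n;
                }
        }
        if (pass == 0) {                        /* wrap once, then fail */
                pass = 1;
                start = 0;
                goto again;
        }
        return -1;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
                       unsigned long npages)
{
        memset(&arena->map[base], 0, npages);
}

int main(void)
{
        struct iommu_arena arena = { .hint = 0, .limit = ARENA_LIMIT };
        long a = arena_alloc(&arena, 4);
        long b = arena_alloc(&arena, 8);

        printf("alloc 4 -> %ld, alloc 8 -> %ld\n", a, b);
        arena_free(&arena, a, 4);
        printf("freed first run, alloc 2 -> %ld\n", arena_alloc(&arena, 2));
        return 0;
}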
@@ -159,8 +159,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un | |||
159 | 159 | ||
160 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) | 160 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) |
161 | { | 161 | { |
162 | struct pcidev_cookie *pcp; | 162 | struct iommu *iommu; |
163 | struct pci_iommu *iommu; | ||
164 | unsigned long flags, order, first_page, npages, n; | 163 | unsigned long flags, order, first_page, npages, n; |
165 | void *ret; | 164 | void *ret; |
166 | long entry; | 165 | long entry; |
@@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr | |||
178 | 177 | ||
179 | memset((char *)first_page, 0, PAGE_SIZE << order); | 178 | memset((char *)first_page, 0, PAGE_SIZE << order); |
180 | 179 | ||
181 | pcp = pdev->sysdata; | 180 | iommu = pdev->dev.archdata.iommu; |
182 | iommu = pcp->pbm->iommu; | ||
183 | 181 | ||
184 | spin_lock_irqsave(&iommu->lock, flags); | 182 | spin_lock_irqsave(&iommu->lock, flags); |
185 | entry = pci_arena_alloc(&iommu->arena, npages); | 183 | entry = pci_arena_alloc(&iommu->arena, npages); |
@@ -226,15 +224,15 @@ arena_alloc_fail: | |||
226 | 224 | ||
227 | static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | 225 | static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) |
228 | { | 226 | { |
229 | struct pcidev_cookie *pcp; | 227 | struct pci_pbm_info *pbm; |
230 | struct pci_iommu *iommu; | 228 | struct iommu *iommu; |
231 | unsigned long flags, order, npages, entry; | 229 | unsigned long flags, order, npages, entry; |
232 | u32 devhandle; | 230 | u32 devhandle; |
233 | 231 | ||
234 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 232 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
235 | pcp = pdev->sysdata; | 233 | iommu = pdev->dev.archdata.iommu; |
236 | iommu = pcp->pbm->iommu; | 234 | pbm = pdev->dev.archdata.host_controller; |
237 | devhandle = pcp->pbm->devhandle; | 235 | devhandle = pbm->devhandle; |
238 | entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | 236 | entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
239 | 237 | ||
240 | spin_lock_irqsave(&iommu->lock, flags); | 238 | spin_lock_irqsave(&iommu->lock, flags); |
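Editor's note: throughout pci_sun4v.c the pcidev_cookie hung off pdev->sysdata gives way to fields in pdev->dev.archdata, so the IOMMU and host controller are reached directly instead of through a separately allocated cookie. The fragment below is a hedged before/after illustration built on simplified stand-in types; the real struct dev_archdata layout lives in the sparc64 headers, not here.

#include <stdio.h>

struct iommu        { int dummy; };
struct pci_pbm_info { unsigned long devhandle; struct iommu *iommu; };

/* Old scheme: a separately allocated cookie reached through sysdata. */
struct pcidev_cookie { struct pci_pbm_info *pbm; };

/* New scheme: the pointers live directly in per-device arch data. */
struct dev_archdata { struct iommu *iommu; struct pci_pbm_info *host_controller; };
struct device       { struct dev_archdata archdata; };
struct pci_dev      { void *sysdata; struct device dev; };

int main(void)
{
        struct iommu iommu = { 0 };
        struct pci_pbm_info pbm = { .devhandle = 0x100, .iommu = &iommu };
        struct pcidev_cookie cookie = { .pbm = &pbm };
        struct pci_dev pdev = { .sysdata = &cookie };

        pdev.dev.archdata.iommu = &iommu;
        pdev.dev.archdata.host_controller = &pbm;

        /* Old: two hops through an untyped pointer. */
        struct pcidev_cookie *pcp = pdev.sysdata;
        printf("old: devhandle %#lx\n", pcp->pbm->devhandle);

        /* New: typed access, no cookie allocation to manage. */
        printf("new: devhandle %#lx\n",
               pdev.dev.archdata.host_controller->devhandle);
        return 0;
}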
@@ -259,16 +257,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, | |||
259 | 257 | ||
260 | static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | 258 | static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) |
261 | { | 259 | { |
262 | struct pcidev_cookie *pcp; | 260 | struct iommu *iommu; |
263 | struct pci_iommu *iommu; | ||
264 | unsigned long flags, npages, oaddr; | 261 | unsigned long flags, npages, oaddr; |
265 | unsigned long i, base_paddr; | 262 | unsigned long i, base_paddr; |
266 | u32 bus_addr, ret; | 263 | u32 bus_addr, ret; |
267 | unsigned long prot; | 264 | unsigned long prot; |
268 | long entry; | 265 | long entry; |
269 | 266 | ||
270 | pcp = pdev->sysdata; | 267 | iommu = pdev->dev.archdata.iommu; |
271 | iommu = pcp->pbm->iommu; | ||
272 | 268 | ||
273 | if (unlikely(direction == PCI_DMA_NONE)) | 269 | if (unlikely(direction == PCI_DMA_NONE)) |
274 | goto bad; | 270 | goto bad; |
@@ -324,8 +320,8 @@ iommu_map_fail: | |||
324 | 320 | ||
325 | static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | 321 | static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) |
326 | { | 322 | { |
327 | struct pcidev_cookie *pcp; | 323 | struct pci_pbm_info *pbm; |
328 | struct pci_iommu *iommu; | 324 | struct iommu *iommu; |
329 | unsigned long flags, npages; | 325 | unsigned long flags, npages; |
330 | long entry; | 326 | long entry; |
331 | u32 devhandle; | 327 | u32 devhandle; |
@@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ | |||
336 | return; | 332 | return; |
337 | } | 333 | } |
338 | 334 | ||
339 | pcp = pdev->sysdata; | 335 | iommu = pdev->dev.archdata.iommu; |
340 | iommu = pcp->pbm->iommu; | 336 | pbm = pdev->dev.archdata.host_controller; |
341 | devhandle = pcp->pbm->devhandle; | 337 | devhandle = pbm->devhandle; |
342 | 338 | ||
343 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | 339 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); |
344 | npages >>= IO_PAGE_SHIFT; | 340 | npages >>= IO_PAGE_SHIFT; |
@@ -460,8 +456,7 @@ iommu_map_failed: | |||
460 | 456 | ||
461 | static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 457 | static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
462 | { | 458 | { |
463 | struct pcidev_cookie *pcp; | 459 | struct iommu *iommu; |
464 | struct pci_iommu *iommu; | ||
465 | unsigned long flags, npages, prot; | 460 | unsigned long flags, npages, prot; |
466 | u32 dma_base; | 461 | u32 dma_base; |
467 | struct scatterlist *sgtmp; | 462 | struct scatterlist *sgtmp; |
@@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n | |||
480 | return 1; | 475 | return 1; |
481 | } | 476 | } |
482 | 477 | ||
483 | pcp = pdev->sysdata; | 478 | iommu = pdev->dev.archdata.iommu; |
484 | iommu = pcp->pbm->iommu; | ||
485 | 479 | ||
486 | if (unlikely(direction == PCI_DMA_NONE)) | 480 | if (unlikely(direction == PCI_DMA_NONE)) |
487 | goto bad; | 481 | goto bad; |
@@ -537,8 +531,8 @@ iommu_map_failed: | |||
537 | 531 | ||
538 | static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | 532 | static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) |
539 | { | 533 | { |
540 | struct pcidev_cookie *pcp; | 534 | struct pci_pbm_info *pbm; |
541 | struct pci_iommu *iommu; | 535 | struct iommu *iommu; |
542 | unsigned long flags, i, npages; | 536 | unsigned long flags, i, npages; |
543 | long entry; | 537 | long entry; |
544 | u32 devhandle, bus_addr; | 538 | u32 devhandle, bus_addr; |
@@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in | |||
548 | WARN_ON(1); | 542 | WARN_ON(1); |
549 | } | 543 | } |
550 | 544 | ||
551 | pcp = pdev->sysdata; | 545 | iommu = pdev->dev.archdata.iommu; |
552 | iommu = pcp->pbm->iommu; | 546 | pbm = pdev->dev.archdata.host_controller; |
553 | devhandle = pcp->pbm->devhandle; | 547 | devhandle = pbm->devhandle; |
554 | 548 | ||
555 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | 549 | bus_addr = sglist->dma_address & IO_PAGE_MASK; |
556 | 550 | ||
@@ -589,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist | |||
589 | /* Nothing to do... */ | 583 | /* Nothing to do... */ |
590 | } | 584 | } |
591 | 585 | ||
592 | struct pci_iommu_ops pci_sun4v_iommu_ops = { | 586 | const struct pci_iommu_ops pci_sun4v_iommu_ops = { |
593 | .alloc_consistent = pci_4v_alloc_consistent, | 587 | .alloc_consistent = pci_4v_alloc_consistent, |
594 | .free_consistent = pci_4v_free_consistent, | 588 | .free_consistent = pci_4v_free_consistent, |
595 | .map_single = pci_4v_map_single, | 589 | .map_single = pci_4v_map_single, |
@@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { | |||
600 | .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, | 594 | .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, |
601 | }; | 595 | }; |
602 | 596 | ||
603 | /* SUN4V PCI configuration space accessors. */ | ||
604 | |||
605 | struct pdev_entry { | ||
606 | struct pdev_entry *next; | ||
607 | u32 devhandle; | ||
608 | unsigned int bus; | ||
609 | unsigned int device; | ||
610 | unsigned int func; | ||
611 | }; | ||
612 | |||
613 | #define PDEV_HTAB_SIZE 16 | ||
614 | #define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1) | ||
615 | static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE]; | ||
616 | |||
617 | static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) | ||
618 | { | ||
619 | unsigned int val; | ||
620 | |||
621 | val = (devhandle ^ (devhandle >> 4)); | ||
622 | val ^= bus; | ||
623 | val ^= device; | ||
624 | val ^= func; | ||
625 | |||
626 | return val & PDEV_HTAB_MASK; | ||
627 | } | ||
628 | |||
629 | static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) | ||
630 | { | ||
631 | struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
632 | struct pdev_entry **slot; | ||
633 | |||
634 | if (!p) | ||
635 | return -ENOMEM; | ||
636 | |||
637 | slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; | ||
638 | p->next = *slot; | ||
639 | *slot = p; | ||
640 | |||
641 | p->devhandle = devhandle; | ||
642 | p->bus = bus; | ||
643 | p->device = device; | ||
644 | p->func = func; | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | /* Recursively descend into the OBP device tree, rooted at toplevel_node, | ||
650 | * looking for a PCI device matching bus and devfn. | ||
651 | */ | ||
652 | static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn) | ||
653 | { | ||
654 | toplevel_node = toplevel_node->child; | ||
655 | |||
656 | while (toplevel_node != NULL) { | ||
657 | struct linux_prom_pci_registers *regs; | ||
658 | struct property *prop; | ||
659 | int ret; | ||
660 | |||
661 | ret = obp_find(toplevel_node, bus, devfn); | ||
662 | if (ret != 0) | ||
663 | return ret; | ||
664 | |||
665 | prop = of_find_property(toplevel_node, "reg", NULL); | ||
666 | if (!prop) | ||
667 | goto next_sibling; | ||
668 | |||
669 | regs = prop->value; | ||
670 | if (((regs->phys_hi >> 16) & 0xff) == bus && | ||
671 | ((regs->phys_hi >> 8) & 0xff) == devfn) | ||
672 | break; | ||
673 | |||
674 | next_sibling: | ||
675 | toplevel_node = toplevel_node->sibling; | ||
676 | } | ||
677 | |||
678 | return toplevel_node != NULL; | ||
679 | } | ||
680 | |||
681 | static int pdev_htab_populate(struct pci_pbm_info *pbm) | ||
682 | { | ||
683 | u32 devhandle = pbm->devhandle; | ||
684 | unsigned int bus; | ||
685 | |||
686 | for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) { | ||
687 | unsigned int devfn; | ||
688 | |||
689 | for (devfn = 0; devfn < 256; devfn++) { | ||
690 | unsigned int device = PCI_SLOT(devfn); | ||
691 | unsigned int func = PCI_FUNC(devfn); | ||
692 | |||
693 | if (obp_find(pbm->prom_node, bus, devfn)) { | ||
694 | int err = pdev_htab_add(devhandle, bus, | ||
695 | device, func); | ||
696 | if (err) | ||
697 | return err; | ||
698 | } | ||
699 | } | ||
700 | } | ||
701 | |||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) | ||
706 | { | ||
707 | struct pdev_entry *p; | ||
708 | |||
709 | p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; | ||
710 | while (p) { | ||
711 | if (p->devhandle == devhandle && | ||
712 | p->bus == bus && | ||
713 | p->device == device && | ||
714 | p->func == func) | ||
715 | break; | ||
716 | |||
717 | p = p->next; | ||
718 | } | ||
719 | |||
720 | return p; | ||
721 | } | ||
722 | |||
723 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) | 597 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) |
724 | { | 598 | { |
725 | if (bus < pbm->pci_first_busno || | 599 | if (bus < pbm->pci_first_busno || |
726 | bus > pbm->pci_last_busno) | 600 | bus > pbm->pci_last_busno) |
727 | return 1; | 601 | return 1; |
728 | return pdev_find(pbm->devhandle, bus, device, func) == NULL; | 602 | return 0; |
729 | } | 603 | } |
730 | 604 | ||
731 | static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | 605 | static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, |
@@ -738,6 +612,9 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
738 | unsigned int func = PCI_FUNC(devfn); | 612 | unsigned int func = PCI_FUNC(devfn); |
739 | unsigned long ret; | 613 | unsigned long ret; |
740 | 614 | ||
615 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
616 | return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, | ||
617 | size, value); | ||
741 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | 618 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { |
742 | ret = ~0UL; | 619 | ret = ~0UL; |
743 | } else { | 620 | } else { |
@@ -776,6 +653,9 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |||
776 | unsigned int func = PCI_FUNC(devfn); | 653 | unsigned int func = PCI_FUNC(devfn); |
777 | unsigned long ret; | 654 | unsigned long ret; |
778 | 655 | ||
656 | if (bus_dev == pbm->pci_bus && devfn == 0x00) | ||
657 | return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, | ||
658 | size, value); | ||
779 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { | 659 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { |
780 | /* Do nothing. */ | 660 | /* Do nothing. */ |
781 | } else { | 661 | } else { |
@@ -800,27 +680,7 @@ static struct pci_ops pci_sun4v_ops = { | |||
800 | static void pbm_scan_bus(struct pci_controller_info *p, | 680 | static void pbm_scan_bus(struct pci_controller_info *p, |
801 | struct pci_pbm_info *pbm) | 681 | struct pci_pbm_info *pbm) |
802 | { | 682 | { |
803 | struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | 683 | pbm->pci_bus = pci_scan_one_pbm(pbm); |
804 | |||
805 | if (!cookie) { | ||
806 | prom_printf("%s: Critical allocation failure.\n", pbm->name); | ||
807 | prom_halt(); | ||
808 | } | ||
809 | |||
810 | /* All we care about is the PBM. */ | ||
811 | cookie->pbm = pbm; | ||
812 | |||
813 | pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); | ||
814 | #if 0 | ||
815 | pci_fixup_host_bridge_self(pbm->pci_bus); | ||
816 | pbm->pci_bus->self->sysdata = cookie; | ||
817 | #endif | ||
818 | pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); | ||
819 | pci_record_assignments(pbm, pbm->pci_bus); | ||
820 | pci_assign_unassigned(pbm, pbm->pci_bus); | ||
821 | pci_fixup_irq(pbm, pbm->pci_bus); | ||
822 | pci_determine_66mhz_disposition(pbm, pbm->pci_bus); | ||
823 | pci_setup_busmastering(pbm, pbm->pci_bus); | ||
824 | } | 684 | } |
825 | 685 | ||
826 | static void pci_sun4v_scan_bus(struct pci_controller_info *p) | 686 | static void pci_sun4v_scan_bus(struct pci_controller_info *p) |
@@ -844,130 +704,10 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) | |||
844 | /* XXX register error interrupt handlers XXX */ | 704 | /* XXX register error interrupt handlers XXX */ |
845 | } | 705 | } |
846 | 706 | ||
847 | static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) | ||
848 | { | ||
849 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
850 | struct pci_pbm_info *pbm = pcp->pbm; | ||
851 | struct resource *res, *root; | ||
852 | u32 reg; | ||
853 | int where, size, is_64bit; | ||
854 | |||
855 | res = &pdev->resource[resource]; | ||
856 | if (resource < 6) { | ||
857 | where = PCI_BASE_ADDRESS_0 + (resource * 4); | ||
858 | } else if (resource == PCI_ROM_RESOURCE) { | ||
859 | where = pdev->rom_base_reg; | ||
860 | } else { | ||
861 | /* Somebody might have asked allocation of a non-standard resource */ | ||
862 | return; | ||
863 | } | ||
864 | |||
865 | /* XXX 64-bit MEM handling is not %100 correct... XXX */ | ||
866 | is_64bit = 0; | ||
867 | if (res->flags & IORESOURCE_IO) | ||
868 | root = &pbm->io_space; | ||
869 | else { | ||
870 | root = &pbm->mem_space; | ||
871 | if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) | ||
872 | == PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
873 | is_64bit = 1; | ||
874 | } | ||
875 | |||
876 | size = res->end - res->start; | ||
877 | pci_read_config_dword(pdev, where, ®); | ||
878 | reg = ((reg & size) | | ||
879 | (((u32)(res->start - root->start)) & ~size)); | ||
880 | if (resource == PCI_ROM_RESOURCE) { | ||
881 | reg |= PCI_ROM_ADDRESS_ENABLE; | ||
882 | res->flags |= IORESOURCE_ROM_ENABLE; | ||
883 | } | ||
884 | pci_write_config_dword(pdev, where, reg); | ||
885 | |||
886 | /* This knows that the upper 32-bits of the address | ||
887 | * must be zero. Our PCI common layer enforces this. | ||
888 | */ | ||
889 | if (is_64bit) | ||
890 | pci_write_config_dword(pdev, where + 4, 0); | ||
891 | } | ||
892 | |||
893 | static void pci_sun4v_resource_adjust(struct pci_dev *pdev, | ||
894 | struct resource *res, | ||
895 | struct resource *root) | ||
896 | { | ||
897 | res->start += root->start; | ||
898 | res->end += root->start; | ||
899 | } | ||
900 | |||
901 | /* Use ranges property to determine where PCI MEM, I/O, and Config | ||
902 | * space are for this PCI bus module. | ||
903 | */ | ||
904 | static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) | ||
905 | { | ||
906 | int i, saw_mem, saw_io; | ||
907 | |||
908 | saw_mem = saw_io = 0; | ||
909 | for (i = 0; i < pbm->num_pbm_ranges; i++) { | ||
910 | struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; | ||
911 | unsigned long a; | ||
912 | int type; | ||
913 | |||
914 | type = (pr->child_phys_hi >> 24) & 0x3; | ||
915 | a = (((unsigned long)pr->parent_phys_hi << 32UL) | | ||
916 | ((unsigned long)pr->parent_phys_lo << 0UL)); | ||
917 | |||
918 | switch (type) { | ||
919 | case 1: | ||
920 | /* 16-bit IO space, 16MB */ | ||
921 | pbm->io_space.start = a; | ||
922 | pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); | ||
923 | pbm->io_space.flags = IORESOURCE_IO; | ||
924 | saw_io = 1; | ||
925 | break; | ||
926 | |||
927 | case 2: | ||
928 | /* 32-bit MEM space, 2GB */ | ||
929 | pbm->mem_space.start = a; | ||
930 | pbm->mem_space.end = a + (0x80000000UL - 1UL); | ||
931 | pbm->mem_space.flags = IORESOURCE_MEM; | ||
932 | saw_mem = 1; | ||
933 | break; | ||
934 | |||
935 | case 3: | ||
936 | /* XXX 64-bit MEM handling XXX */ | ||
937 | |||
938 | default: | ||
939 | break; | ||
940 | }; | ||
941 | } | ||
942 | |||
943 | if (!saw_io || !saw_mem) { | ||
944 | prom_printf("%s: Fatal error, missing %s PBM range.\n", | ||
945 | pbm->name, | ||
946 | (!saw_io ? "IO" : "MEM")); | ||
947 | prom_halt(); | ||
948 | } | ||
949 | |||
950 | printk("%s: PCI IO[%lx] MEM[%lx]\n", | ||
951 | pbm->name, | ||
952 | pbm->io_space.start, | ||
953 | pbm->mem_space.start); | ||
954 | } | ||
955 | |||
956 | static void pbm_register_toplevel_resources(struct pci_controller_info *p, | ||
957 | struct pci_pbm_info *pbm) | ||
958 | { | ||
959 | pbm->io_space.name = pbm->mem_space.name = pbm->name; | ||
960 | |||
961 | request_resource(&ioport_resource, &pbm->io_space); | ||
962 | request_resource(&iomem_resource, &pbm->mem_space); | ||
963 | pci_register_legacy_regions(&pbm->io_space, | ||
964 | &pbm->mem_space); | ||
965 | } | ||
966 | |||
967 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, | 707 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, |
968 | struct pci_iommu *iommu) | 708 | struct iommu *iommu) |
969 | { | 709 | { |
970 | struct pci_iommu_arena *arena = &iommu->arena; | 710 | struct iommu_arena *arena = &iommu->arena; |
971 | unsigned long i, cnt = 0; | 711 | unsigned long i, cnt = 0; |
972 | u32 devhandle; | 712 | u32 devhandle; |
973 | 713 | ||
@@ -994,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, | |||
994 | 734 | ||
995 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) | 735 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) |
996 | { | 736 | { |
997 | struct pci_iommu *iommu = pbm->iommu; | 737 | struct iommu *iommu = pbm->iommu; |
998 | struct property *prop; | 738 | struct property *prop; |
999 | unsigned long num_tsb_entries, sz; | 739 | unsigned long num_tsb_entries, sz; |
1000 | u32 vdma[2], dma_mask, dma_offset; | 740 | u32 vdma[2], dma_mask, dma_offset; |
@@ -1281,7 +1021,7 @@ h_error: | |||
1281 | 1021 | ||
1282 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | 1022 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) |
1283 | { | 1023 | { |
1284 | u32 *val; | 1024 | const u32 *val; |
1285 | int len; | 1025 | int len; |
1286 | 1026 | ||
1287 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); | 1027 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); |
@@ -1289,16 +1029,16 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | |||
1289 | goto no_msi; | 1029 | goto no_msi; |
1290 | pbm->msiq_num = *val; | 1030 | pbm->msiq_num = *val; |
1291 | if (pbm->msiq_num) { | 1031 | if (pbm->msiq_num) { |
1292 | struct msiq_prop { | 1032 | const struct msiq_prop { |
1293 | u32 first_msiq; | 1033 | u32 first_msiq; |
1294 | u32 num_msiq; | 1034 | u32 num_msiq; |
1295 | u32 first_devino; | 1035 | u32 first_devino; |
1296 | } *mqp; | 1036 | } *mqp; |
1297 | struct msi_range_prop { | 1037 | const struct msi_range_prop { |
1298 | u32 first_msi; | 1038 | u32 first_msi; |
1299 | u32 num_msi; | 1039 | u32 num_msi; |
1300 | } *mrng; | 1040 | } *mrng; |
1301 | struct addr_range_prop { | 1041 | const struct addr_range_prop { |
1302 | u32 msi32_high; | 1042 | u32 msi32_high; |
1303 | u32 msi32_low; | 1043 | u32 msi32_low; |
1304 | u32 msi32_len; | 1044 | u32 msi32_len; |
@@ -1410,8 +1150,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, | |||
1410 | struct pci_dev *pdev, | 1150 | struct pci_dev *pdev, |
1411 | struct msi_desc *entry) | 1151 | struct msi_desc *entry) |
1412 | { | 1152 | { |
1413 | struct pcidev_cookie *pcp = pdev->sysdata; | 1153 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
1414 | struct pci_pbm_info *pbm = pcp->pbm; | ||
1415 | unsigned long devino, msiqid; | 1154 | unsigned long devino, msiqid; |
1416 | struct msi_msg msg; | 1155 | struct msi_msg msg; |
1417 | int msi_num, err; | 1156 | int msi_num, err; |
@@ -1455,7 +1194,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, | |||
1455 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) | 1194 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) |
1456 | goto out_err; | 1195 | goto out_err; |
1457 | 1196 | ||
1458 | pcp->msi_num = msi_num; | 1197 | pdev->dev.archdata.msi_num = msi_num; |
1459 | 1198 | ||
1460 | if (entry->msi_attrib.is_64) { | 1199 | if (entry->msi_attrib.is_64) { |
1461 | msg.address_hi = pbm->msi64_start >> 32; | 1200 | msg.address_hi = pbm->msi64_start >> 32; |
@@ -1484,12 +1223,11 @@ out_err: | |||
1484 | static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, | 1223 | static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, |
1485 | struct pci_dev *pdev) | 1224 | struct pci_dev *pdev) |
1486 | { | 1225 | { |
1487 | struct pcidev_cookie *pcp = pdev->sysdata; | 1226 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
1488 | struct pci_pbm_info *pbm = pcp->pbm; | ||
1489 | unsigned long msiqid, err; | 1227 | unsigned long msiqid, err; |
1490 | unsigned int msi_num; | 1228 | unsigned int msi_num; |
1491 | 1229 | ||
1492 | msi_num = pcp->msi_num; | 1230 | msi_num = pdev->dev.archdata.msi_num; |
1493 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); | 1231 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); |
1494 | if (err) { | 1232 | if (err) { |
1495 | printk(KERN_ERR "%s: getmsiq gives error %lu\n", | 1233 | printk(KERN_ERR "%s: getmsiq gives error %lu\n", |
@@ -1516,8 +1254,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | |||
1516 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) | 1254 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) |
1517 | { | 1255 | { |
1518 | struct pci_pbm_info *pbm; | 1256 | struct pci_pbm_info *pbm; |
1519 | struct property *prop; | ||
1520 | int len, i; | ||
1521 | 1257 | ||
1522 | if (devhandle & 0x40) | 1258 | if (devhandle & 0x40) |
1523 | pbm = &p->pbm_B; | 1259 | pbm = &p->pbm_B; |
@@ -1526,7 +1262,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node | |||
1526 | 1262 | ||
1527 | pbm->parent = p; | 1263 | pbm->parent = p; |
1528 | pbm->prom_node = dp; | 1264 | pbm->prom_node = dp; |
1529 | pbm->pci_first_slot = 1; | ||
1530 | 1265 | ||
1531 | pbm->devhandle = devhandle; | 1266 | pbm->devhandle = devhandle; |
1532 | 1267 | ||
@@ -1534,39 +1269,17 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node | |||
1534 | 1269 | ||
1535 | printk("%s: SUN4V PCI Bus Module\n", pbm->name); | 1270 | printk("%s: SUN4V PCI Bus Module\n", pbm->name); |
1536 | 1271 | ||
1537 | prop = of_find_property(dp, "ranges", &len); | 1272 | pci_determine_mem_io_space(pbm); |
1538 | pbm->pbm_ranges = prop->value; | ||
1539 | pbm->num_pbm_ranges = | ||
1540 | (len / sizeof(struct linux_prom_pci_ranges)); | ||
1541 | |||
1542 | /* Mask out the top 8 bits of the ranges, leaving the real | ||
1543 | * physical address. | ||
1544 | */ | ||
1545 | for (i = 0; i < pbm->num_pbm_ranges; i++) | ||
1546 | pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff; | ||
1547 | |||
1548 | pci_sun4v_determine_mem_io_space(pbm); | ||
1549 | pbm_register_toplevel_resources(p, pbm); | ||
1550 | |||
1551 | prop = of_find_property(dp, "interrupt-map", &len); | ||
1552 | pbm->pbm_intmap = prop->value; | ||
1553 | pbm->num_pbm_intmap = | ||
1554 | (len / sizeof(struct linux_prom_pci_intmap)); | ||
1555 | |||
1556 | prop = of_find_property(dp, "interrupt-map-mask", NULL); | ||
1557 | pbm->pbm_intmask = prop->value; | ||
1558 | 1273 | ||
1559 | pci_sun4v_get_bus_range(pbm); | 1274 | pci_sun4v_get_bus_range(pbm); |
1560 | pci_sun4v_iommu_init(pbm); | 1275 | pci_sun4v_iommu_init(pbm); |
1561 | pci_sun4v_msi_init(pbm); | 1276 | pci_sun4v_msi_init(pbm); |
1562 | |||
1563 | pdev_htab_populate(pbm); | ||
1564 | } | 1277 | } |
1565 | 1278 | ||
1566 | void sun4v_pci_init(struct device_node *dp, char *model_name) | 1279 | void sun4v_pci_init(struct device_node *dp, char *model_name) |
1567 | { | 1280 | { |
1568 | struct pci_controller_info *p; | 1281 | struct pci_controller_info *p; |
1569 | struct pci_iommu *iommu; | 1282 | struct iommu *iommu; |
1570 | struct property *prop; | 1283 | struct property *prop; |
1571 | struct linux_prom64_registers *regs; | 1284 | struct linux_prom64_registers *regs; |
1572 | u32 devhandle; | 1285 | u32 devhandle; |
@@ -1606,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) | |||
1606 | if (!p) | 1319 | if (!p) |
1607 | goto fatal_memory_error; | 1320 | goto fatal_memory_error; |
1608 | 1321 | ||
1609 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1322 | iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); |
1610 | if (!iommu) | 1323 | if (!iommu) |
1611 | goto fatal_memory_error; | 1324 | goto fatal_memory_error; |
1612 | 1325 | ||
1613 | p->pbm_A.iommu = iommu; | 1326 | p->pbm_A.iommu = iommu; |
1614 | 1327 | ||
1615 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); | 1328 | iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); |
1616 | if (!iommu) | 1329 | if (!iommu) |
1617 | goto fatal_memory_error; | 1330 | goto fatal_memory_error; |
1618 | 1331 | ||
@@ -1622,11 +1335,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) | |||
1622 | pci_controller_root = p; | 1335 | pci_controller_root = p; |
1623 | 1336 | ||
1624 | p->index = pci_num_controllers++; | 1337 | p->index = pci_num_controllers++; |
1625 | p->pbms_same_domain = 0; | ||
1626 | 1338 | ||
1627 | p->scan_bus = pci_sun4v_scan_bus; | 1339 | p->scan_bus = pci_sun4v_scan_bus; |
1628 | p->base_address_update = pci_sun4v_base_address_update; | ||
1629 | p->resource_adjust = pci_sun4v_resource_adjust; | ||
1630 | #ifdef CONFIG_PCI_MSI | 1340 | #ifdef CONFIG_PCI_MSI |
1631 | p->setup_msi_irq = pci_sun4v_setup_msi_irq; | 1341 | p->setup_msi_irq = pci_sun4v_setup_msi_irq; |
1632 | p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; | 1342 | p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; |
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index b291060c25a6..a114151f9fbe 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/reboot.h> | 28 | #include <linux/reboot.h> |
29 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
30 | #include <linux/compat.h> | 30 | #include <linux/compat.h> |
31 | #include <linux/tick.h> | ||
31 | #include <linux/init.h> | 32 | #include <linux/init.h> |
32 | 33 | ||
33 | #include <asm/oplib.h> | 34 | #include <asm/oplib.h> |
@@ -88,12 +89,14 @@ void cpu_idle(void) | |||
88 | set_thread_flag(TIF_POLLING_NRFLAG); | 89 | set_thread_flag(TIF_POLLING_NRFLAG); |
89 | 90 | ||
90 | while(1) { | 91 | while(1) { |
91 | if (need_resched()) { | 92 | tick_nohz_stop_sched_tick(); |
92 | preempt_enable_no_resched(); | 93 | while (!need_resched()) |
93 | schedule(); | 94 | sparc64_yield(); |
94 | preempt_disable(); | 95 | tick_nohz_restart_sched_tick(); |
95 | } | 96 | |
96 | sparc64_yield(); | 97 | preempt_enable_no_resched(); |
98 | schedule(); | ||
99 | preempt_disable(); | ||
97 | } | 100 | } |
98 | } | 101 | } |
99 | 102 | ||
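Editor's note: the reworked cpu_idle() adopts the dynticks pattern: stop the periodic tick, yield until work arrives, restart the tick, then reschedule. The stub program below mirrors only that control flow; the tick_*, yield and schedule calls are printf stand-ins, not the kernel interfaces.

#include <stdio.h>

static int wakeups;

static void tick_stop(void)    { printf("tick stopped\n"); }
static void tick_restart(void) { printf("tick restarted\n"); }
static void cpu_yield(void)    { /* sparc64_yield() stand-in */ }
static int  need_resched(void) { return ++wakeups > 3; }
static void reschedule(void)   { printf("schedule()\n"); }

int main(void)
{
        int iterations;

        for (iterations = 0; iterations < 2; iterations++) {
                tick_stop();                 /* no work: silence the tick  */
                while (!need_resched())
                        cpu_yield();         /* idle with the tick off     */
                tick_restart();              /* work arrived: tick back on */
                reschedule();
                wakeups = 0;
        }
        return 0;
}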
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index 0917c24c4f08..5e1fcd05160d 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c | |||
@@ -36,12 +36,13 @@ static struct device_node *allnodes; | |||
36 | */ | 36 | */ |
37 | static DEFINE_RWLOCK(devtree_lock); | 37 | static DEFINE_RWLOCK(devtree_lock); |
38 | 38 | ||
39 | int of_device_is_compatible(struct device_node *device, const char *compat) | 39 | int of_device_is_compatible(const struct device_node *device, |
40 | const char *compat) | ||
40 | { | 41 | { |
41 | const char* cp; | 42 | const char* cp; |
42 | int cplen, l; | 43 | int cplen, l; |
43 | 44 | ||
44 | cp = (char *) of_get_property(device, "compatible", &cplen); | 45 | cp = of_get_property(device, "compatible", &cplen); |
45 | if (cp == NULL) | 46 | if (cp == NULL) |
46 | return 0; | 47 | return 0; |
47 | while (cplen > 0) { | 48 | while (cplen > 0) { |
@@ -154,13 +155,14 @@ struct device_node *of_find_compatible_node(struct device_node *from, | |||
154 | } | 155 | } |
155 | EXPORT_SYMBOL(of_find_compatible_node); | 156 | EXPORT_SYMBOL(of_find_compatible_node); |
156 | 157 | ||
157 | struct property *of_find_property(struct device_node *np, const char *name, | 158 | struct property *of_find_property(const struct device_node *np, |
159 | const char *name, | ||
158 | int *lenp) | 160 | int *lenp) |
159 | { | 161 | { |
160 | struct property *pp; | 162 | struct property *pp; |
161 | 163 | ||
162 | for (pp = np->properties; pp != 0; pp = pp->next) { | 164 | for (pp = np->properties; pp != 0; pp = pp->next) { |
163 | if (strcmp(pp->name, name) == 0) { | 165 | if (strcasecmp(pp->name, name) == 0) { |
164 | if (lenp != 0) | 166 | if (lenp != 0) |
165 | *lenp = pp->length; | 167 | *lenp = pp->length; |
166 | break; | 168 | break; |
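Editor's note: of_find_property() now matches property names with strcasecmp(), so lookups tolerate firmware nodes whose property names differ only in case. A short standalone demonstration follows; the property list is invented.

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp */

struct property { const char *name; const char *value; };

static const struct property props[] = {
        { "Reg",        "0x1fe00000000" },
        { "compatible", "pci108e,8001"  },
};

static const char *find_property(const char *name)
{
        size_t i;

        for (i = 0; i < sizeof(props) / sizeof(props[0]); i++)
                if (strcasecmp(props[i].name, name) == 0)
                        return props[i].value;
        return NULL;
}

int main(void)
{
        /* "reg" still matches the firmware's "Reg" spelling. */
        printf("reg -> %s\n", find_property("reg"));
        printf("ranges -> %s\n",
               find_property("ranges") ? "found" : "(missing)");
        return 0;
}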
@@ -174,7 +176,8 @@ EXPORT_SYMBOL(of_find_property); | |||
174 | * Find a property with a given name for a given node | 176 | * Find a property with a given name for a given node |
175 | * and return the value. | 177 | * and return the value. |
176 | */ | 178 | */ |
177 | void *of_get_property(struct device_node *np, const char *name, int *lenp) | 179 | const void *of_get_property(const struct device_node *np, const char *name, |
180 | int *lenp) | ||
178 | { | 181 | { |
179 | struct property *pp = of_find_property(np,name,lenp); | 182 | struct property *pp = of_find_property(np,name,lenp); |
180 | return pp ? pp->value : NULL; | 183 | return pp ? pp->value : NULL; |
@@ -196,7 +199,7 @@ EXPORT_SYMBOL(of_getintprop_default); | |||
196 | 199 | ||
197 | int of_n_addr_cells(struct device_node *np) | 200 | int of_n_addr_cells(struct device_node *np) |
198 | { | 201 | { |
199 | int* ip; | 202 | const int* ip; |
200 | do { | 203 | do { |
201 | if (np->parent) | 204 | if (np->parent) |
202 | np = np->parent; | 205 | np = np->parent; |
@@ -211,7 +214,7 @@ EXPORT_SYMBOL(of_n_addr_cells); | |||
211 | 214 | ||
212 | int of_n_size_cells(struct device_node *np) | 215 | int of_n_size_cells(struct device_node *np) |
213 | { | 216 | { |
214 | int* ip; | 217 | const int* ip; |
215 | do { | 218 | do { |
216 | if (np->parent) | 219 | if (np->parent) |
217 | np = np->parent; | 220 | np = np->parent; |
@@ -243,7 +246,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
243 | while (*prevp) { | 246 | while (*prevp) { |
244 | struct property *prop = *prevp; | 247 | struct property *prop = *prevp; |
245 | 248 | ||
246 | if (!strcmp(prop->name, name)) { | 249 | if (!strcasecmp(prop->name, name)) { |
247 | void *old_val = prop->value; | 250 | void *old_val = prop->value; |
248 | int ret; | 251 | int ret; |
249 | 252 | ||
@@ -397,7 +400,7 @@ static unsigned int psycho_irq_build(struct device_node *dp, | |||
397 | 400 | ||
398 | static void psycho_irq_trans_init(struct device_node *dp) | 401 | static void psycho_irq_trans_init(struct device_node *dp) |
399 | { | 402 | { |
400 | struct linux_prom64_registers *regs; | 403 | const struct linux_prom64_registers *regs; |
401 | 404 | ||
402 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 405 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
403 | dp->irq_trans->irq_build = psycho_irq_build; | 406 | dp->irq_trans->irq_build = psycho_irq_build; |
@@ -547,7 +550,7 @@ static unsigned long __sabre_onboard_imap_off[] = { | |||
547 | static int sabre_device_needs_wsync(struct device_node *dp) | 550 | static int sabre_device_needs_wsync(struct device_node *dp) |
548 | { | 551 | { |
549 | struct device_node *parent = dp->parent; | 552 | struct device_node *parent = dp->parent; |
550 | char *parent_model, *parent_compat; | 553 | const char *parent_model, *parent_compat; |
551 | 554 | ||
552 | /* This traversal up towards the root is meant to | 555 | /* This traversal up towards the root is meant to |
553 | * handle two cases: | 556 | * handle two cases: |
@@ -589,7 +592,7 @@ static unsigned int sabre_irq_build(struct device_node *dp, | |||
589 | { | 592 | { |
590 | struct sabre_irq_data *irq_data = _data; | 593 | struct sabre_irq_data *irq_data = _data; |
591 | unsigned long controller_regs = irq_data->controller_regs; | 594 | unsigned long controller_regs = irq_data->controller_regs; |
592 | struct linux_prom_pci_registers *regs; | 595 | const struct linux_prom_pci_registers *regs; |
593 | unsigned long imap, iclr; | 596 | unsigned long imap, iclr; |
594 | unsigned long imap_off, iclr_off; | 597 | unsigned long imap_off, iclr_off; |
595 | int inofixup = 0; | 598 | int inofixup = 0; |
@@ -639,9 +642,9 @@ static unsigned int sabre_irq_build(struct device_node *dp, | |||
639 | 642 | ||
640 | static void sabre_irq_trans_init(struct device_node *dp) | 643 | static void sabre_irq_trans_init(struct device_node *dp) |
641 | { | 644 | { |
642 | struct linux_prom64_registers *regs; | 645 | const struct linux_prom64_registers *regs; |
643 | struct sabre_irq_data *irq_data; | 646 | struct sabre_irq_data *irq_data; |
644 | u32 *busrange; | 647 | const u32 *busrange; |
645 | 648 | ||
646 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 649 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
647 | dp->irq_trans->irq_build = sabre_irq_build; | 650 | dp->irq_trans->irq_build = sabre_irq_build; |
@@ -795,7 +798,7 @@ static unsigned int schizo_irq_build(struct device_node *dp, | |||
795 | 798 | ||
796 | static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo) | 799 | static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo) |
797 | { | 800 | { |
798 | struct linux_prom64_registers *regs; | 801 | const struct linux_prom64_registers *regs; |
799 | struct schizo_irq_data *irq_data; | 802 | struct schizo_irq_data *irq_data; |
800 | 803 | ||
801 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 804 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
@@ -836,7 +839,7 @@ static unsigned int pci_sun4v_irq_build(struct device_node *dp, | |||
836 | 839 | ||
837 | static void pci_sun4v_irq_trans_init(struct device_node *dp) | 840 | static void pci_sun4v_irq_trans_init(struct device_node *dp) |
838 | { | 841 | { |
839 | struct linux_prom64_registers *regs; | 842 | const struct linux_prom64_registers *regs; |
840 | 843 | ||
841 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 844 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
842 | dp->irq_trans->irq_build = pci_sun4v_irq_build; | 845 | dp->irq_trans->irq_build = pci_sun4v_irq_build; |
@@ -940,7 +943,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp, | |||
940 | void *_data) | 943 | void *_data) |
941 | { | 944 | { |
942 | unsigned long reg_base = (unsigned long) _data; | 945 | unsigned long reg_base = (unsigned long) _data; |
943 | struct linux_prom_registers *regs; | 946 | const struct linux_prom_registers *regs; |
944 | unsigned long imap, iclr; | 947 | unsigned long imap, iclr; |
945 | int sbus_slot = 0; | 948 | int sbus_slot = 0; |
946 | int sbus_level = 0; | 949 | int sbus_level = 0; |
@@ -994,7 +997,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp, | |||
994 | 997 | ||
995 | static void sbus_irq_trans_init(struct device_node *dp) | 998 | static void sbus_irq_trans_init(struct device_node *dp) |
996 | { | 999 | { |
997 | struct linux_prom64_registers *regs; | 1000 | const struct linux_prom64_registers *regs; |
998 | 1001 | ||
999 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 1002 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
1000 | dp->irq_trans->irq_build = sbus_of_build_irq; | 1003 | dp->irq_trans->irq_build = sbus_of_build_irq; |
@@ -1080,7 +1083,7 @@ static unsigned int sun4v_vdev_irq_build(struct device_node *dp, | |||
1080 | 1083 | ||
1081 | static void sun4v_vdev_irq_trans_init(struct device_node *dp) | 1084 | static void sun4v_vdev_irq_trans_init(struct device_node *dp) |
1082 | { | 1085 | { |
1083 | struct linux_prom64_registers *regs; | 1086 | const struct linux_prom64_registers *regs; |
1084 | 1087 | ||
1085 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); | 1088 | dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller)); |
1086 | dp->irq_trans->irq_build = sun4v_vdev_irq_build; | 1089 | dp->irq_trans->irq_build = sun4v_vdev_irq_build; |
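[Editor's note on the prom.c hunks above: they are fallout from of_get_property() returning a const pointer, so every local that holds a pointer into device-tree data gains a const qualifier and the compiler can reject accidental writes. A minimal user-space sketch of the pattern; the struct and helper names here are invented for illustration, not the kernel API.

    #include <stddef.h>
    #include <string.h>

    struct prop { const char *name; const void *value; size_t len; };

    /* Hypothetical stand-in for of_get_property(): property data lives in
     * firmware-provided memory, so callers only get a read-only view of it. */
    static const void *get_property(const struct prop *props, size_t n,
                                    const char *name, size_t *lenp)
    {
        for (size_t i = 0; i < n; i++) {
            if (strcmp(props[i].name, name) == 0) {
                if (lenp)
                    *lenp = props[i].len;
                return props[i].value;
            }
        }
        return NULL;
    }

    /* Mirrors the hunks above: the local pointer is const-qualified, so
     * accidentally writing through it becomes a compile-time error. */
    static long long first_reg(const struct prop *props, size_t n)
    {
        const long long *regs = get_property(props, n, "reg", NULL);

        return regs ? regs[0] : -1;
    }

    int main(void)
    {
        const long long reg0 = 0x1fe00000000LL;       /* made-up value */
        struct prop props[] = { { "reg", &reg0, sizeof(reg0) } };

        return first_reg(props, 1) == reg0 ? 0 : 1;
    }
]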
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index 14f78fb5e890..3b05428cc909 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c | |||
@@ -26,23 +26,9 @@ | |||
26 | 26 | ||
27 | #define MAP_BASE ((u32)0xc0000000) | 27 | #define MAP_BASE ((u32)0xc0000000) |
28 | 28 | ||
29 | struct sbus_iommu_arena { | 29 | struct sbus_info { |
30 | unsigned long *map; | 30 | struct iommu iommu; |
31 | unsigned int hint; | 31 | struct strbuf strbuf; |
32 | unsigned int limit; | ||
33 | }; | ||
34 | |||
35 | struct sbus_iommu { | ||
36 | spinlock_t lock; | ||
37 | |||
38 | struct sbus_iommu_arena arena; | ||
39 | |||
40 | iopte_t *page_table; | ||
41 | unsigned long strbuf_regs; | ||
42 | unsigned long iommu_regs; | ||
43 | unsigned long sbus_control_reg; | ||
44 | |||
45 | volatile unsigned long strbuf_flushflag; | ||
46 | }; | 32 | }; |
47 | 33 | ||
48 | /* Offsets from iommu_regs */ | 34 | /* Offsets from iommu_regs */ |
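[Editor's note: the hunk above replaces the SBUS-private struct sbus_iommu with a thin wrapper around the generic structs from iommu.h. A simplified sketch of the new shape, with the generic structs reduced to a couple of fields (the real definitions also carry the lock, arena, page table and more).

    /* Simplified stand-ins for the generic structs declared in iommu.h. */
    struct iommu  { unsigned long iommu_control; unsigned long write_complete_reg; };
    struct strbuf { unsigned long strbuf_pflush;  unsigned long strbuf_fsync; };

    /* The SBUS-private state is now just the two generic pieces glued together. */
    struct sbus_info {
        struct iommu  iommu;
        struct strbuf strbuf;
    };

    /* Callers that used to hold a struct sbus_iommu * unpack the wrapper once
     * and then work with the embedded structs, as the hunks below do. */
    void example_unpack(void *bus_private,
                        struct iommu **iommup, struct strbuf **strbufp)
    {
        struct sbus_info *info = bus_private;

        *iommup  = &info->iommu;
        *strbufp = &info->strbuf;
    }
]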
@@ -58,16 +44,17 @@ struct sbus_iommu { | |||
58 | 44 | ||
59 | #define IOMMU_DRAM_VALID (1UL << 30UL) | 45 | #define IOMMU_DRAM_VALID (1UL << 30UL) |
60 | 46 | ||
61 | static void __iommu_flushall(struct sbus_iommu *iommu) | 47 | static void __iommu_flushall(struct iommu *iommu) |
62 | { | 48 | { |
63 | unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG; | 49 | unsigned long tag; |
64 | int entry; | 50 | int entry; |
65 | 51 | ||
52 | tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); | ||
66 | for (entry = 0; entry < 16; entry++) { | 53 | for (entry = 0; entry < 16; entry++) { |
67 | upa_writeq(0, tag); | 54 | upa_writeq(0, tag); |
68 | tag += 8UL; | 55 | tag += 8UL; |
69 | } | 56 | } |
70 | upa_readq(iommu->sbus_control_reg); | 57 | upa_readq(iommu->write_complete_reg); |
71 | } | 58 | } |
72 | 59 | ||
73 | /* Offsets from strbuf_regs */ | 60 | /* Offsets from strbuf_regs */ |
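[Editor's note: with the generic struct, only a few absolute register addresses are stored (iommu_control, write_complete_reg, and so on); sibling registers such as the tag-diagnostic area are derived by adding the difference between their hardware offsets, which is what the new __iommu_flushall() does. A sketch of that arithmetic; the offset values below are placeholders, the real ones are the SYSIO definitions in sbus.c.

    #define IOMMU_CONTROL 0x0000UL
    #define IOMMU_TAGDIAG 0x4a80UL      /* illustrative value only */

    struct iommu_demo { unsigned long iommu_control; };

    /* Same trick as iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL):
     * derive a neighbouring register's address from the one that is stored. */
    unsigned long tagdiag_base(const struct iommu_demo *iommu)
    {
        return iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
    }
]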
@@ -82,15 +69,14 @@ static void __iommu_flushall(struct sbus_iommu *iommu) | |||
82 | 69 | ||
83 | #define STRBUF_TAG_VALID 0x02UL | 70 | #define STRBUF_TAG_VALID 0x02UL |
84 | 71 | ||
85 | static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction) | 72 | static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction) |
86 | { | 73 | { |
87 | unsigned long n; | 74 | unsigned long n; |
88 | int limit; | 75 | int limit; |
89 | 76 | ||
90 | n = npages; | 77 | n = npages; |
91 | while (n--) | 78 | while (n--) |
92 | upa_writeq(base + (n << IO_PAGE_SHIFT), | 79 | upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush); |
93 | iommu->strbuf_regs + STRBUF_PFLUSH); | ||
94 | 80 | ||
95 | /* If the device could not have possibly put dirty data into | 81 | /* If the device could not have possibly put dirty data into |
96 | * the streaming cache, no flush-flag synchronization needs | 82 | * the streaming cache, no flush-flag synchronization needs |
@@ -99,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long | |||
99 | if (direction == SBUS_DMA_TODEVICE) | 85 | if (direction == SBUS_DMA_TODEVICE) |
100 | return; | 86 | return; |
101 | 87 | ||
102 | iommu->strbuf_flushflag = 0UL; | 88 | *(strbuf->strbuf_flushflag) = 0UL; |
103 | 89 | ||
104 | /* Whoopee cushion! */ | 90 | /* Whoopee cushion! */ |
105 | upa_writeq(__pa(&iommu->strbuf_flushflag), | 91 | upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync); |
106 | iommu->strbuf_regs + STRBUF_FSYNC); | 92 | upa_readq(iommu->write_complete_reg); |
107 | upa_readq(iommu->sbus_control_reg); | ||
108 | 93 | ||
109 | limit = 100000; | 94 | limit = 100000; |
110 | while (iommu->strbuf_flushflag == 0UL) { | 95 | while (*(strbuf->strbuf_flushflag) == 0UL) { |
111 | limit--; | 96 | limit--; |
112 | if (!limit) | 97 | if (!limit) |
113 | break; | 98 | break; |
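[Editor's note: sbus_strbuf_flush() keeps the same flush-flag protocol, only the flag and register addresses now live in the generic struct strbuf: queue a per-page flush for each page, clear a flush-flag word, ask the hardware to write it back, then spin with a bounded retry count so a wedged streaming buffer only produces a warning. A user-space sketch of the polling shape, with the MMIO writes stubbed out (the "hardware" here completes instantly).

    #include <stdio.h>

    /* Stand-in for the MMIO write that queues the flush-sync request; in the
     * driver this is upa_writeq(flushflag_pa, strbuf->strbuf_fsync). */
    static void queue_flush_sync(volatile unsigned long *flag)
    {
        *flag = 1UL;                /* pretend the device finished at once */
    }

    static void wait_for_flush(volatile unsigned long *flag)
    {
        int limit = 100000;

        *flag = 0UL;                /* arm the "whoopee cushion" */
        queue_flush_sync(flag);

        /* Poll until the device raises the flag, but never spin forever. */
        while (*flag == 0UL) {
            if (!--limit)
                break;
            /* a real driver would pause/barrier between polls here */
        }
        if (!limit)
            printf("strbuf flush timeout\n");
    }

    int main(void)
    {
        volatile unsigned long flushflag;

        wait_for_flush(&flushflag);
        return 0;
    }
]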
@@ -121,9 +106,9 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long | |||
121 | } | 106 | } |
122 | 107 | ||
123 | /* Based largely upon the ppc64 iommu allocator. */ | 108 | /* Based largely upon the ppc64 iommu allocator. */ |
124 | static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages) | 109 | static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages) |
125 | { | 110 | { |
126 | struct sbus_iommu_arena *arena = &iommu->arena; | 111 | struct iommu_arena *arena = &iommu->arena; |
127 | unsigned long n, i, start, end, limit; | 112 | unsigned long n, i, start, end, limit; |
128 | int pass; | 113 | int pass; |
129 | 114 | ||
@@ -162,7 +147,7 @@ again: | |||
162 | return n; | 147 | return n; |
163 | } | 148 | } |
164 | 149 | ||
165 | static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages) | 150 | static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) |
166 | { | 151 | { |
167 | unsigned long i; | 152 | unsigned long i; |
168 | 153 | ||
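[Editor's note: the allocator itself is untouched by the hunks above; it merely switches from the private struct sbus_iommu_arena to the shared struct iommu_arena. For reference, the ppc64-style scheme it implements is a map of IOMMU pages, a rotating hint, and a linear scan for npages consecutive free entries with one wrap-around retry. A compact, self-contained sketch of that idea, using a byte map instead of a bitmap and an assumed arena size.

    #include <stdio.h>
    #include <string.h>

    #define ARENA_PAGES 64              /* assumed size for the demo */

    struct arena {
        unsigned char map[ARENA_PAGES]; /* one byte per page, 0 = free */
        unsigned int  hint;
        unsigned int  limit;
    };

    /* Find npages consecutive free entries, starting at the hint and making
     * a second pass from 0 if the first pass runs off the end. */
    static long arena_alloc(struct arena *a, unsigned long npages)
    {
        unsigned int start = a->hint;
        int pass = 0;

    again:
        for (unsigned int i = start; i + npages <= a->limit; i++) {
            unsigned long run = 0;

            while (run < npages && !a->map[i + run])
                run++;
            if (run == npages) {
                memset(&a->map[i], 1, npages);
                a->hint = i + npages;
                return i;
            }
            i += run;                   /* skip past the busy entry we hit */
        }
        if (pass++ == 0) {              /* one wrap-around retry */
            start = 0;
            goto again;
        }
        return -1;
    }

    static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
    {
        memset(&a->map[base], 0, npages);
    }

    int main(void)
    {
        struct arena a = { .limit = ARENA_PAGES };
        long first = arena_alloc(&a, 8);
        long second = arena_alloc(&a, 4);

        printf("first=%ld second=%ld\n", first, second);
        arena_free(&a, first, 8);
        return 0;
    }
]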
@@ -170,7 +155,7 @@ static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, | |||
170 | __clear_bit(i, arena->map); | 155 | __clear_bit(i, arena->map); |
171 | } | 156 | } |
172 | 157 | ||
173 | static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize) | 158 | static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize) |
174 | { | 159 | { |
175 | unsigned long tsbbase, order, sz, num_tsb_entries; | 160 | unsigned long tsbbase, order, sz, num_tsb_entries; |
176 | 161 | ||
@@ -178,13 +163,14 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize | |||
178 | 163 | ||
179 | /* Setup initial software IOMMU state. */ | 164 | /* Setup initial software IOMMU state. */ |
180 | spin_lock_init(&iommu->lock); | 165 | spin_lock_init(&iommu->lock); |
166 | iommu->page_table_map_base = MAP_BASE; | ||
181 | 167 | ||
182 | /* Allocate and initialize the free area map. */ | 168 | /* Allocate and initialize the free area map. */ |
183 | sz = num_tsb_entries / 8; | 169 | sz = num_tsb_entries / 8; |
184 | sz = (sz + 7UL) & ~7UL; | 170 | sz = (sz + 7UL) & ~7UL; |
185 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); | 171 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); |
186 | if (!iommu->arena.map) { | 172 | if (!iommu->arena.map) { |
187 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | 173 | prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n"); |
188 | prom_halt(); | 174 | prom_halt(); |
189 | } | 175 | } |
190 | iommu->arena.limit = num_tsb_entries; | 176 | iommu->arena.limit = num_tsb_entries; |
@@ -200,7 +186,7 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize | |||
200 | memset(iommu->page_table, 0, tsbsize); | 186 | memset(iommu->page_table, 0, tsbsize); |
201 | } | 187 | } |
202 | 188 | ||
203 | static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages) | 189 | static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages) |
204 | { | 190 | { |
205 | long entry; | 191 | long entry; |
206 | 192 | ||
@@ -211,14 +197,15 @@ static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npag | |||
211 | return iommu->page_table + entry; | 197 | return iommu->page_table + entry; |
212 | } | 198 | } |
213 | 199 | ||
214 | static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages) | 200 | static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages) |
215 | { | 201 | { |
216 | sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); | 202 | sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); |
217 | } | 203 | } |
218 | 204 | ||
219 | void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr) | 205 | void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr) |
220 | { | 206 | { |
221 | struct sbus_iommu *iommu; | 207 | struct sbus_info *info; |
208 | struct iommu *iommu; | ||
222 | iopte_t *iopte; | 209 | iopte_t *iopte; |
223 | unsigned long flags, order, first_page; | 210 | unsigned long flags, order, first_page; |
224 | void *ret; | 211 | void *ret; |
@@ -234,7 +221,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma | |||
234 | return NULL; | 221 | return NULL; |
235 | memset((char *)first_page, 0, PAGE_SIZE << order); | 222 | memset((char *)first_page, 0, PAGE_SIZE << order); |
236 | 223 | ||
237 | iommu = sdev->bus->iommu; | 224 | info = sdev->bus->iommu; |
225 | iommu = &info->iommu; | ||
238 | 226 | ||
239 | spin_lock_irqsave(&iommu->lock, flags); | 227 | spin_lock_irqsave(&iommu->lock, flags); |
240 | iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); | 228 | iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); |
@@ -245,7 +233,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma | |||
245 | return NULL; | 233 | return NULL; |
246 | } | 234 | } |
247 | 235 | ||
248 | *dvma_addr = (MAP_BASE + | 236 | *dvma_addr = (iommu->page_table_map_base + |
249 | ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); | 237 | ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); |
250 | ret = (void *) first_page; | 238 | ret = (void *) first_page; |
251 | npages = size >> IO_PAGE_SHIFT; | 239 | npages = size >> IO_PAGE_SHIFT; |
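[Editor's note: every open-coded MAP_BASE above becomes iommu->page_table_map_base, which sbus_iommu_table_init() sets to MAP_BASE, so the arithmetic is unchanged: a DVMA (bus) address is the map base plus the page-table index shifted by the IOMMU page size, and the reverse mapping subtracts the base and shifts back down. A sketch of both directions; the 8K IO_PAGE_SHIFT is assumed here.

    #include <stdint.h>
    #include <stdio.h>

    #define IO_PAGE_SHIFT 13                    /* assumed: 8K IOMMU pages */
    #define MAP_BASE      ((uint32_t)0xc0000000)

    /* bus address handed to the device for page-table slot 'entry' */
    static uint32_t entry_to_dvma(unsigned long map_base, long entry)
    {
        return map_base + (entry << IO_PAGE_SHIFT);
    }

    /* page-table slot that maps the bus address 'dvma' */
    static long dvma_to_entry(unsigned long map_base, uint32_t dvma)
    {
        return (dvma - map_base) >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
        uint32_t dvma = entry_to_dvma(MAP_BASE, 5);

        printf("entry 5 -> %#x -> entry %ld\n",
               dvma, dvma_to_entry(MAP_BASE, dvma));
        return 0;
    }
]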
@@ -263,18 +251,20 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma | |||
263 | 251 | ||
264 | void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma) | 252 | void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma) |
265 | { | 253 | { |
266 | struct sbus_iommu *iommu; | 254 | struct sbus_info *info; |
255 | struct iommu *iommu; | ||
267 | iopte_t *iopte; | 256 | iopte_t *iopte; |
268 | unsigned long flags, order, npages; | 257 | unsigned long flags, order, npages; |
269 | 258 | ||
270 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 259 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
271 | iommu = sdev->bus->iommu; | 260 | info = sdev->bus->iommu; |
261 | iommu = &info->iommu; | ||
272 | iopte = iommu->page_table + | 262 | iopte = iommu->page_table + |
273 | ((dvma - MAP_BASE) >> IO_PAGE_SHIFT); | 263 | ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
274 | 264 | ||
275 | spin_lock_irqsave(&iommu->lock, flags); | 265 | spin_lock_irqsave(&iommu->lock, flags); |
276 | 266 | ||
277 | free_npages(iommu, dvma - MAP_BASE, npages); | 267 | free_npages(iommu, dvma - iommu->page_table_map_base, npages); |
278 | 268 | ||
279 | spin_unlock_irqrestore(&iommu->lock, flags); | 269 | spin_unlock_irqrestore(&iommu->lock, flags); |
280 | 270 | ||
@@ -285,14 +275,16 @@ void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_add | |||
285 | 275 | ||
286 | dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction) | 276 | dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction) |
287 | { | 277 | { |
288 | struct sbus_iommu *iommu; | 278 | struct sbus_info *info; |
279 | struct iommu *iommu; | ||
289 | iopte_t *base; | 280 | iopte_t *base; |
290 | unsigned long flags, npages, oaddr; | 281 | unsigned long flags, npages, oaddr; |
291 | unsigned long i, base_paddr; | 282 | unsigned long i, base_paddr; |
292 | u32 bus_addr, ret; | 283 | u32 bus_addr, ret; |
293 | unsigned long iopte_protection; | 284 | unsigned long iopte_protection; |
294 | 285 | ||
295 | iommu = sdev->bus->iommu; | 286 | info = sdev->bus->iommu; |
287 | iommu = &info->iommu; | ||
296 | 288 | ||
297 | if (unlikely(direction == SBUS_DMA_NONE)) | 289 | if (unlikely(direction == SBUS_DMA_NONE)) |
298 | BUG(); | 290 | BUG(); |
@@ -308,7 +300,7 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire | |||
308 | if (unlikely(!base)) | 300 | if (unlikely(!base)) |
309 | BUG(); | 301 | BUG(); |
310 | 302 | ||
311 | bus_addr = (MAP_BASE + | 303 | bus_addr = (iommu->page_table_map_base + |
312 | ((base - iommu->page_table) << IO_PAGE_SHIFT)); | 304 | ((base - iommu->page_table) << IO_PAGE_SHIFT)); |
313 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); | 305 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); |
314 | base_paddr = __pa(oaddr & IO_PAGE_MASK); | 306 | base_paddr = __pa(oaddr & IO_PAGE_MASK); |
@@ -325,7 +317,9 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire | |||
325 | 317 | ||
326 | void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) | 318 | void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) |
327 | { | 319 | { |
328 | struct sbus_iommu *iommu = sdev->bus->iommu; | 320 | struct sbus_info *info = sdev->bus->iommu; |
321 | struct iommu *iommu = &info->iommu; | ||
322 | struct strbuf *strbuf = &info->strbuf; | ||
329 | iopte_t *base; | 323 | iopte_t *base; |
330 | unsigned long flags, npages, i; | 324 | unsigned long flags, npages, i; |
331 | 325 | ||
@@ -335,15 +329,15 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, in | |||
335 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | 329 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); |
336 | npages >>= IO_PAGE_SHIFT; | 330 | npages >>= IO_PAGE_SHIFT; |
337 | base = iommu->page_table + | 331 | base = iommu->page_table + |
338 | ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); | 332 | ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
339 | 333 | ||
340 | bus_addr &= IO_PAGE_MASK; | 334 | bus_addr &= IO_PAGE_MASK; |
341 | 335 | ||
342 | spin_lock_irqsave(&iommu->lock, flags); | 336 | spin_lock_irqsave(&iommu->lock, flags); |
343 | sbus_strbuf_flush(iommu, bus_addr, npages, direction); | 337 | sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction); |
344 | for (i = 0; i < npages; i++) | 338 | for (i = 0; i < npages; i++) |
345 | iopte_val(base[i]) = 0UL; | 339 | iopte_val(base[i]) = 0UL; |
346 | free_npages(iommu, bus_addr - MAP_BASE, npages); | 340 | free_npages(iommu, bus_addr - iommu->page_table_map_base, npages); |
347 | spin_unlock_irqrestore(&iommu->lock, flags); | 341 | spin_unlock_irqrestore(&iommu->lock, flags); |
348 | } | 342 | } |
349 | 343 | ||
@@ -425,7 +419,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
425 | 419 | ||
426 | int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) | 420 | int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) |
427 | { | 421 | { |
428 | struct sbus_iommu *iommu; | 422 | struct sbus_info *info; |
423 | struct iommu *iommu; | ||
429 | unsigned long flags, npages, iopte_protection; | 424 | unsigned long flags, npages, iopte_protection; |
430 | iopte_t *base; | 425 | iopte_t *base; |
431 | u32 dma_base; | 426 | u32 dma_base; |
@@ -442,7 +437,8 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i | |||
442 | return 1; | 437 | return 1; |
443 | } | 438 | } |
444 | 439 | ||
445 | iommu = sdev->bus->iommu; | 440 | info = sdev->bus->iommu; |
441 | iommu = &info->iommu; | ||
446 | 442 | ||
447 | if (unlikely(direction == SBUS_DMA_NONE)) | 443 | if (unlikely(direction == SBUS_DMA_NONE)) |
448 | BUG(); | 444 | BUG(); |
@@ -456,7 +452,7 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i | |||
456 | if (unlikely(base == NULL)) | 452 | if (unlikely(base == NULL)) |
457 | BUG(); | 453 | BUG(); |
458 | 454 | ||
459 | dma_base = MAP_BASE + | 455 | dma_base = iommu->page_table_map_base + |
460 | ((base - iommu->page_table) << IO_PAGE_SHIFT); | 456 | ((base - iommu->page_table) << IO_PAGE_SHIFT); |
461 | 457 | ||
462 | /* Normalize DVMA addresses. */ | 458 | /* Normalize DVMA addresses. */ |
@@ -485,7 +481,9 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i | |||
485 | 481 | ||
486 | void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) | 482 | void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) |
487 | { | 483 | { |
488 | struct sbus_iommu *iommu; | 484 | struct sbus_info *info; |
485 | struct iommu *iommu; | ||
486 | struct strbuf *strbuf; | ||
489 | iopte_t *base; | 487 | iopte_t *base; |
490 | unsigned long flags, i, npages; | 488 | unsigned long flags, i, npages; |
491 | u32 bus_addr; | 489 | u32 bus_addr; |
@@ -493,7 +491,9 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems | |||
493 | if (unlikely(direction == SBUS_DMA_NONE)) | 491 | if (unlikely(direction == SBUS_DMA_NONE)) |
494 | BUG(); | 492 | BUG(); |
495 | 493 | ||
496 | iommu = sdev->bus->iommu; | 494 | info = sdev->bus->iommu; |
495 | iommu = &info->iommu; | ||
496 | strbuf = &info->strbuf; | ||
497 | 497 | ||
498 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | 498 | bus_addr = sglist->dma_address & IO_PAGE_MASK; |
499 | 499 | ||
@@ -505,29 +505,33 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems | |||
505 | bus_addr) >> IO_PAGE_SHIFT; | 505 | bus_addr) >> IO_PAGE_SHIFT; |
506 | 506 | ||
507 | base = iommu->page_table + | 507 | base = iommu->page_table + |
508 | ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT); | 508 | ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
509 | 509 | ||
510 | spin_lock_irqsave(&iommu->lock, flags); | 510 | spin_lock_irqsave(&iommu->lock, flags); |
511 | sbus_strbuf_flush(iommu, bus_addr, npages, direction); | 511 | sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction); |
512 | for (i = 0; i < npages; i++) | 512 | for (i = 0; i < npages; i++) |
513 | iopte_val(base[i]) = 0UL; | 513 | iopte_val(base[i]) = 0UL; |
514 | free_npages(iommu, bus_addr - MAP_BASE, npages); | 514 | free_npages(iommu, bus_addr - iommu->page_table_map_base, npages); |
515 | spin_unlock_irqrestore(&iommu->lock, flags); | 515 | spin_unlock_irqrestore(&iommu->lock, flags); |
516 | } | 516 | } |
517 | 517 | ||
518 | void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) | 518 | void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction) |
519 | { | 519 | { |
520 | struct sbus_iommu *iommu; | 520 | struct sbus_info *info; |
521 | struct iommu *iommu; | ||
522 | struct strbuf *strbuf; | ||
521 | unsigned long flags, npages; | 523 | unsigned long flags, npages; |
522 | 524 | ||
523 | iommu = sdev->bus->iommu; | 525 | info = sdev->bus->iommu; |
526 | iommu = &info->iommu; | ||
527 | strbuf = &info->strbuf; | ||
524 | 528 | ||
525 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | 529 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); |
526 | npages >>= IO_PAGE_SHIFT; | 530 | npages >>= IO_PAGE_SHIFT; |
527 | bus_addr &= IO_PAGE_MASK; | 531 | bus_addr &= IO_PAGE_MASK; |
528 | 532 | ||
529 | spin_lock_irqsave(&iommu->lock, flags); | 533 | spin_lock_irqsave(&iommu->lock, flags); |
530 | sbus_strbuf_flush(iommu, bus_addr, npages, direction); | 534 | sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction); |
531 | spin_unlock_irqrestore(&iommu->lock, flags); | 535 | spin_unlock_irqrestore(&iommu->lock, flags); |
532 | } | 536 | } |
533 | 537 | ||
@@ -537,11 +541,15 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz | |||
537 | 541 | ||
538 | void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) | 542 | void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction) |
539 | { | 543 | { |
540 | struct sbus_iommu *iommu; | 544 | struct sbus_info *info; |
545 | struct iommu *iommu; | ||
546 | struct strbuf *strbuf; | ||
541 | unsigned long flags, npages, i; | 547 | unsigned long flags, npages, i; |
542 | u32 bus_addr; | 548 | u32 bus_addr; |
543 | 549 | ||
544 | iommu = sdev->bus->iommu; | 550 | info = sdev->bus->iommu; |
551 | iommu = &info->iommu; | ||
552 | strbuf = &info->strbuf; | ||
545 | 553 | ||
546 | bus_addr = sglist[0].dma_address & IO_PAGE_MASK; | 554 | bus_addr = sglist[0].dma_address & IO_PAGE_MASK; |
547 | for (i = 0; i < nelems; i++) { | 555 | for (i = 0; i < nelems; i++) { |
@@ -553,7 +561,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, | |||
553 | - bus_addr) >> IO_PAGE_SHIFT; | 561 | - bus_addr) >> IO_PAGE_SHIFT; |
554 | 562 | ||
555 | spin_lock_irqsave(&iommu->lock, flags); | 563 | spin_lock_irqsave(&iommu->lock, flags); |
556 | sbus_strbuf_flush(iommu, bus_addr, npages, direction); | 564 | sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction); |
557 | spin_unlock_irqrestore(&iommu->lock, flags); | 565 | spin_unlock_irqrestore(&iommu->lock, flags); |
558 | } | 566 | } |
559 | 567 | ||
@@ -564,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, | |||
564 | /* Enable 64-bit DVMA mode for the given device. */ | 572 | /* Enable 64-bit DVMA mode for the given device. */ |
565 | void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) | 573 | void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) |
566 | { | 574 | { |
567 | struct sbus_iommu *iommu = sdev->bus->iommu; | 575 | struct sbus_info *info = sdev->bus->iommu; |
576 | struct iommu *iommu = &info->iommu; | ||
568 | int slot = sdev->slot; | 577 | int slot = sdev->slot; |
569 | unsigned long cfg_reg; | 578 | unsigned long cfg_reg; |
570 | u64 val; | 579 | u64 val; |
571 | 580 | ||
572 | cfg_reg = iommu->sbus_control_reg; | 581 | cfg_reg = iommu->write_complete_reg; |
573 | switch (slot) { | 582 | switch (slot) { |
574 | case 0: | 583 | case 0: |
575 | cfg_reg += 0x20UL; | 584 | cfg_reg += 0x20UL; |
@@ -704,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap) | |||
704 | unsigned int sbus_build_irq(void *buscookie, unsigned int ino) | 713 | unsigned int sbus_build_irq(void *buscookie, unsigned int ino) |
705 | { | 714 | { |
706 | struct sbus_bus *sbus = (struct sbus_bus *)buscookie; | 715 | struct sbus_bus *sbus = (struct sbus_bus *)buscookie; |
707 | struct sbus_iommu *iommu = sbus->iommu; | 716 | struct sbus_info *info = sbus->iommu; |
708 | unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; | 717 | struct iommu *iommu = &info->iommu; |
718 | unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; | ||
709 | unsigned long imap, iclr; | 719 | unsigned long imap, iclr; |
710 | int sbus_level = 0; | 720 | int sbus_level = 0; |
711 | 721 | ||
@@ -766,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino) | |||
766 | static irqreturn_t sysio_ue_handler(int irq, void *dev_id) | 776 | static irqreturn_t sysio_ue_handler(int irq, void *dev_id) |
767 | { | 777 | { |
768 | struct sbus_bus *sbus = dev_id; | 778 | struct sbus_bus *sbus = dev_id; |
769 | struct sbus_iommu *iommu = sbus->iommu; | 779 | struct sbus_info *info = sbus->iommu; |
770 | unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; | 780 | struct iommu *iommu = &info->iommu; |
781 | unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; | ||
771 | unsigned long afsr_reg, afar_reg; | 782 | unsigned long afsr_reg, afar_reg; |
772 | unsigned long afsr, afar, error_bits; | 783 | unsigned long afsr, afar, error_bits; |
773 | int reported; | 784 | int reported; |
@@ -838,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id) | |||
838 | static irqreturn_t sysio_ce_handler(int irq, void *dev_id) | 849 | static irqreturn_t sysio_ce_handler(int irq, void *dev_id) |
839 | { | 850 | { |
840 | struct sbus_bus *sbus = dev_id; | 851 | struct sbus_bus *sbus = dev_id; |
841 | struct sbus_iommu *iommu = sbus->iommu; | 852 | struct sbus_info *info = sbus->iommu; |
842 | unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; | 853 | struct iommu *iommu = &info->iommu; |
854 | unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; | ||
843 | unsigned long afsr_reg, afar_reg; | 855 | unsigned long afsr_reg, afar_reg; |
844 | unsigned long afsr, afar, error_bits; | 856 | unsigned long afsr, afar, error_bits; |
845 | int reported; | 857 | int reported; |
@@ -915,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id) | |||
915 | static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) | 927 | static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) |
916 | { | 928 | { |
917 | struct sbus_bus *sbus = dev_id; | 929 | struct sbus_bus *sbus = dev_id; |
918 | struct sbus_iommu *iommu = sbus->iommu; | 930 | struct sbus_info *info = sbus->iommu; |
931 | struct iommu *iommu = &info->iommu; | ||
919 | unsigned long afsr_reg, afar_reg, reg_base; | 932 | unsigned long afsr_reg, afar_reg, reg_base; |
920 | unsigned long afsr, afar, error_bits; | 933 | unsigned long afsr, afar, error_bits; |
921 | int reported; | 934 | int reported; |
922 | 935 | ||
923 | reg_base = iommu->sbus_control_reg - 0x2000UL; | 936 | reg_base = iommu->write_complete_reg - 0x2000UL; |
924 | afsr_reg = reg_base + SYSIO_SBUS_AFSR; | 937 | afsr_reg = reg_base + SYSIO_SBUS_AFSR; |
925 | afar_reg = reg_base + SYSIO_SBUS_AFAR; | 938 | afar_reg = reg_base + SYSIO_SBUS_AFAR; |
926 | 939 | ||
@@ -982,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) | |||
982 | 995 | ||
983 | static void __init sysio_register_error_handlers(struct sbus_bus *sbus) | 996 | static void __init sysio_register_error_handlers(struct sbus_bus *sbus) |
984 | { | 997 | { |
985 | struct sbus_iommu *iommu = sbus->iommu; | 998 | struct sbus_info *info = sbus->iommu; |
986 | unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL; | 999 | struct iommu *iommu = &info->iommu; |
1000 | unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; | ||
987 | unsigned int irq; | 1001 | unsigned int irq; |
988 | u64 control; | 1002 | u64 control; |
989 | 1003 | ||
@@ -1017,18 +1031,20 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus) | |||
1017 | SYSIO_ECNTRL_CEEN), | 1031 | SYSIO_ECNTRL_CEEN), |
1018 | reg_base + ECC_CONTROL); | 1032 | reg_base + ECC_CONTROL); |
1019 | 1033 | ||
1020 | control = upa_readq(iommu->sbus_control_reg); | 1034 | control = upa_readq(iommu->write_complete_reg); |
1021 | control |= 0x100UL; /* SBUS Error Interrupt Enable */ | 1035 | control |= 0x100UL; /* SBUS Error Interrupt Enable */ |
1022 | upa_writeq(control, iommu->sbus_control_reg); | 1036 | upa_writeq(control, iommu->write_complete_reg); |
1023 | } | 1037 | } |
1024 | 1038 | ||
1025 | /* Boot time initialization. */ | 1039 | /* Boot time initialization. */ |
1026 | static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) | 1040 | static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) |
1027 | { | 1041 | { |
1028 | struct linux_prom64_registers *pr; | 1042 | const struct linux_prom64_registers *pr; |
1029 | struct device_node *dp; | 1043 | struct device_node *dp; |
1030 | struct sbus_iommu *iommu; | 1044 | struct sbus_info *info; |
1031 | unsigned long regs; | 1045 | struct iommu *iommu; |
1046 | struct strbuf *strbuf; | ||
1047 | unsigned long regs, reg_base; | ||
1032 | u64 control; | 1048 | u64 control; |
1033 | int i; | 1049 | int i; |
1034 | 1050 | ||
@@ -1043,33 +1059,42 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) | |||
1043 | } | 1059 | } |
1044 | regs = pr->phys_addr; | 1060 | regs = pr->phys_addr; |
1045 | 1061 | ||
1046 | iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC); | 1062 | info = kzalloc(sizeof(*info), GFP_ATOMIC); |
1047 | if (iommu == NULL) { | 1063 | if (info == NULL) { |
1048 | prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n"); | 1064 | prom_printf("sbus_iommu_init: Fatal error, " |
1065 | "kmalloc(info) failed\n"); | ||
1049 | prom_halt(); | 1066 | prom_halt(); |
1050 | } | 1067 | } |
1051 | 1068 | ||
1052 | /* Align on E$ line boundary. */ | 1069 | iommu = &info->iommu; |
1053 | iommu = (struct sbus_iommu *) | 1070 | strbuf = &info->strbuf; |
1054 | (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) & | ||
1055 | ~(SMP_CACHE_BYTES - 1UL)); | ||
1056 | 1071 | ||
1057 | memset(iommu, 0, sizeof(*iommu)); | 1072 | reg_base = regs + SYSIO_IOMMUREG_BASE; |
1073 | iommu->iommu_control = reg_base + IOMMU_CONTROL; | ||
1074 | iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE; | ||
1075 | iommu->iommu_flush = reg_base + IOMMU_FLUSH; | ||
1058 | 1076 | ||
1059 | /* Setup spinlock. */ | 1077 | reg_base = regs + SYSIO_STRBUFREG_BASE; |
1060 | spin_lock_init(&iommu->lock); | 1078 | strbuf->strbuf_control = reg_base + STRBUF_CONTROL; |
1079 | strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH; | ||
1080 | strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC; | ||
1061 | 1081 | ||
1062 | /* Init register offsets. */ | 1082 | strbuf->strbuf_enabled = 1; |
1063 | iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE; | 1083 | |
1064 | iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE; | 1084 | strbuf->strbuf_flushflag = (volatile unsigned long *) |
1085 | ((((unsigned long)&strbuf->__flushflag_buf[0]) | ||
1086 | + 63UL) | ||
1087 | & ~63UL); | ||
1088 | strbuf->strbuf_flushflag_pa = (unsigned long) | ||
1089 | __pa(strbuf->strbuf_flushflag); | ||
1065 | 1090 | ||
1066 | /* The SYSIO SBUS control register is used for dummy reads | 1091 | /* The SYSIO SBUS control register is used for dummy reads |
1067 | * in order to ensure write completion. | 1092 | * in order to ensure write completion. |
1068 | */ | 1093 | */ |
1069 | iommu->sbus_control_reg = regs + 0x2000UL; | 1094 | iommu->write_complete_reg = regs + 0x2000UL; |
1070 | 1095 | ||
1071 | /* Link into SYSIO software state. */ | 1096 | /* Link into SYSIO software state. */ |
1072 | sbus->iommu = iommu; | 1097 | sbus->iommu = info; |
1073 | 1098 | ||
1074 | printk("SYSIO: UPA portID %x, at %016lx\n", | 1099 | printk("SYSIO: UPA portID %x, at %016lx\n", |
1075 | sbus->portid, regs); | 1100 | sbus->portid, regs); |
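[Editor's note: sbus_iommu_init() no longer over-allocates the whole descriptor and rounds the pointer to an E-cache line; instead the flush flag is carved out of a small buffer embedded in struct strbuf, rounded up to a 64-byte boundary, and its physical address cached next to it. A sketch of that rounding; the buffer size below is an assumption standing in for __flushflag_buf[].

    #include <stdio.h>
    #include <stdint.h>

    struct strbuf_demo {
        /* room for one 8-byte flag plus worst-case 63 bytes of padding */
        unsigned char __flushflag_buf[64 + 8];
        volatile unsigned long *flushflag;
    };

    static void init_flushflag(struct strbuf_demo *sb)
    {
        uintptr_t p = (uintptr_t)&sb->__flushflag_buf[0];

        /* round up to the next 64-byte (cache-line) boundary */
        sb->flushflag = (volatile unsigned long *)((p + 63UL) & ~63UL);
    }

    int main(void)
    {
        struct strbuf_demo sb;

        init_flushflag(&sb);
        printf("aligned at %p (offset %zu into buffer)\n",
               (void *)sb.flushflag,
               (size_t)((uintptr_t)sb.flushflag - (uintptr_t)sb.__flushflag_buf));
        return 0;
    }
]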
@@ -1077,40 +1102,44 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) | |||
1077 | /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ | 1102 | /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ |
1078 | sbus_iommu_table_init(iommu, IO_TSB_SIZE); | 1103 | sbus_iommu_table_init(iommu, IO_TSB_SIZE); |
1079 | 1104 | ||
1080 | control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL); | 1105 | control = upa_readq(iommu->iommu_control); |
1081 | control = ((7UL << 16UL) | | 1106 | control = ((7UL << 16UL) | |
1082 | (0UL << 2UL) | | 1107 | (0UL << 2UL) | |
1083 | (1UL << 1UL) | | 1108 | (1UL << 1UL) | |
1084 | (1UL << 0UL)); | 1109 | (1UL << 0UL)); |
1085 | upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL); | 1110 | upa_writeq(control, iommu->iommu_control); |
1086 | 1111 | ||
1087 | /* Clean out any cruft in the IOMMU using | 1112 | /* Clean out any cruft in the IOMMU using |
1088 | * diagnostic accesses. | 1113 | * diagnostic accesses. |
1089 | */ | 1114 | */ |
1090 | for (i = 0; i < 16; i++) { | 1115 | for (i = 0; i < 16; i++) { |
1091 | unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG; | 1116 | unsigned long dram, tag; |
1092 | unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG; | 1117 | |
1118 | dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL); | ||
1119 | tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); | ||
1093 | 1120 | ||
1094 | dram += (unsigned long)i * 8UL; | 1121 | dram += (unsigned long)i * 8UL; |
1095 | tag += (unsigned long)i * 8UL; | 1122 | tag += (unsigned long)i * 8UL; |
1096 | upa_writeq(0, dram); | 1123 | upa_writeq(0, dram); |
1097 | upa_writeq(0, tag); | 1124 | upa_writeq(0, tag); |
1098 | } | 1125 | } |
1099 | upa_readq(iommu->sbus_control_reg); | 1126 | upa_readq(iommu->write_complete_reg); |
1100 | 1127 | ||
1101 | /* Give the TSB to SYSIO. */ | 1128 | /* Give the TSB to SYSIO. */ |
1102 | upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE); | 1129 | upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); |
1103 | 1130 | ||
1104 | /* Setup streaming buffer, DE=1 SB_EN=1 */ | 1131 | /* Setup streaming buffer, DE=1 SB_EN=1 */ |
1105 | control = (1UL << 1UL) | (1UL << 0UL); | 1132 | control = (1UL << 1UL) | (1UL << 0UL); |
1106 | upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL); | 1133 | upa_writeq(control, strbuf->strbuf_control); |
1107 | 1134 | ||
1108 | /* Clear out the tags using diagnostics. */ | 1135 | /* Clear out the tags using diagnostics. */ |
1109 | for (i = 0; i < 16; i++) { | 1136 | for (i = 0; i < 16; i++) { |
1110 | unsigned long ptag, ltag; | 1137 | unsigned long ptag, ltag; |
1111 | 1138 | ||
1112 | ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG; | 1139 | ptag = strbuf->strbuf_control + |
1113 | ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG; | 1140 | (STRBUF_PTAGDIAG - STRBUF_CONTROL); |
1141 | ltag = strbuf->strbuf_control + | ||
1142 | (STRBUF_LTAGDIAG - STRBUF_CONTROL); | ||
1114 | ptag += (unsigned long)i * 8UL; | 1143 | ptag += (unsigned long)i * 8UL; |
1115 | ltag += (unsigned long)i * 8UL; | 1144 | ltag += (unsigned long)i * 8UL; |
1116 | 1145 | ||
@@ -1119,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) | |||
1119 | } | 1148 | } |
1120 | 1149 | ||
1121 | /* Enable DVMA arbitration for all devices/slots. */ | 1150 | /* Enable DVMA arbitration for all devices/slots. */ |
1122 | control = upa_readq(iommu->sbus_control_reg); | 1151 | control = upa_readq(iommu->write_complete_reg); |
1123 | control |= 0x3fUL; | 1152 | control |= 0x3fUL; |
1124 | upa_writeq(control, iommu->sbus_control_reg); | 1153 | upa_writeq(control, iommu->write_complete_reg); |
1125 | 1154 | ||
1126 | /* Now some Xfire specific grot... */ | 1155 | /* Now some Xfire specific grot... */ |
1127 | if (this_is_starfire) | 1156 | if (this_is_starfire) |
@@ -1133,7 +1162,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus) | |||
1133 | void sbus_fill_device_irq(struct sbus_dev *sdev) | 1162 | void sbus_fill_device_irq(struct sbus_dev *sdev) |
1134 | { | 1163 | { |
1135 | struct device_node *dp = of_find_node_by_phandle(sdev->prom_node); | 1164 | struct device_node *dp = of_find_node_by_phandle(sdev->prom_node); |
1136 | struct linux_prom_irqs *irqs; | 1165 | const struct linux_prom_irqs *irqs; |
1137 | 1166 | ||
1138 | irqs = of_get_property(dp, "interrupts", NULL); | 1167 | irqs = of_get_property(dp, "interrupts", NULL); |
1139 | if (!irqs) { | 1168 | if (!irqs) { |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index fc99f7b8012f..d4f0a70f4845 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -45,7 +45,7 @@ | |||
45 | extern void calibrate_delay(void); | 45 | extern void calibrate_delay(void); |
46 | 46 | ||
47 | /* Please don't make this stuff initdata!!! --DaveM */ | 47 | /* Please don't make this stuff initdata!!! --DaveM */ |
48 | static unsigned char boot_cpu_id; | 48 | unsigned char boot_cpu_id; |
49 | 49 | ||
50 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; | 50 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
51 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; | 51 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; |
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id) | |||
81 | struct device_node *dp; | 81 | struct device_node *dp; |
82 | int def; | 82 | int def; |
83 | 83 | ||
84 | /* multiplier and counter set by | ||
85 | smp_setup_percpu_timer() */ | ||
86 | cpu_data(id).udelay_val = loops_per_jiffy; | 84 | cpu_data(id).udelay_val = loops_per_jiffy; |
87 | 85 | ||
88 | cpu_find_by_mid(id, &dp); | 86 | cpu_find_by_mid(id, &dp); |
@@ -125,7 +123,7 @@ void __init smp_store_cpu_info(int id) | |||
125 | cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); | 123 | cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); |
126 | } | 124 | } |
127 | 125 | ||
128 | static void smp_setup_percpu_timer(void); | 126 | extern void setup_sparc64_timer(void); |
129 | 127 | ||
130 | static volatile unsigned long callin_flag = 0; | 128 | static volatile unsigned long callin_flag = 0; |
131 | 129 | ||
@@ -140,7 +138,7 @@ void __init smp_callin(void) | |||
140 | 138 | ||
141 | __flush_tlb_all(); | 139 | __flush_tlb_all(); |
142 | 140 | ||
143 | smp_setup_percpu_timer(); | 141 | setup_sparc64_timer(); |
144 | 142 | ||
145 | if (cheetah_pcache_forced_on) | 143 | if (cheetah_pcache_forced_on) |
146 | cheetah_enable_pcache(); | 144 | cheetah_enable_pcache(); |
@@ -177,8 +175,6 @@ void cpu_panic(void) | |||
177 | panic("SMP bolixed\n"); | 175 | panic("SMP bolixed\n"); |
178 | } | 176 | } |
179 | 177 | ||
180 | static unsigned long current_tick_offset __read_mostly; | ||
181 | |||
182 | /* This tick register synchronization scheme is taken entirely from | 178 | /* This tick register synchronization scheme is taken entirely from |
183 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. | 179 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. |
184 | * | 180 | * |
@@ -261,7 +257,7 @@ void smp_synchronize_tick_client(void) | |||
261 | } else | 257 | } else |
262 | adj = -delta; | 258 | adj = -delta; |
263 | 259 | ||
264 | tick_ops->add_tick(adj, current_tick_offset); | 260 | tick_ops->add_tick(adj); |
265 | } | 261 | } |
266 | #if DEBUG_TICK_SYNC | 262 | #if DEBUG_TICK_SYNC |
267 | t[i].rt = rt; | 263 | t[i].rt = rt; |
@@ -1180,117 +1176,15 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) | |||
1180 | preempt_enable(); | 1176 | preempt_enable(); |
1181 | } | 1177 | } |
1182 | 1178 | ||
1183 | #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier | ||
1184 | #define prof_counter(__cpu) cpu_data(__cpu).counter | ||
1185 | |||
1186 | void smp_percpu_timer_interrupt(struct pt_regs *regs) | ||
1187 | { | ||
1188 | unsigned long compare, tick, pstate; | ||
1189 | int cpu = smp_processor_id(); | ||
1190 | int user = user_mode(regs); | ||
1191 | struct pt_regs *old_regs; | ||
1192 | |||
1193 | /* | ||
1194 | * Check for level 14 softint. | ||
1195 | */ | ||
1196 | { | ||
1197 | unsigned long tick_mask = tick_ops->softint_mask; | ||
1198 | |||
1199 | if (!(get_softint() & tick_mask)) { | ||
1200 | extern void handler_irq(int, struct pt_regs *); | ||
1201 | |||
1202 | handler_irq(14, regs); | ||
1203 | return; | ||
1204 | } | ||
1205 | clear_softint(tick_mask); | ||
1206 | } | ||
1207 | |||
1208 | old_regs = set_irq_regs(regs); | ||
1209 | do { | ||
1210 | profile_tick(CPU_PROFILING); | ||
1211 | if (!--prof_counter(cpu)) { | ||
1212 | irq_enter(); | ||
1213 | |||
1214 | if (cpu == boot_cpu_id) { | ||
1215 | kstat_this_cpu.irqs[0]++; | ||
1216 | timer_tick_interrupt(regs); | ||
1217 | } | ||
1218 | |||
1219 | update_process_times(user); | ||
1220 | |||
1221 | irq_exit(); | ||
1222 | |||
1223 | prof_counter(cpu) = prof_multiplier(cpu); | ||
1224 | } | ||
1225 | |||
1226 | /* Guarantee that the following sequences execute | ||
1227 | * uninterrupted. | ||
1228 | */ | ||
1229 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | ||
1230 | "wrpr %0, %1, %%pstate" | ||
1231 | : "=r" (pstate) | ||
1232 | : "i" (PSTATE_IE)); | ||
1233 | |||
1234 | compare = tick_ops->add_compare(current_tick_offset); | ||
1235 | tick = tick_ops->get_tick(); | ||
1236 | |||
1237 | /* Restore PSTATE_IE. */ | ||
1238 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | ||
1239 | : /* no outputs */ | ||
1240 | : "r" (pstate)); | ||
1241 | } while (time_after_eq(tick, compare)); | ||
1242 | set_irq_regs(old_regs); | ||
1243 | } | ||
1244 | |||
1245 | static void __init smp_setup_percpu_timer(void) | ||
1246 | { | ||
1247 | int cpu = smp_processor_id(); | ||
1248 | unsigned long pstate; | ||
1249 | |||
1250 | prof_counter(cpu) = prof_multiplier(cpu) = 1; | ||
1251 | |||
1252 | /* Guarantee that the following sequences execute | ||
1253 | * uninterrupted. | ||
1254 | */ | ||
1255 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | ||
1256 | "wrpr %0, %1, %%pstate" | ||
1257 | : "=r" (pstate) | ||
1258 | : "i" (PSTATE_IE)); | ||
1259 | |||
1260 | tick_ops->init_tick(current_tick_offset); | ||
1261 | |||
1262 | /* Restore PSTATE_IE. */ | ||
1263 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | ||
1264 | : /* no outputs */ | ||
1265 | : "r" (pstate)); | ||
1266 | } | ||
1267 | |||
1268 | void __init smp_tick_init(void) | 1179 | void __init smp_tick_init(void) |
1269 | { | 1180 | { |
1270 | boot_cpu_id = hard_smp_processor_id(); | 1181 | boot_cpu_id = hard_smp_processor_id(); |
1271 | current_tick_offset = timer_tick_offset; | ||
1272 | |||
1273 | prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; | ||
1274 | } | 1182 | } |
1275 | 1183 | ||
1276 | /* /proc/profile writes can call this, don't __init it please. */ | 1184 | /* /proc/profile writes can call this, don't __init it please. */ |
1277 | static DEFINE_SPINLOCK(prof_setup_lock); | ||
1278 | |||
1279 | int setup_profiling_timer(unsigned int multiplier) | 1185 | int setup_profiling_timer(unsigned int multiplier) |
1280 | { | 1186 | { |
1281 | unsigned long flags; | 1187 | return -EINVAL; |
1282 | int i; | ||
1283 | |||
1284 | if ((!multiplier) || (timer_tick_offset / multiplier) < 1000) | ||
1285 | return -EINVAL; | ||
1286 | |||
1287 | spin_lock_irqsave(&prof_setup_lock, flags); | ||
1288 | for_each_possible_cpu(i) | ||
1289 | prof_multiplier(i) = multiplier; | ||
1290 | current_tick_offset = (timer_tick_offset / multiplier); | ||
1291 | spin_unlock_irqrestore(&prof_setup_lock, flags); | ||
1292 | |||
1293 | return 0; | ||
1294 | } | 1188 | } |
1295 | 1189 | ||
1296 | static void __init smp_tune_scheduling(void) | 1190 | static void __init smp_tune_scheduling(void) |
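[Editor's note: smp_percpu_timer_interrupt() and smp_setup_percpu_timer() disappear because the per-cpu tick is now driven through setup_sparc64_timer() and the generic clockevents code (note the new clockchips/clocksource includes in time.c below); setup_profiling_timer() accordingly now just returns -EINVAL. The part of the removed handler worth remembering is its catch-up loop, which kept re-arming the compare register until the deadline was safely in the future. A toy sketch of that loop, with the tick registers faked and the per-tick work elided.

    #include <stdio.h>

    static unsigned long fake_tick;
    static unsigned long fake_cmpr;

    static unsigned long get_tick(void)
    {
        return fake_tick += 7;          /* time keeps moving under us */
    }

    static unsigned long add_compare(unsigned long adj)
    {
        return fake_cmpr += adj;        /* re-arm relative to the old value */
    }

    int main(void)
    {
        unsigned long compare, tick, offset = 100;

        /* The removed handler re-armed in a loop so that, even if it ran
         * late or slowly, %tick_cmpr never ended up in the past. */
        do {
            /* ...profile_tick()/update_process_times() ran here... */
            compare = add_compare(offset);
            tick = get_tick();
        } while ((long)(tick - compare) >= 0);  /* i.e. time_after_eq() */

        printf("caught up: tick=%lu compare=%lu\n", tick, compare);
        return 0;
    }
]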
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index beffc82a1e85..d00f51a5683f 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -212,7 +212,6 @@ EXPORT_SYMBOL(insl); | |||
212 | #ifdef CONFIG_PCI | 212 | #ifdef CONFIG_PCI |
213 | EXPORT_SYMBOL(ebus_chain); | 213 | EXPORT_SYMBOL(ebus_chain); |
214 | EXPORT_SYMBOL(isa_chain); | 214 | EXPORT_SYMBOL(isa_chain); |
215 | EXPORT_SYMBOL(pci_memspace_mask); | ||
216 | EXPORT_SYMBOL(pci_alloc_consistent); | 215 | EXPORT_SYMBOL(pci_alloc_consistent); |
217 | EXPORT_SYMBOL(pci_free_consistent); | 216 | EXPORT_SYMBOL(pci_free_consistent); |
218 | EXPORT_SYMBOL(pci_map_single); | 217 | EXPORT_SYMBOL(pci_map_single); |
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index f84da4f1b706..259063f41f95 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -31,6 +31,9 @@ | |||
31 | #include <linux/profile.h> | 31 | #include <linux/profile.h> |
32 | #include <linux/miscdevice.h> | 32 | #include <linux/miscdevice.h> |
33 | #include <linux/rtc.h> | 33 | #include <linux/rtc.h> |
34 | #include <linux/kernel_stat.h> | ||
35 | #include <linux/clockchips.h> | ||
36 | #include <linux/clocksource.h> | ||
34 | 37 | ||
35 | #include <asm/oplib.h> | 38 | #include <asm/oplib.h> |
36 | #include <asm/mostek.h> | 39 | #include <asm/mostek.h> |
@@ -60,6 +63,7 @@ static void __iomem *mstk48t59_regs; | |||
60 | static int set_rtc_mmss(unsigned long); | 63 | static int set_rtc_mmss(unsigned long); |
61 | 64 | ||
62 | #define TICK_PRIV_BIT (1UL << 63) | 65 | #define TICK_PRIV_BIT (1UL << 63) |
66 | #define TICKCMP_IRQ_BIT (1UL << 63) | ||
63 | 67 | ||
64 | #ifdef CONFIG_SMP | 68 | #ifdef CONFIG_SMP |
65 | unsigned long profile_pc(struct pt_regs *regs) | 69 | unsigned long profile_pc(struct pt_regs *regs) |
@@ -93,21 +97,22 @@ static void tick_disable_protection(void) | |||
93 | : "g2"); | 97 | : "g2"); |
94 | } | 98 | } |
95 | 99 | ||
96 | static void tick_init_tick(unsigned long offset) | 100 | static void tick_disable_irq(void) |
97 | { | 101 | { |
98 | tick_disable_protection(); | ||
99 | |||
100 | __asm__ __volatile__( | 102 | __asm__ __volatile__( |
101 | " rd %%tick, %%g1\n" | ||
102 | " andn %%g1, %1, %%g1\n" | ||
103 | " ba,pt %%xcc, 1f\n" | 103 | " ba,pt %%xcc, 1f\n" |
104 | " add %%g1, %0, %%g1\n" | 104 | " nop\n" |
105 | " .align 64\n" | 105 | " .align 64\n" |
106 | "1: wr %%g1, 0x0, %%tick_cmpr\n" | 106 | "1: wr %0, 0x0, %%tick_cmpr\n" |
107 | " rd %%tick_cmpr, %%g0" | 107 | " rd %%tick_cmpr, %%g0" |
108 | : /* no outputs */ | 108 | : /* no outputs */ |
109 | : "r" (offset), "r" (TICK_PRIV_BIT) | 109 | : "r" (TICKCMP_IRQ_BIT)); |
110 | : "g1"); | 110 | } |
111 | |||
112 | static void tick_init_tick(void) | ||
113 | { | ||
114 | tick_disable_protection(); | ||
115 | tick_disable_irq(); | ||
111 | } | 116 | } |
112 | 117 | ||
113 | static unsigned long tick_get_tick(void) | 118 | static unsigned long tick_get_tick(void) |
@@ -121,20 +126,14 @@ static unsigned long tick_get_tick(void) | |||
121 | return ret & ~TICK_PRIV_BIT; | 126 | return ret & ~TICK_PRIV_BIT; |
122 | } | 127 | } |
123 | 128 | ||
124 | static unsigned long tick_get_compare(void) | 129 | static int tick_add_compare(unsigned long adj) |
125 | { | 130 | { |
126 | unsigned long ret; | 131 | unsigned long orig_tick, new_tick, new_compare; |
127 | 132 | ||
128 | __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" | 133 | __asm__ __volatile__("rd %%tick, %0" |
129 | "mov %0, %0" | 134 | : "=r" (orig_tick)); |
130 | : "=r" (ret)); | ||
131 | 135 | ||
132 | return ret; | 136 | orig_tick &= ~TICKCMP_IRQ_BIT; |
133 | } | ||
134 | |||
135 | static unsigned long tick_add_compare(unsigned long adj) | ||
136 | { | ||
137 | unsigned long new_compare; | ||
138 | 137 | ||
139 | /* Workaround for Spitfire Errata (#54 I think??), I discovered | 138 | /* Workaround for Spitfire Errata (#54 I think??), I discovered |
140 | * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch | 139 | * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch |
@@ -145,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj) | |||
145 | * at the start of an I-cache line, and perform a dummy | 144 | * at the start of an I-cache line, and perform a dummy |
146 | * read back from %tick_cmpr right after writing to it. -DaveM | 145 | * read back from %tick_cmpr right after writing to it. -DaveM |
147 | */ | 146 | */ |
148 | __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" | 147 | __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" |
149 | "ba,pt %%xcc, 1f\n\t" | 148 | " add %1, %2, %0\n\t" |
150 | " add %0, %1, %0\n\t" | ||
151 | ".align 64\n" | 149 | ".align 64\n" |
152 | "1:\n\t" | 150 | "1:\n\t" |
153 | "wr %0, 0, %%tick_cmpr\n\t" | 151 | "wr %0, 0, %%tick_cmpr\n\t" |
154 | "rd %%tick_cmpr, %%g0" | 152 | "rd %%tick_cmpr, %%g0\n\t" |
155 | : "=&r" (new_compare) | 153 | : "=r" (new_compare) |
156 | : "r" (adj)); | 154 | : "r" (orig_tick), "r" (adj)); |
157 | 155 | ||
158 | return new_compare; | 156 | __asm__ __volatile__("rd %%tick, %0" |
157 | : "=r" (new_tick)); | ||
158 | new_tick &= ~TICKCMP_IRQ_BIT; | ||
159 | |||
160 | return ((long)(new_tick - (orig_tick+adj))) > 0L; | ||
159 | } | 161 | } |
160 | 162 | ||
161 | static unsigned long tick_add_tick(unsigned long adj, unsigned long offset) | 163 | static unsigned long tick_add_tick(unsigned long adj) |
162 | { | 164 | { |
163 | unsigned long new_tick, tmp; | 165 | unsigned long new_tick; |
164 | 166 | ||
165 | /* Also need to handle Blackbird bug here too. */ | 167 | /* Also need to handle Blackbird bug here too. */ |
166 | __asm__ __volatile__("rd %%tick, %0\n\t" | 168 | __asm__ __volatile__("rd %%tick, %0\n\t" |
167 | "add %0, %2, %0\n\t" | 169 | "add %0, %1, %0\n\t" |
168 | "wrpr %0, 0, %%tick\n\t" | 170 | "wrpr %0, 0, %%tick\n\t" |
169 | "andn %0, %4, %1\n\t" | 171 | : "=&r" (new_tick) |
170 | "ba,pt %%xcc, 1f\n\t" | 172 | : "r" (adj)); |
171 | " add %1, %3, %1\n\t" | ||
172 | ".align 64\n" | ||
173 | "1:\n\t" | ||
174 | "wr %1, 0, %%tick_cmpr\n\t" | ||
175 | "rd %%tick_cmpr, %%g0" | ||
176 | : "=&r" (new_tick), "=&r" (tmp) | ||
177 | : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT)); | ||
178 | 173 | ||
179 | return new_tick; | 174 | return new_tick; |
180 | } | 175 | } |
181 | 176 | ||
182 | static struct sparc64_tick_ops tick_operations __read_mostly = { | 177 | static struct sparc64_tick_ops tick_operations __read_mostly = { |
178 | .name = "tick", | ||
183 | .init_tick = tick_init_tick, | 179 | .init_tick = tick_init_tick, |
180 | .disable_irq = tick_disable_irq, | ||
184 | .get_tick = tick_get_tick, | 181 | .get_tick = tick_get_tick, |
185 | .get_compare = tick_get_compare, | ||
186 | .add_tick = tick_add_tick, | 182 | .add_tick = tick_add_tick, |
187 | .add_compare = tick_add_compare, | 183 | .add_compare = tick_add_compare, |
188 | .softint_mask = 1UL << 0, | 184 | .softint_mask = 1UL << 0, |
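[Editor's note: all of the add_compare implementations now return an int rather than the programmed value: after writing the new compare they re-read the counter and report whether the deadline is already in the past, using a signed difference so counter wrap-around still compares correctly (presumably so the caller can retry or fail the event). The comparison in isolation, with illustrative numbers:

    #include <stdio.h>

    /* Nonzero if 'now' is already past the deadline we just programmed.
     * The subtraction stays in the counter's width and is interpreted as
     * signed, so a wrapped counter still gives the right answer. */
    static int deadline_missed(unsigned long now, unsigned long deadline)
    {
        return ((long)(now - deadline)) > 0L;
    }

    int main(void)
    {
        unsigned long orig = 1000, adj = 50;

        printf("tick=1040: missed=%d\n", deadline_missed(1040, orig + adj));
        printf("tick=1060: missed=%d\n", deadline_missed(1060, orig + adj));
        return 0;
    }
]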
@@ -190,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = { | |||
190 | 186 | ||
191 | struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; | 187 | struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; |
192 | 188 | ||
193 | static void stick_init_tick(unsigned long offset) | 189 | static void stick_disable_irq(void) |
190 | { | ||
191 | __asm__ __volatile__( | ||
192 | "wr %0, 0x0, %%asr25" | ||
193 | : /* no outputs */ | ||
194 | : "r" (TICKCMP_IRQ_BIT)); | ||
195 | } | ||
196 | |||
197 | static void stick_init_tick(void) | ||
194 | { | 198 | { |
195 | /* Writes to the %tick and %stick register are not | 199 | /* Writes to the %tick and %stick register are not |
196 | * allowed on sun4v. The Hypervisor controls that | 200 | * allowed on sun4v. The Hypervisor controls that |
@@ -198,6 +202,7 @@ static void stick_init_tick(unsigned long offset) | |||
198 | */ | 202 | */ |
199 | if (tlb_type != hypervisor) { | 203 | if (tlb_type != hypervisor) { |
200 | tick_disable_protection(); | 204 | tick_disable_protection(); |
205 | tick_disable_irq(); | ||
201 | 206 | ||
202 | /* Let the user get at STICK too. */ | 207 | /* Let the user get at STICK too. */ |
203 | __asm__ __volatile__( | 208 | __asm__ __volatile__( |
@@ -209,14 +214,7 @@ static void stick_init_tick(unsigned long offset) | |||
209 | : "g1", "g2"); | 214 | : "g1", "g2"); |
210 | } | 215 | } |
211 | 216 | ||
212 | __asm__ __volatile__( | 217 | stick_disable_irq(); |
213 | " rd %%asr24, %%g1\n" | ||
214 | " andn %%g1, %1, %%g1\n" | ||
215 | " add %%g1, %0, %%g1\n" | ||
216 | " wr %%g1, 0x0, %%asr25" | ||
217 | : /* no outputs */ | ||
218 | : "r" (offset), "r" (TICK_PRIV_BIT) | ||
219 | : "g1"); | ||
220 | } | 218 | } |
221 | 219 | ||
222 | static unsigned long stick_get_tick(void) | 220 | static unsigned long stick_get_tick(void) |
@@ -229,49 +227,43 @@ static unsigned long stick_get_tick(void) | |||
229 | return ret & ~TICK_PRIV_BIT; | 227 | return ret & ~TICK_PRIV_BIT; |
230 | } | 228 | } |
231 | 229 | ||
232 | static unsigned long stick_get_compare(void) | 230 | static unsigned long stick_add_tick(unsigned long adj) |
233 | { | 231 | { |
234 | unsigned long ret; | 232 | unsigned long new_tick; |
235 | |||
236 | __asm__ __volatile__("rd %%asr25, %0" | ||
237 | : "=r" (ret)); | ||
238 | |||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static unsigned long stick_add_tick(unsigned long adj, unsigned long offset) | ||
243 | { | ||
244 | unsigned long new_tick, tmp; | ||
245 | 233 | ||
246 | __asm__ __volatile__("rd %%asr24, %0\n\t" | 234 | __asm__ __volatile__("rd %%asr24, %0\n\t" |
247 | "add %0, %2, %0\n\t" | 235 | "add %0, %1, %0\n\t" |
248 | "wr %0, 0, %%asr24\n\t" | 236 | "wr %0, 0, %%asr24\n\t" |
249 | "andn %0, %4, %1\n\t" | 237 | : "=&r" (new_tick) |
250 | "add %1, %3, %1\n\t" | 238 | : "r" (adj)); |
251 | "wr %1, 0, %%asr25" | ||
252 | : "=&r" (new_tick), "=&r" (tmp) | ||
253 | : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT)); | ||
254 | 239 | ||
255 | return new_tick; | 240 | return new_tick; |
256 | } | 241 | } |
257 | 242 | ||
258 | static unsigned long stick_add_compare(unsigned long adj) | 243 | static int stick_add_compare(unsigned long adj) |
259 | { | 244 | { |
260 | unsigned long new_compare; | 245 | unsigned long orig_tick, new_tick; |
261 | 246 | ||
262 | __asm__ __volatile__("rd %%asr25, %0\n\t" | 247 | __asm__ __volatile__("rd %%asr24, %0" |
263 | "add %0, %1, %0\n\t" | 248 | : "=r" (orig_tick)); |
264 | "wr %0, 0, %%asr25" | 249 | orig_tick &= ~TICKCMP_IRQ_BIT; |
265 | : "=&r" (new_compare) | 250 | |
266 | : "r" (adj)); | 251 | __asm__ __volatile__("wr %0, 0, %%asr25" |
252 | : /* no outputs */ | ||
253 | : "r" (orig_tick + adj)); | ||
254 | |||
255 | __asm__ __volatile__("rd %%asr24, %0" | ||
256 | : "=r" (new_tick)); | ||
257 | new_tick &= ~TICKCMP_IRQ_BIT; | ||
267 | 258 | ||
268 | return new_compare; | 259 | return ((long)(new_tick - (orig_tick+adj))) > 0L; |
269 | } | 260 | } |
270 | 261 | ||
271 | static struct sparc64_tick_ops stick_operations __read_mostly = { | 262 | static struct sparc64_tick_ops stick_operations __read_mostly = { |
263 | .name = "stick", | ||
272 | .init_tick = stick_init_tick, | 264 | .init_tick = stick_init_tick, |
265 | .disable_irq = stick_disable_irq, | ||
273 | .get_tick = stick_get_tick, | 266 | .get_tick = stick_get_tick, |
274 | .get_compare = stick_get_compare, | ||
275 | .add_tick = stick_add_tick, | 267 | .add_tick = stick_add_tick, |
276 | .add_compare = stick_add_compare, | 268 | .add_compare = stick_add_compare, |
277 | .softint_mask = 1UL << 16, | 269 | .softint_mask = 1UL << 16, |
@@ -320,20 +312,6 @@ static unsigned long __hbird_read_stick(void) | |||
320 | return ret; | 312 | return ret; |
321 | } | 313 | } |
322 | 314 | ||
323 | static unsigned long __hbird_read_compare(void) | ||
324 | { | ||
325 | unsigned long low, high; | ||
326 | unsigned long addr = HBIRD_STICKCMP_ADDR; | ||
327 | |||
328 | __asm__ __volatile__("ldxa [%2] %3, %0\n\t" | ||
329 | "add %2, 0x8, %2\n\t" | ||
330 | "ldxa [%2] %3, %1" | ||
331 | : "=&r" (low), "=&r" (high), "=&r" (addr) | ||
332 | : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr)); | ||
333 | |||
334 | return (high << 32UL) | low; | ||
335 | } | ||
336 | |||
337 | static void __hbird_write_stick(unsigned long val) | 315 | static void __hbird_write_stick(unsigned long val) |
338 | { | 316 | { |
339 | unsigned long low = (val & 0xffffffffUL); | 317 | unsigned long low = (val & 0xffffffffUL); |
@@ -364,10 +342,13 @@ static void __hbird_write_compare(unsigned long val) | |||
364 | "i" (ASI_PHYS_BYPASS_EC_E)); | 342 | "i" (ASI_PHYS_BYPASS_EC_E)); |
365 | } | 343 | } |
366 | 344 | ||
367 | static void hbtick_init_tick(unsigned long offset) | 345 | static void hbtick_disable_irq(void) |
368 | { | 346 | { |
369 | unsigned long val; | 347 | __hbird_write_compare(TICKCMP_IRQ_BIT); |
348 | } | ||
370 | 349 | ||
350 | static void hbtick_init_tick(void) | ||
351 | { | ||
371 | tick_disable_protection(); | 352 | tick_disable_protection(); |
372 | 353 | ||
373 | /* XXX This seems to be necessary to 'jumpstart' Hummingbird | 354 | /* XXX This seems to be necessary to 'jumpstart' Hummingbird |
@@ -377,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset) | |||
377 | */ | 358 | */ |
378 | __hbird_write_stick(__hbird_read_stick()); | 359 | __hbird_write_stick(__hbird_read_stick()); |
379 | 360 | ||
380 | val = __hbird_read_stick() & ~TICK_PRIV_BIT; | 361 | hbtick_disable_irq(); |
381 | __hbird_write_compare(val + offset); | ||
382 | } | 362 | } |
383 | 363 | ||
384 | static unsigned long hbtick_get_tick(void) | 364 | static unsigned long hbtick_get_tick(void) |
@@ -386,122 +366,95 @@ static unsigned long hbtick_get_tick(void) | |||
386 | return __hbird_read_stick() & ~TICK_PRIV_BIT; | 366 | return __hbird_read_stick() & ~TICK_PRIV_BIT; |
387 | } | 367 | } |
388 | 368 | ||
389 | static unsigned long hbtick_get_compare(void) | 369 | static unsigned long hbtick_add_tick(unsigned long adj) |
390 | { | ||
391 | return __hbird_read_compare(); | ||
392 | } | ||
393 | |||
394 | static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset) | ||
395 | { | 370 | { |
396 | unsigned long val; | 371 | unsigned long val; |
397 | 372 | ||
398 | val = __hbird_read_stick() + adj; | 373 | val = __hbird_read_stick() + adj; |
399 | __hbird_write_stick(val); | 374 | __hbird_write_stick(val); |
400 | 375 | ||
401 | val &= ~TICK_PRIV_BIT; | ||
402 | __hbird_write_compare(val + offset); | ||
403 | |||
404 | return val; | 376 | return val; |
405 | } | 377 | } |
406 | 378 | ||
407 | static unsigned long hbtick_add_compare(unsigned long adj) | 379 | static int hbtick_add_compare(unsigned long adj) |
408 | { | 380 | { |
409 | unsigned long val = __hbird_read_compare() + adj; | 381 | unsigned long val = __hbird_read_stick(); |
382 | unsigned long val2; | ||
410 | 383 | ||
411 | val &= ~TICK_PRIV_BIT; | 384 | val &= ~TICKCMP_IRQ_BIT; |
385 | val += adj; | ||
412 | __hbird_write_compare(val); | 386 | __hbird_write_compare(val); |
413 | 387 | ||
414 | return val; | 388 | val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT; |
389 | |||
390 | return ((long)(val2 - val)) > 0L; | ||
415 | } | 391 | } |
416 | 392 | ||
417 | static struct sparc64_tick_ops hbtick_operations __read_mostly = { | 393 | static struct sparc64_tick_ops hbtick_operations __read_mostly = { |
394 | .name = "hbtick", | ||
418 | .init_tick = hbtick_init_tick, | 395 | .init_tick = hbtick_init_tick, |
396 | .disable_irq = hbtick_disable_irq, | ||
419 | .get_tick = hbtick_get_tick, | 397 | .get_tick = hbtick_get_tick, |
420 | .get_compare = hbtick_get_compare, | ||
421 | .add_tick = hbtick_add_tick, | 398 | .add_tick = hbtick_add_tick, |
422 | .add_compare = hbtick_add_compare, | 399 | .add_compare = hbtick_add_compare, |
423 | .softint_mask = 1UL << 0, | 400 | .softint_mask = 1UL << 0, |
424 | }; | 401 | }; |
425 | 402 | ||
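The reworked add_compare hooks above no longer return the new compare value: they program the compare register and then report whether the tick counter had already raced past the requested point, which the clockevent code further down maps to -ETIME so the caller can retry with a larger delta. A minimal sketch of that pattern, with invented accessor names standing in for the %asr24/%asr25 and Hummingbird register helpers:

/* Hedged sketch only: program a compare value, then detect whether the
 * deadline was already missed by the time it was written.
 */
static int program_compare(unsigned long delta,
                           unsigned long (*read_tick)(void),
                           void (*write_compare)(unsigned long))
{
        unsigned long target = read_tick() + delta;

        write_compare(target);

        /* If the counter is already beyond the target, the compare
         * interrupt may never fire; report that (the -ETIME path).
         */
        return ((long)(read_tick() - target)) > 0L;
}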
426 | /* timer_interrupt() needs to keep up the real-time clock, | ||
427 | * as well as call the "do_timer()" routine every clocktick | ||
428 | * | ||
429 | * NOTE: On SUN5 systems the ticker interrupt comes in using 2 | ||
430 | * interrupts, one at level14 and one with softint bit 0. | ||
431 | */ | ||
432 | unsigned long timer_tick_offset __read_mostly; | ||
433 | |||
434 | static unsigned long timer_ticks_per_nsec_quotient __read_mostly; | 403 | static unsigned long timer_ticks_per_nsec_quotient __read_mostly; |
435 | 404 | ||
436 | #define TICK_SIZE (tick_nsec / 1000) | 405 | #define TICK_SIZE (tick_nsec / 1000) |
437 | 406 | ||
438 | static inline void timer_check_rtc(void) | 407 | #define USEC_AFTER 500000 |
439 | { | 408 | #define USEC_BEFORE 500000 |
440 | /* last time the cmos clock got updated */ | ||
441 | static long last_rtc_update; | ||
442 | |||
443 | /* Determine when to update the Mostek clock. */ | ||
444 | if (ntp_synced() && | ||
445 | xtime.tv_sec > last_rtc_update + 660 && | ||
446 | (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && | ||
447 | (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { | ||
448 | if (set_rtc_mmss(xtime.tv_sec) == 0) | ||
449 | last_rtc_update = xtime.tv_sec; | ||
450 | else | ||
451 | last_rtc_update = xtime.tv_sec - 600; | ||
452 | /* do it again in 60 s */ | ||
453 | } | ||
454 | } | ||
455 | 409 | ||
456 | irqreturn_t timer_interrupt(int irq, void *dev_id) | 410 | static void sync_cmos_clock(unsigned long dummy); |
457 | { | ||
458 | unsigned long ticks, compare, pstate; | ||
459 | 411 | ||
460 | write_seqlock(&xtime_lock); | 412 | static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0); |
461 | 413 | ||
462 | do { | 414 | static void sync_cmos_clock(unsigned long dummy) |
463 | #ifndef CONFIG_SMP | 415 | { |
464 | profile_tick(CPU_PROFILING); | 416 | struct timeval now, next; |
465 | update_process_times(user_mode(get_irq_regs())); | 417 | int fail = 1; |
466 | #endif | ||
467 | do_timer(1); | ||
468 | 418 | ||
469 | /* Guarantee that the following sequences execute | 419 | /* |
470 | * uninterrupted. | 420 | * If we have an externally synchronized Linux clock, then update |
421 | * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be | ||
422 | * called as close as possible to 500 ms before the new second starts. | ||
423 | * This code is run on a timer. If the clock is set, that timer | ||
424 | * may not expire at the correct time. Thus, we adjust... | ||
425 | */ | ||
426 | if (!ntp_synced()) | ||
427 | /* | ||
428 | * Not synced, exit, do not restart a timer (if one is | ||
429 | * running, let it run out). | ||
471 | */ | 430 | */ |
472 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | 431 | return; |
473 | "wrpr %0, %1, %%pstate" | ||
474 | : "=r" (pstate) | ||
475 | : "i" (PSTATE_IE)); | ||
476 | 432 | ||
477 | compare = tick_ops->add_compare(timer_tick_offset); | 433 | do_gettimeofday(&now); |
478 | ticks = tick_ops->get_tick(); | 434 | if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 && |
435 | now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) | ||
436 | fail = set_rtc_mmss(now.tv_sec); | ||
479 | 437 | ||
480 | /* Restore PSTATE_IE. */ | 438 | next.tv_usec = USEC_AFTER - now.tv_usec; |
481 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | 439 | if (next.tv_usec <= 0) |
482 | : /* no outputs */ | 440 | next.tv_usec += USEC_PER_SEC; |
483 | : "r" (pstate)); | ||
484 | } while (time_after_eq(ticks, compare)); | ||
485 | 441 | ||
486 | timer_check_rtc(); | 442 | if (!fail) |
443 | next.tv_sec = 659; | ||
444 | else | ||
445 | next.tv_sec = 0; | ||
487 | 446 | ||
488 | write_sequnlock(&xtime_lock); | 447 | if (next.tv_usec >= USEC_PER_SEC) { |
489 | 448 | next.tv_sec++; | |
490 | return IRQ_HANDLED; | 449 | next.tv_usec -= USEC_PER_SEC; |
450 | } | ||
451 | mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next)); | ||
491 | } | 452 | } |
492 | 453 | ||
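A concrete walk-through of the rescheduling arithmetic above (illustrative numbers only): if do_gettimeofday() reports tv_usec = 700000, that is outside the ~500 ms window, so fail stays set and next becomes 0 s + 800000 us; the timer refires about 0.8 s later, landing near tv_usec = 500000, i.e. roughly 500 ms before the next second boundary, where set_rtc_mmss() gets its chance. If that write succeeds, next.tv_sec becomes 659 and the carry rolls the total up to 660 s, so the following attempt happens about 11 minutes later, again aligned to the half-second point.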
493 | #ifdef CONFIG_SMP | 454 | void notify_arch_cmos_timer(void) |
494 | void timer_tick_interrupt(struct pt_regs *regs) | ||
495 | { | 455 | { |
496 | write_seqlock(&xtime_lock); | 456 | mod_timer(&sync_cmos_timer, jiffies + 1); |
497 | |||
498 | do_timer(1); | ||
499 | |||
500 | timer_check_rtc(); | ||
501 | |||
502 | write_sequnlock(&xtime_lock); | ||
503 | } | 457 | } |
504 | #endif | ||
505 | 458 | ||
506 | /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ | 459 | /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ |
507 | static void __init kick_start_clock(void) | 460 | static void __init kick_start_clock(void) |
@@ -751,7 +704,7 @@ retry: | |||
751 | return -EOPNOTSUPP; | 704 | return -EOPNOTSUPP; |
752 | } | 705 | } |
753 | 706 | ||
754 | static int __init clock_model_matches(char *model) | 707 | static int __init clock_model_matches(const char *model) |
755 | { | 708 | { |
756 | if (strcmp(model, "mk48t02") && | 709 | if (strcmp(model, "mk48t02") && |
757 | strcmp(model, "mk48t08") && | 710 | strcmp(model, "mk48t08") && |
@@ -768,7 +721,7 @@ static int __init clock_model_matches(char *model) | |||
768 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) | 721 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) |
769 | { | 722 | { |
770 | struct device_node *dp = op->node; | 723 | struct device_node *dp = op->node; |
771 | char *model = of_get_property(dp, "model", NULL); | 724 | const char *model = of_get_property(dp, "model", NULL); |
772 | unsigned long size, flags; | 725 | unsigned long size, flags; |
773 | void __iomem *regs; | 726 | void __iomem *regs; |
774 | 727 | ||
@@ -900,7 +853,6 @@ static unsigned long sparc64_init_timers(void) | |||
900 | prop = of_find_property(dp, "stick-frequency", NULL); | 853 | prop = of_find_property(dp, "stick-frequency", NULL); |
901 | } | 854 | } |
902 | clock = *(unsigned int *) prop->value; | 855 | clock = *(unsigned int *) prop->value; |
903 | timer_tick_offset = clock / HZ; | ||
904 | 856 | ||
905 | #ifdef CONFIG_SMP | 857 | #ifdef CONFIG_SMP |
906 | smp_tick_init(); | 858 | smp_tick_init(); |
@@ -909,26 +861,6 @@ static unsigned long sparc64_init_timers(void) | |||
909 | return clock; | 861 | return clock; |
910 | } | 862 | } |
911 | 863 | ||
912 | static void sparc64_start_timers(void) | ||
913 | { | ||
914 | unsigned long pstate; | ||
915 | |||
916 | /* Guarantee that the following sequences execute | ||
917 | * uninterrupted. | ||
918 | */ | ||
919 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" | ||
920 | "wrpr %0, %1, %%pstate" | ||
921 | : "=r" (pstate) | ||
922 | : "i" (PSTATE_IE)); | ||
923 | |||
924 | tick_ops->init_tick(timer_tick_offset); | ||
925 | |||
926 | /* Restore PSTATE_IE. */ | ||
927 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | ||
928 | : /* no outputs */ | ||
929 | : "r" (pstate)); | ||
930 | } | ||
931 | |||
932 | struct freq_table { | 864 | struct freq_table { |
933 | unsigned long clock_tick_ref; | 865 | unsigned long clock_tick_ref; |
934 | unsigned int ref_freq; | 866 | unsigned int ref_freq; |
@@ -975,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = { | |||
975 | 907 | ||
976 | #endif /* CONFIG_CPU_FREQ */ | 908 | #endif /* CONFIG_CPU_FREQ */ |
977 | 909 | ||
978 | static struct time_interpolator sparc64_cpu_interpolator = { | 910 | static int sparc64_next_event(unsigned long delta, |
979 | .source = TIME_SOURCE_CPU, | 911 | struct clock_event_device *evt) |
980 | .shift = 16, | 912 | { |
981 | .mask = 0xffffffffffffffffLL | 913 | return tick_ops->add_compare(delta) ? -ETIME : 0; |
914 | } | ||
915 | |||
916 | static void sparc64_timer_setup(enum clock_event_mode mode, | ||
917 | struct clock_event_device *evt) | ||
918 | { | ||
919 | switch (mode) { | ||
920 | case CLOCK_EVT_MODE_ONESHOT: | ||
921 | break; | ||
922 | |||
923 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
924 | tick_ops->disable_irq(); | ||
925 | break; | ||
926 | |||
927 | case CLOCK_EVT_MODE_PERIODIC: | ||
928 | case CLOCK_EVT_MODE_UNUSED: | ||
929 | WARN_ON(1); | ||
930 | break; | ||
931 | }; | ||
932 | } | ||
933 | |||
934 | static struct clock_event_device sparc64_clockevent = { | ||
935 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
936 | .set_mode = sparc64_timer_setup, | ||
937 | .set_next_event = sparc64_next_event, | ||
938 | .rating = 100, | ||
939 | .shift = 30, | ||
940 | .irq = -1, | ||
982 | }; | 941 | }; |
942 | static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); | ||
983 | 943 | ||
984 | /* The quotient formula is taken from the IA64 port. */ | 944 | void timer_interrupt(int irq, struct pt_regs *regs) |
985 | #define SPARC64_NSEC_PER_CYC_SHIFT 10UL | ||
986 | void __init time_init(void) | ||
987 | { | 945 | { |
988 | unsigned long clock = sparc64_init_timers(); | 946 | struct pt_regs *old_regs = set_irq_regs(regs); |
947 | unsigned long tick_mask = tick_ops->softint_mask; | ||
948 | int cpu = smp_processor_id(); | ||
949 | struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); | ||
950 | |||
951 | clear_softint(tick_mask); | ||
952 | |||
953 | irq_enter(); | ||
989 | 954 | ||
990 | sparc64_cpu_interpolator.frequency = clock; | 955 | kstat_this_cpu.irqs[0]++; |
991 | register_time_interpolator(&sparc64_cpu_interpolator); | ||
992 | 956 | ||
993 | /* Now that the interpolator is registered, it is | 957 | if (unlikely(!evt->event_handler)) { |
994 | * safe to start the timer ticking. | 958 | printk(KERN_WARNING |
959 | "Spurious SPARC64 timer interrupt on cpu %d\n", cpu); | ||
960 | } else | ||
961 | evt->event_handler(evt); | ||
962 | |||
963 | irq_exit(); | ||
964 | |||
965 | set_irq_regs(old_regs); | ||
966 | } | ||
967 | |||
968 | void __devinit setup_sparc64_timer(void) | ||
969 | { | ||
970 | struct clock_event_device *sevt; | ||
971 | unsigned long pstate; | ||
972 | |||
973 | /* Guarantee that the following sequences execute | ||
974 | * uninterrupted. | ||
995 | */ | 975 | */ |
996 | sparc64_start_timers(); | 976 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" |
977 | "wrpr %0, %1, %%pstate" | ||
978 | : "=r" (pstate) | ||
979 | : "i" (PSTATE_IE)); | ||
980 | |||
981 | tick_ops->init_tick(); | ||
982 | |||
983 | /* Restore PSTATE_IE. */ | ||
984 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | ||
985 | : /* no outputs */ | ||
986 | : "r" (pstate)); | ||
987 | |||
988 | sevt = &__get_cpu_var(sparc64_events); | ||
989 | |||
990 | memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); | ||
991 | sevt->cpumask = cpumask_of_cpu(smp_processor_id()); | ||
992 | |||
993 | clockevents_register_device(sevt); | ||
994 | } | ||
995 | |||
996 | #define SPARC64_NSEC_PER_CYC_SHIFT 32UL | ||
997 | |||
998 | static struct clocksource clocksource_tick = { | ||
999 | .rating = 100, | ||
1000 | .mask = CLOCKSOURCE_MASK(64), | ||
1001 | .shift = 16, | ||
1002 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
1003 | }; | ||
1004 | |||
1005 | static void __init setup_clockevent_multiplier(unsigned long hz) | ||
1006 | { | ||
1007 | unsigned long mult, shift = 32; | ||
1008 | |||
1009 | while (1) { | ||
1010 | mult = div_sc(hz, NSEC_PER_SEC, shift); | ||
1011 | if (mult && (mult >> 32UL) == 0UL) | ||
1012 | break; | ||
1013 | |||
1014 | shift--; | ||
1015 | } | ||
1016 | |||
1017 | sparc64_clockevent.shift = shift; | ||
1018 | sparc64_clockevent.mult = mult; | ||
1019 | } | ||
1020 | |||
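The clocksource and the clockevent both rely on a fixed-point (mult, shift) pair, but in opposite directions: for the clocksource, mult = clocksource_hz2mult(clock, shift), roughly (NSEC_PER_SEC << shift) / hz, and nanoseconds are (ticks * mult) >> shift; for the clockevent, div_sc(hz, NSEC_PER_SEC, shift) gives roughly (hz << shift) / NSEC_PER_SEC, so ticks are (ns * mult) >> shift, and the loop above simply lowers shift until that mult fits in 32 bits. A rough illustration of the two conversions (sketch, not part of the patch):

/* Sketch only: the two fixed-point conversions implied by the
 * mult/shift pairs set up above.
 */
static inline unsigned long long ticks_to_ns(unsigned long long ticks,
                                             unsigned long cs_mult, int cs_shift)
{
        return (ticks * cs_mult) >> cs_shift;   /* clocksource direction */
}

static inline unsigned long long ns_to_ticks(unsigned long long ns,
                                             unsigned long ce_mult, int ce_shift)
{
        return (ns * ce_mult) >> ce_shift;      /* clockevent direction */
}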
1021 | void __init time_init(void) | ||
1022 | { | ||
1023 | unsigned long clock = sparc64_init_timers(); | ||
997 | 1024 | ||
998 | timer_ticks_per_nsec_quotient = | 1025 | timer_ticks_per_nsec_quotient = |
999 | (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) + | 1026 | clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT); |
1000 | (clock / 2)) / clock); | 1027 | |
1028 | clocksource_tick.name = tick_ops->name; | ||
1029 | clocksource_tick.mult = | ||
1030 | clocksource_hz2mult(clock, | ||
1031 | clocksource_tick.shift); | ||
1032 | clocksource_tick.read = tick_ops->get_tick; | ||
1033 | |||
1034 | printk("clocksource: mult[%x] shift[%d]\n", | ||
1035 | clocksource_tick.mult, clocksource_tick.shift); | ||
1036 | |||
1037 | clocksource_register(&clocksource_tick); | ||
1038 | |||
1039 | sparc64_clockevent.name = tick_ops->name; | ||
1040 | |||
1041 | setup_clockevent_multiplier(clock); | ||
1042 | |||
1043 | sparc64_clockevent.max_delta_ns = | ||
1044 | clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent); | ||
1045 | sparc64_clockevent.min_delta_ns = | ||
1046 | clockevent_delta2ns(0xF, &sparc64_clockevent); | ||
1047 | |||
1048 | printk("clockevent: mult[%lx] shift[%d]\n", | ||
1049 | sparc64_clockevent.mult, sparc64_clockevent.shift); | ||
1050 | |||
1051 | setup_sparc64_timer(); | ||
1001 | 1052 | ||
1002 | #ifdef CONFIG_CPU_FREQ | 1053 | #ifdef CONFIG_CPU_FREQ |
1003 | cpufreq_register_notifier(&sparc64_cpufreq_notifier_block, | 1054 | cpufreq_register_notifier(&sparc64_cpufreq_notifier_block, |
@@ -1126,10 +1177,6 @@ static int set_rtc_mmss(unsigned long nowtime) | |||
1126 | #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ | 1177 | #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ |
1127 | static unsigned char mini_rtc_status; /* bitmapped status byte. */ | 1178 | static unsigned char mini_rtc_status; /* bitmapped status byte. */ |
1128 | 1179 | ||
1129 | /* months start at 0 now */ | ||
1130 | static unsigned char days_in_mo[] = | ||
1131 | {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; | ||
1132 | |||
1133 | #define FEBRUARY 2 | 1180 | #define FEBRUARY 2 |
1134 | #define STARTOFTIME 1970 | 1181 | #define STARTOFTIME 1970 |
1135 | #define SECDAY 86400L | 1182 | #define SECDAY 86400L |
@@ -1278,8 +1325,7 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file, | |||
1278 | 1325 | ||
1279 | case RTC_SET_TIME: /* Set the RTC */ | 1326 | case RTC_SET_TIME: /* Set the RTC */ |
1280 | { | 1327 | { |
1281 | int year; | 1328 | int year, days; |
1282 | unsigned char leap_yr; | ||
1283 | 1329 | ||
1284 | if (!capable(CAP_SYS_TIME)) | 1330 | if (!capable(CAP_SYS_TIME)) |
1285 | return -EACCES; | 1331 | return -EACCES; |
@@ -1288,14 +1334,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file, | |||
1288 | return -EFAULT; | 1334 | return -EFAULT; |
1289 | 1335 | ||
1290 | year = wtime.tm_year + 1900; | 1336 | year = wtime.tm_year + 1900; |
1291 | leap_yr = ((!(year % 4) && (year % 100)) || | 1337 | days = month_days[wtime.tm_mon] + |
1292 | !(year % 400)); | 1338 | ((wtime.tm_mon == 1) && leapyear(year)); |
1293 | 1339 | ||
1294 | if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) | 1340 | if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || |
1341 | (wtime.tm_mday < 1)) | ||
1295 | return -EINVAL; | 1342 | return -EINVAL; |
1296 | 1343 | ||
1297 | if (wtime.tm_mday < 0 || wtime.tm_mday > | 1344 | if (wtime.tm_mday < 0 || wtime.tm_mday > days) |
1298 | (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) | ||
1299 | return -EINVAL; | 1345 | return -EINVAL; |
1300 | 1346 | ||
1301 | if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || | 1347 | if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || |
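The deleted leap_yr expression is exactly what the leapyear() helper used with month_days[] has to evaluate, with February (tm_mon == 1) getting one extra day in leap years. For reference, a sketch equivalent to the removed logic:

/* Sketch: the leap-year rule the deleted leap_yr computation encoded. */
static int leapyear_example(int year)
{
        return (!(year % 4) && (year % 100)) || !(year % 400);
}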
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index d7d2a8bdc66e..7575aa371da8 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S | |||
@@ -60,11 +60,7 @@ tl0_irq4: BTRAP(0x44) | |||
60 | tl0_irq5: TRAP_IRQ(handler_irq, 5) | 60 | tl0_irq5: TRAP_IRQ(handler_irq, 5) |
61 | tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) | 61 | tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) |
62 | tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) | 62 | tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) |
63 | #ifndef CONFIG_SMP | 63 | tl0_irq14: TRAP_IRQ(timer_interrupt, 14) |
64 | tl0_irq14: TRAP_IRQ(timer_irq, 14) | ||
65 | #else | ||
66 | tl0_irq14: TICK_SMP_IRQ | ||
67 | #endif | ||
68 | tl0_irq15: TRAP_IRQ(handler_irq, 15) | 64 | tl0_irq15: TRAP_IRQ(handler_irq, 15) |
69 | tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) | 65 | tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) |
70 | tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) | 66 | tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index f146071a4b2a..cafadcbcdf38 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -122,24 +122,19 @@ static void __init read_obp_memory(const char *property, | |||
122 | size = 0UL; | 122 | size = 0UL; |
123 | base = new_base; | 123 | base = new_base; |
124 | } | 124 | } |
125 | regs[i].phys_addr = base; | 125 | if (size == 0UL) { |
126 | regs[i].reg_size = size; | 126 | /* If it is empty, simply get rid of it. |
127 | } | 127 | * This simplifies the logic of the other |
128 | 128 | * functions that process these arrays. | |
129 | for (i = 0; i < ents; i++) { | 129 | */ |
130 | if (regs[i].reg_size == 0UL) { | 130 | memmove(®s[i], ®s[i + 1], |
131 | int j; | 131 | (ents - i - 1) * sizeof(regs[0])); |
132 | |||
133 | for (j = i; j < ents - 1; j++) { | ||
134 | regs[j].phys_addr = | ||
135 | regs[j+1].phys_addr; | ||
136 | regs[j].reg_size = | ||
137 | regs[j+1].reg_size; | ||
138 | } | ||
139 | |||
140 | ents--; | ||
141 | i--; | 132 | i--; |
133 | ents--; | ||
134 | continue; | ||
142 | } | 135 | } |
136 | regs[i].phys_addr = base; | ||
137 | regs[i].reg_size = size; | ||
143 | } | 138 | } |
144 | 139 | ||
145 | *num_ents = ents; | 140 | *num_ents = ents; |
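The rewritten loop compacts the array in place: a zero-sized entry is overwritten by shifting every later entry down one slot with a single memmove(), and the same index is then examined again. A self-contained sketch of that idiom (the struct is a stand-in for the prom register array used here, not the real type):

/* Sketch: drop zero-sized entries from an OBP "reg"/"available" style
 * array in place, as read_obp_memory() now does.
 */
struct prom_range {
        unsigned long phys_addr;
        unsigned long reg_size;
};

static void compact_ranges(struct prom_range *regs, int *num_ents)
{
        int i, ents = *num_ents;

        for (i = 0; i < ents; i++) {
                if (regs[i].reg_size != 0UL)
                        continue;
                memmove(&regs[i], &regs[i + 1],
                        (ents - i - 1) * sizeof(regs[0]));
                i--;
                ents--;
        }
        *num_ents = ents;
}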
@@ -154,15 +149,6 @@ unsigned long *sparc64_valid_addr_bitmap __read_mostly; | |||
154 | unsigned long kern_base __read_mostly; | 149 | unsigned long kern_base __read_mostly; |
155 | unsigned long kern_size __read_mostly; | 150 | unsigned long kern_size __read_mostly; |
156 | 151 | ||
157 | /* get_new_mmu_context() uses "cache + 1". */ | ||
158 | DEFINE_SPINLOCK(ctx_alloc_lock); | ||
159 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | ||
160 | #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6)) | ||
161 | unsigned long mmu_context_bmap[CTX_BMAP_SLOTS]; | ||
162 | |||
163 | /* References to special section boundaries */ | ||
164 | extern char _start[], _end[]; | ||
165 | |||
166 | /* Initial ramdisk setup */ | 152 | /* Initial ramdisk setup */ |
167 | extern unsigned long sparc_ramdisk_image64; | 153 | extern unsigned long sparc_ramdisk_image64; |
168 | extern unsigned int sparc_ramdisk_image; | 154 | extern unsigned int sparc_ramdisk_image; |
@@ -406,19 +392,70 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) | |||
406 | if (tlb_type == spitfire) { | 392 | if (tlb_type == spitfire) { |
407 | unsigned long kaddr; | 393 | unsigned long kaddr; |
408 | 394 | ||
409 | for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) | 395 | /* This code only runs on Spitfire cpus so this is |
410 | __flush_icache_page(__get_phys(kaddr)); | 396 | * why we can assume _PAGE_PADDR_4U. |
397 | */ | ||
398 | for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { | ||
399 | unsigned long paddr, mask = _PAGE_PADDR_4U; | ||
400 | |||
401 | if (kaddr >= PAGE_OFFSET) | ||
402 | paddr = kaddr & mask; | ||
403 | else { | ||
404 | pgd_t *pgdp = pgd_offset_k(kaddr); | ||
405 | pud_t *pudp = pud_offset(pgdp, kaddr); | ||
406 | pmd_t *pmdp = pmd_offset(pudp, kaddr); | ||
407 | pte_t *ptep = pte_offset_kernel(pmdp, kaddr); | ||
408 | |||
409 | paddr = pte_val(*ptep) & mask; | ||
410 | } | ||
411 | __flush_icache_page(paddr); | ||
412 | } | ||
411 | } | 413 | } |
412 | } | 414 | } |
413 | 415 | ||
414 | void show_mem(void) | 416 | void show_mem(void) |
415 | { | 417 | { |
416 | printk("Mem-info:\n"); | 418 | unsigned long total = 0, reserved = 0; |
419 | unsigned long shared = 0, cached = 0; | ||
420 | pg_data_t *pgdat; | ||
421 | |||
422 | printk(KERN_INFO "Mem-info:\n"); | ||
417 | show_free_areas(); | 423 | show_free_areas(); |
418 | printk("Free swap: %6ldkB\n", | 424 | printk(KERN_INFO "Free swap: %6ldkB\n", |
419 | nr_swap_pages << (PAGE_SHIFT-10)); | 425 | nr_swap_pages << (PAGE_SHIFT-10)); |
420 | printk("%ld pages of RAM\n", num_physpages); | 426 | for_each_online_pgdat(pgdat) { |
421 | printk("%lu free pages\n", nr_free_pages()); | 427 | unsigned long i, flags; |
428 | |||
429 | pgdat_resize_lock(pgdat, &flags); | ||
430 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
431 | struct page *page = pgdat_page_nr(pgdat, i); | ||
432 | total++; | ||
433 | if (PageReserved(page)) | ||
434 | reserved++; | ||
435 | else if (PageSwapCache(page)) | ||
436 | cached++; | ||
437 | else if (page_count(page)) | ||
438 | shared += page_count(page) - 1; | ||
439 | } | ||
440 | pgdat_resize_unlock(pgdat, &flags); | ||
441 | } | ||
442 | |||
443 | printk(KERN_INFO "%lu pages of RAM\n", total); | ||
444 | printk(KERN_INFO "%lu reserved pages\n", reserved); | ||
445 | printk(KERN_INFO "%lu pages shared\n", shared); | ||
446 | printk(KERN_INFO "%lu pages swap cached\n", cached); | ||
447 | |||
448 | printk(KERN_INFO "%lu pages dirty\n", | ||
449 | global_page_state(NR_FILE_DIRTY)); | ||
450 | printk(KERN_INFO "%lu pages writeback\n", | ||
451 | global_page_state(NR_WRITEBACK)); | ||
452 | printk(KERN_INFO "%lu pages mapped\n", | ||
453 | global_page_state(NR_FILE_MAPPED)); | ||
454 | printk(KERN_INFO "%lu pages slab\n", | ||
455 | global_page_state(NR_SLAB_RECLAIMABLE) + | ||
456 | global_page_state(NR_SLAB_UNRECLAIMABLE)); | ||
457 | printk(KERN_INFO "%lu pages pagetables\n", | ||
458 | global_page_state(NR_PAGETABLE)); | ||
422 | } | 459 | } |
423 | 460 | ||
424 | void mmu_info(struct seq_file *m) | 461 | void mmu_info(struct seq_file *m) |
@@ -658,6 +695,13 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
658 | } | 695 | } |
659 | #endif /* DCACHE_ALIASING_POSSIBLE */ | 696 | #endif /* DCACHE_ALIASING_POSSIBLE */ |
660 | 697 | ||
698 | /* get_new_mmu_context() uses "cache + 1". */ | ||
699 | DEFINE_SPINLOCK(ctx_alloc_lock); | ||
700 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | ||
701 | #define MAX_CTX_NR (1UL << CTX_NR_BITS) | ||
702 | #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) | ||
703 | DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); | ||
704 | |||
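The hand-rolled sizing is replaced by the generic bitmap helpers: DECLARE_BITMAP(name, bits) expands to an unsigned long array of BITS_TO_LONGS(bits) elements, which on a 64-bit kernel is the same element count the old (1UL << (CTX_NR_BITS - 6)) expression produced, but stays correct for any word size. A one-line illustration (the bit count is invented):

/* Sketch: DECLARE_BITMAP(map, N) is
 *      unsigned long map[BITS_TO_LONGS(N)];
 * with BITS_TO_LONGS(N) == DIV_ROUND_UP(N, BITS_PER_LONG).
 */
DECLARE_BITMAP(example_bitmap, 8192);   /* 8192 bits -> 128 longs on 64-bit */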
661 | /* Caller does TLB context flushing on local CPU if necessary. | 705 | /* Caller does TLB context flushing on local CPU if necessary. |
662 | * The caller also ensures that CTX_VALID(mm->context) is false. | 706 | * The caller also ensures that CTX_VALID(mm->context) is false. |
663 | * | 707 | * |
@@ -717,95 +761,6 @@ out: | |||
717 | smp_new_mmu_context_version(); | 761 | smp_new_mmu_context_version(); |
718 | } | 762 | } |
719 | 763 | ||
720 | void sparc_ultra_dump_itlb(void) | ||
721 | { | ||
722 | int slot; | ||
723 | |||
724 | if (tlb_type == spitfire) { | ||
725 | printk ("Contents of itlb: "); | ||
726 | for (slot = 0; slot < 14; slot++) printk (" "); | ||
727 | printk ("%2x:%016lx,%016lx\n", | ||
728 | 0, | ||
729 | spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0)); | ||
730 | for (slot = 1; slot < 64; slot+=3) { | ||
731 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
732 | slot, | ||
733 | spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot), | ||
734 | slot+1, | ||
735 | spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1), | ||
736 | slot+2, | ||
737 | spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2)); | ||
738 | } | ||
739 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
740 | printk ("Contents of itlb0:\n"); | ||
741 | for (slot = 0; slot < 16; slot+=2) { | ||
742 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
743 | slot, | ||
744 | cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot), | ||
745 | slot+1, | ||
746 | cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1)); | ||
747 | } | ||
748 | printk ("Contents of itlb2:\n"); | ||
749 | for (slot = 0; slot < 128; slot+=2) { | ||
750 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
751 | slot, | ||
752 | cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot), | ||
753 | slot+1, | ||
754 | cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1)); | ||
755 | } | ||
756 | } | ||
757 | } | ||
758 | |||
759 | void sparc_ultra_dump_dtlb(void) | ||
760 | { | ||
761 | int slot; | ||
762 | |||
763 | if (tlb_type == spitfire) { | ||
764 | printk ("Contents of dtlb: "); | ||
765 | for (slot = 0; slot < 14; slot++) printk (" "); | ||
766 | printk ("%2x:%016lx,%016lx\n", 0, | ||
767 | spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0)); | ||
768 | for (slot = 1; slot < 64; slot+=3) { | ||
769 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
770 | slot, | ||
771 | spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot), | ||
772 | slot+1, | ||
773 | spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1), | ||
774 | slot+2, | ||
775 | spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2)); | ||
776 | } | ||
777 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
778 | printk ("Contents of dtlb0:\n"); | ||
779 | for (slot = 0; slot < 16; slot+=2) { | ||
780 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
781 | slot, | ||
782 | cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot), | ||
783 | slot+1, | ||
784 | cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1)); | ||
785 | } | ||
786 | printk ("Contents of dtlb2:\n"); | ||
787 | for (slot = 0; slot < 512; slot+=2) { | ||
788 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
789 | slot, | ||
790 | cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2), | ||
791 | slot+1, | ||
792 | cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2)); | ||
793 | } | ||
794 | if (tlb_type == cheetah_plus) { | ||
795 | printk ("Contents of dtlb3:\n"); | ||
796 | for (slot = 0; slot < 512; slot+=2) { | ||
797 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | ||
798 | slot, | ||
799 | cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3), | ||
800 | slot+1, | ||
801 | cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3)); | ||
802 | } | ||
803 | } | ||
804 | } | ||
805 | } | ||
806 | |||
807 | extern unsigned long cmdline_memory_size; | ||
808 | |||
809 | /* Find a free area for the bootmem map, avoiding the kernel image | 764 | /* Find a free area for the bootmem map, avoiding the kernel image |
810 | * and the initial ramdisk. | 765 | * and the initial ramdisk. |
811 | */ | 766 | */ |
@@ -815,8 +770,8 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, | |||
815 | unsigned long avoid_start, avoid_end, bootmap_size; | 770 | unsigned long avoid_start, avoid_end, bootmap_size; |
816 | int i; | 771 | int i; |
817 | 772 | ||
818 | bootmap_size = ((end_pfn - start_pfn) + 7) / 8; | 773 | bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn); |
819 | bootmap_size = ALIGN(bootmap_size, sizeof(long)); | 774 | bootmap_size <<= PAGE_SHIFT; |
820 | 775 | ||
821 | avoid_start = avoid_end = 0; | 776 | avoid_start = avoid_end = 0; |
822 | #ifdef CONFIG_BLK_DEV_INITRD | 777 | #ifdef CONFIG_BLK_DEV_INITRD |
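For context, bootmem_bootmap_pages() returns the number of whole pages needed for a bitmap carrying one bit per page frame, which is why the result is shifted left by PAGE_SHIFT to recover a byte size. Roughly (mirroring the mm/bootmem.c helper of this era; sketch only):

/* Sketch of what bootmem_bootmap_pages(pages) computes. */
static unsigned long bootmap_pages_example(unsigned long pages)
{
        unsigned long mapsize = (pages + 7) / 8;        /* bitmap bytes */

        return (mapsize + PAGE_SIZE - 1) >> PAGE_SHIFT; /* whole pages  */
}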
@@ -983,6 +938,20 @@ static void __init trim_pavail(unsigned long *cur_size_p, | |||
983 | } | 938 | } |
984 | } | 939 | } |
985 | 940 | ||
941 | /* About pages_avail, this is the value we will use to calculate | ||
942 | * the zholes_size[] argument given to free_area_init_node(). The | ||
943 | * page allocator uses this to calculate nr_kernel_pages, | ||
944 | * nr_all_pages and zone->present_pages. On NUMA it is used | ||
945 | * to calculate zone->min_unmapped_pages and zone->min_slab_pages. | ||
946 | * | ||
947 | * So this number should really be set to what the page allocator | ||
948 | * actually ends up with. This means: | ||
949 | * 1) It should include bootmem map pages, we'll release those. | ||
950 | * 2) It should not include the kernel image, except for the | ||
951 | * __init sections which we will also release. | ||
952 | * 3) It should include the initrd image, since we'll release | ||
953 | * that too. | ||
954 | */ | ||
986 | static unsigned long __init bootmem_init(unsigned long *pages_avail, | 955 | static unsigned long __init bootmem_init(unsigned long *pages_avail, |
987 | unsigned long phys_base) | 956 | unsigned long phys_base) |
988 | { | 957 | { |
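Put differently, pages_avail should end up equal to the number of page frames the buddy allocator will actually own, so that spanned pages minus pages_avail is the hole count. A hedged sketch of how such a value typically reaches the allocator through free_area_init_node() (2.6.21-era signature; zone index and variable names are illustrative, not taken from this patch):

static void __init example_zone_init(unsigned long end_pfn,
                                     unsigned long pages_avail,
                                     unsigned long start_pfn)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0 };
        unsigned long zholes_size[MAX_NR_ZONES] = { 0 };

        zones_size[ZONE_NORMAL]  = end_pfn;               /* spanned */
        zholes_size[ZONE_NORMAL] = end_pfn - pages_avail; /* holes   */

        free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn,
                            zholes_size);
}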
@@ -1069,7 +1038,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, | |||
1069 | initrd_start, initrd_end); | 1038 | initrd_start, initrd_end); |
1070 | #endif | 1039 | #endif |
1071 | reserve_bootmem(initrd_start, size); | 1040 | reserve_bootmem(initrd_start, size); |
1072 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1073 | 1041 | ||
1074 | initrd_start += PAGE_OFFSET; | 1042 | initrd_start += PAGE_OFFSET; |
1075 | initrd_end += PAGE_OFFSET; | 1043 | initrd_end += PAGE_OFFSET; |
@@ -1082,6 +1050,11 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, | |||
1082 | reserve_bootmem(kern_base, kern_size); | 1050 | reserve_bootmem(kern_base, kern_size); |
1083 | *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT; | 1051 | *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT; |
1084 | 1052 | ||
1053 | /* Add back in the initmem pages. */ | ||
1054 | size = ((unsigned long)(__init_end) & PAGE_MASK) - | ||
1055 | PAGE_ALIGN((unsigned long)__init_begin); | ||
1056 | *pages_avail += size >> PAGE_SHIFT; | ||
1057 | |||
1085 | /* Reserve the bootmem map. We do not account for it | 1058 | /* Reserve the bootmem map. We do not account for it |
1086 | * in pages_avail because we will release that memory | 1059 | * in pages_avail because we will release that memory |
1087 | * in free_all_bootmem. | 1060 | * in free_all_bootmem. |
@@ -1092,7 +1065,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, | |||
1092 | (bootmap_pfn << PAGE_SHIFT), size); | 1065 | (bootmap_pfn << PAGE_SHIFT), size); |
1093 | #endif | 1066 | #endif |
1094 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | 1067 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); |
1095 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1096 | 1068 | ||
1097 | for (i = 0; i < pavail_ents; i++) { | 1069 | for (i = 0; i < pavail_ents; i++) { |
1098 | unsigned long start_pfn, end_pfn; | 1070 | unsigned long start_pfn, end_pfn; |
@@ -1584,6 +1556,10 @@ void __init mem_init(void) | |||
1584 | #ifdef CONFIG_DEBUG_BOOTMEM | 1556 | #ifdef CONFIG_DEBUG_BOOTMEM |
1585 | prom_printf("mem_init: Calling free_all_bootmem().\n"); | 1557 | prom_printf("mem_init: Calling free_all_bootmem().\n"); |
1586 | #endif | 1558 | #endif |
1559 | |||
1560 | /* We subtract one to account for the mem_map_zero page | ||
1561 | * allocated below. | ||
1562 | */ | ||
1587 | totalram_pages = num_physpages = free_all_bootmem() - 1; | 1563 | totalram_pages = num_physpages = free_all_bootmem() - 1; |
1588 | 1564 | ||
1589 | /* | 1565 | /* |
@@ -1883,62 +1859,6 @@ static unsigned long kern_large_tte(unsigned long paddr) | |||
1883 | return val | paddr; | 1859 | return val | paddr; |
1884 | } | 1860 | } |
1885 | 1861 | ||
1886 | /* | ||
1887 | * Translate PROM's mapping we capture at boot time into physical address. | ||
1888 | * The second parameter is only set from prom_callback() invocations. | ||
1889 | */ | ||
1890 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | ||
1891 | { | ||
1892 | unsigned long mask; | ||
1893 | int i; | ||
1894 | |||
1895 | mask = _PAGE_PADDR_4U; | ||
1896 | if (tlb_type == hypervisor) | ||
1897 | mask = _PAGE_PADDR_4V; | ||
1898 | |||
1899 | for (i = 0; i < prom_trans_ents; i++) { | ||
1900 | struct linux_prom_translation *p = &prom_trans[i]; | ||
1901 | |||
1902 | if (promva >= p->virt && | ||
1903 | promva < (p->virt + p->size)) { | ||
1904 | unsigned long base = p->data & mask; | ||
1905 | |||
1906 | if (error) | ||
1907 | *error = 0; | ||
1908 | return base + (promva & (8192 - 1)); | ||
1909 | } | ||
1910 | } | ||
1911 | if (error) | ||
1912 | *error = 1; | ||
1913 | return 0UL; | ||
1914 | } | ||
1915 | |||
1916 | /* XXX We should kill off this ugly thing at some point. XXX */ | ||
1917 | unsigned long sun4u_get_pte(unsigned long addr) | ||
1918 | { | ||
1919 | pgd_t *pgdp; | ||
1920 | pud_t *pudp; | ||
1921 | pmd_t *pmdp; | ||
1922 | pte_t *ptep; | ||
1923 | unsigned long mask = _PAGE_PADDR_4U; | ||
1924 | |||
1925 | if (tlb_type == hypervisor) | ||
1926 | mask = _PAGE_PADDR_4V; | ||
1927 | |||
1928 | if (addr >= PAGE_OFFSET) | ||
1929 | return addr & mask; | ||
1930 | |||
1931 | if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) | ||
1932 | return prom_virt_to_phys(addr, NULL); | ||
1933 | |||
1934 | pgdp = pgd_offset_k(addr); | ||
1935 | pudp = pud_offset(pgdp, addr); | ||
1936 | pmdp = pmd_offset(pudp, addr); | ||
1937 | ptep = pte_offset_kernel(pmdp, addr); | ||
1938 | |||
1939 | return pte_val(*ptep) & mask; | ||
1940 | } | ||
1941 | |||
1942 | /* If not locked, zap it. */ | 1862 | /* If not locked, zap it. */ |
1943 | void __flush_tlb_all(void) | 1863 | void __flush_tlb_all(void) |
1944 | { | 1864 | { |
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 9fcaad6dd11f..542c808ec2c8 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c | |||
@@ -224,7 +224,8 @@ static char *serial(char *buffer, int sz) | |||
224 | 224 | ||
225 | *buffer = 0; | 225 | *buffer = 0; |
226 | if (dp) { | 226 | if (dp) { |
227 | char *val = of_get_property(dp, "system-board-serial#", &len); | 227 | const char *val = |
228 | of_get_property(dp, "system-board-serial#", &len); | ||
228 | 229 | ||
229 | if (val && len > 0) { | 230 | if (val && len > 0) { |
230 | if (len > sz) | 231 | if (len > sz) |
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile index ed935b58e8a4..6c4fdd86acd8 100644 --- a/arch/xtensa/lib/Makefile +++ b/arch/xtensa/lib/Makefile | |||
@@ -2,6 +2,6 @@ | |||
2 | # Makefile for Xtensa-specific library files. | 2 | # Makefile for Xtensa-specific library files. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += memcopy.o memset.o checksum.o strcasecmp.o \ | 5 | lib-y += memcopy.o memset.o checksum.o \ |
6 | usercopy.o strncpy_user.o strnlen_user.o | 6 | usercopy.o strncpy_user.o strnlen_user.o |
7 | lib-$(CONFIG_PCI) += pci-auto.o | 7 | lib-$(CONFIG_PCI) += pci-auto.o |
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c deleted file mode 100644 index 165b2d6effa5..000000000000 --- a/arch/xtensa/lib/strcasecmp.c +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/xtensa/lib/strcasecmp.c | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General | ||
5 | * Public License. See the file "COPYING" in the main directory of | ||
6 | * this archive for more details. | ||
7 | * | ||
8 | * Copyright (C) 2002 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #include <linux/string.h> | ||
12 | |||
13 | |||
14 | /* We handle nothing here except the C locale. Since this is used in | ||
15 | only one place, on strings known to contain only 7 bit ASCII, this | ||
16 | is ok. */ | ||
17 | |||
18 | int strcasecmp(const char *a, const char *b) | ||
19 | { | ||
20 | int ca, cb; | ||
21 | |||
22 | do { | ||
23 | ca = *a++ & 0xff; | ||
24 | cb = *b++ & 0xff; | ||
25 | if (ca >= 'A' && ca <= 'Z') | ||
26 | ca += 'a' - 'A'; | ||
27 | if (cb >= 'A' && cb <= 'Z') | ||
28 | cb += 'a' - 'A'; | ||
29 | } while (ca == cb && ca != '\0'); | ||
30 | |||
31 | return ca - cb; | ||
32 | } | ||
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index 4db2055cee31..001af7f7ddda 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c | |||
@@ -39,7 +39,7 @@ MODULE_VERSION("2.0"); | |||
39 | 39 | ||
40 | static LIST_HEAD(device_list); | 40 | static LIST_HEAD(device_list); |
41 | struct uflash_dev { | 41 | struct uflash_dev { |
42 | char *name; /* device name */ | 42 | const char *name; /* device name */ |
43 | struct map_info map; /* mtd map info */ | 43 | struct map_info map; /* mtd map info */ |
44 | struct mtd_info *mtd; /* mtd info */ | 44 | struct mtd_info *mtd; /* mtd info */ |
45 | }; | 45 | }; |
@@ -80,7 +80,7 @@ int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp) | |||
80 | 80 | ||
81 | up->name = of_get_property(dp, "model", NULL); | 81 | up->name = of_get_property(dp, "model", NULL); |
82 | if (up->name && 0 < strlen(up->name)) | 82 | if (up->name && 0 < strlen(up->name)) |
83 | up->map.name = up->name; | 83 | up->map.name = (char *)up->name; |
84 | 84 | ||
85 | up->map.phys = res->start; | 85 | up->map.phys = res->start; |
86 | 86 | ||
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 9df1038ec6bb..5da73212ac91 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -64,11 +64,9 @@ | |||
64 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
65 | #include <asm/irq.h> | 65 | #include <asm/irq.h> |
66 | 66 | ||
67 | #ifdef __sparc__ | 67 | #ifdef CONFIG_SPARC |
68 | #include <asm/idprom.h> | 68 | #include <asm/idprom.h> |
69 | #include <asm/openprom.h> | 69 | #include <asm/prom.h> |
70 | #include <asm/oplib.h> | ||
71 | #include <asm/pbm.h> | ||
72 | #endif | 70 | #endif |
73 | 71 | ||
74 | #ifdef CONFIG_PPC_PMAC | 72 | #ifdef CONFIG_PPC_PMAC |
@@ -2846,7 +2844,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2846 | return rc; | 2844 | return rc; |
2847 | } | 2845 | } |
2848 | 2846 | ||
2849 | #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) | 2847 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) |
2850 | /* Fetch MAC address from vital product data of PCI ROM. */ | 2848 | /* Fetch MAC address from vital product data of PCI ROM. */ |
2851 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) | 2849 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) |
2852 | { | 2850 | { |
@@ -2901,36 +2899,19 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) | |||
2901 | 2899 | ||
2902 | static int __devinit gem_get_device_address(struct gem *gp) | 2900 | static int __devinit gem_get_device_address(struct gem *gp) |
2903 | { | 2901 | { |
2904 | #if defined(__sparc__) || defined(CONFIG_PPC_PMAC) | 2902 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) |
2905 | struct net_device *dev = gp->dev; | 2903 | struct net_device *dev = gp->dev; |
2906 | #endif | ||
2907 | |||
2908 | #if defined(__sparc__) | ||
2909 | struct pci_dev *pdev = gp->pdev; | ||
2910 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
2911 | int use_idprom = 1; | ||
2912 | |||
2913 | if (pcp != NULL) { | ||
2914 | unsigned char *addr; | ||
2915 | int len; | ||
2916 | |||
2917 | addr = of_get_property(pcp->prom_node, "local-mac-address", | ||
2918 | &len); | ||
2919 | if (addr && len == 6) { | ||
2920 | use_idprom = 0; | ||
2921 | memcpy(dev->dev_addr, addr, 6); | ||
2922 | } | ||
2923 | } | ||
2924 | if (use_idprom) | ||
2925 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
2926 | #elif defined(CONFIG_PPC_PMAC) | ||
2927 | const unsigned char *addr; | 2904 | const unsigned char *addr; |
2928 | 2905 | ||
2929 | addr = get_property(gp->of_node, "local-mac-address", NULL); | 2906 | addr = get_property(gp->of_node, "local-mac-address", NULL); |
2930 | if (addr == NULL) { | 2907 | if (addr == NULL) { |
2908 | #ifdef CONFIG_SPARC | ||
2909 | addr = idprom->id_ethaddr; | ||
2910 | #else | ||
2931 | printk("\n"); | 2911 | printk("\n"); |
2932 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); | 2912 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); |
2933 | return -1; | 2913 | return -1; |
2914 | #endif | ||
2934 | } | 2915 | } |
2935 | memcpy(dev->dev_addr, addr, 6); | 2916 | memcpy(dev->dev_addr, addr, 6); |
2936 | #else | 2917 | #else |
@@ -3088,7 +3069,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3088 | /* On Apple, we want a reference to the Open Firmware device-tree | 3069 | /* On Apple, we want a reference to the Open Firmware device-tree |
3089 | * node. We use it for clock control. | 3070 | * node. We use it for clock control. |
3090 | */ | 3071 | */ |
3091 | #ifdef CONFIG_PPC_PMAC | 3072 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
3092 | gp->of_node = pci_device_to_OF_node(pdev); | 3073 | gp->of_node = pci_device_to_OF_node(pdev); |
3093 | #endif | 3074 | #endif |
3094 | 3075 | ||
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index a70067c85cc9..58cf87c5751e 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -1025,7 +1025,7 @@ struct gem { | |||
1025 | 1025 | ||
1026 | struct pci_dev *pdev; | 1026 | struct pci_dev *pdev; |
1027 | struct net_device *dev; | 1027 | struct net_device *dev; |
1028 | #ifdef CONFIG_PPC_PMAC | 1028 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
1029 | struct device_node *of_node; | 1029 | struct device_node *of_node; |
1030 | #endif | 1030 | #endif |
1031 | }; | 1031 | }; |
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 5304d7b94e5e..51c3fe2108a3 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -55,9 +55,6 @@ | |||
55 | 55 | ||
56 | #ifdef CONFIG_PCI | 56 | #ifdef CONFIG_PCI |
57 | #include <linux/pci.h> | 57 | #include <linux/pci.h> |
58 | #ifdef CONFIG_SPARC | ||
59 | #include <asm/pbm.h> | ||
60 | #endif | ||
61 | #endif | 58 | #endif |
62 | 59 | ||
63 | #include "sunhme.h" | 60 | #include "sunhme.h" |
@@ -2701,7 +2698,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2701 | dev->dev_addr[i] = macaddr[i]; | 2698 | dev->dev_addr[i] = macaddr[i]; |
2702 | macaddr[5]++; | 2699 | macaddr[5]++; |
2703 | } else { | 2700 | } else { |
2704 | unsigned char *addr; | 2701 | const unsigned char *addr; |
2705 | int len; | 2702 | int len; |
2706 | 2703 | ||
2707 | addr = of_get_property(dp, "local-mac-address", &len); | 2704 | addr = of_get_property(dp, "local-mac-address", &len); |
@@ -2983,7 +2980,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2983 | { | 2980 | { |
2984 | struct quattro *qp = NULL; | 2981 | struct quattro *qp = NULL; |
2985 | #ifdef CONFIG_SPARC | 2982 | #ifdef CONFIG_SPARC |
2986 | struct pcidev_cookie *pcp; | 2983 | struct device_node *dp; |
2987 | #endif | 2984 | #endif |
2988 | struct happy_meal *hp; | 2985 | struct happy_meal *hp; |
2989 | struct net_device *dev; | 2986 | struct net_device *dev; |
@@ -2995,13 +2992,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2995 | 2992 | ||
2996 | /* Now make sure pci_dev cookie is there. */ | 2993 | /* Now make sure pci_dev cookie is there. */ |
2997 | #ifdef CONFIG_SPARC | 2994 | #ifdef CONFIG_SPARC |
2998 | pcp = pdev->sysdata; | 2995 | dp = pci_device_to_OF_node(pdev); |
2999 | if (pcp == NULL) { | 2996 | strcpy(prom_name, dp->name); |
3000 | printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n"); | ||
3001 | return -ENODEV; | ||
3002 | } | ||
3003 | |||
3004 | strcpy(prom_name, pcp->prom_node->name); | ||
3005 | #else | 2997 | #else |
3006 | if (is_quattro_p(pdev)) | 2998 | if (is_quattro_p(pdev)) |
3007 | strcpy(prom_name, "SUNW,qfe"); | 2999 | strcpy(prom_name, "SUNW,qfe"); |
@@ -3078,11 +3070,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3078 | macaddr[5]++; | 3070 | macaddr[5]++; |
3079 | } else { | 3071 | } else { |
3080 | #ifdef CONFIG_SPARC | 3072 | #ifdef CONFIG_SPARC |
3081 | unsigned char *addr; | 3073 | const unsigned char *addr; |
3082 | int len; | 3074 | int len; |
3083 | 3075 | ||
3084 | if (qfe_slot != -1 && | 3076 | if (qfe_slot != -1 && |
3085 | (addr = of_get_property(pcp->prom_node, | 3077 | (addr = of_get_property(dp, |
3086 | "local-mac-address", &len)) != NULL | 3078 | "local-mac-address", &len)) != NULL |
3087 | && len == 6) { | 3079 | && len == 6) { |
3088 | memcpy(dev->dev_addr, addr, 6); | 3080 | memcpy(dev->dev_addr, addr, 6); |
@@ -3102,7 +3094,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3102 | hp->tcvregs = (hpreg_base + 0x7000UL); | 3094 | hp->tcvregs = (hpreg_base + 0x7000UL); |
3103 | 3095 | ||
3104 | #ifdef CONFIG_SPARC | 3096 | #ifdef CONFIG_SPARC |
3105 | hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff); | 3097 | hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); |
3106 | if (hp->hm_revision == 0xff) { | 3098 | if (hp->hm_revision == 0xff) { |
3107 | unsigned char prev; | 3099 | unsigned char prev; |
3108 | 3100 | ||
@@ -3297,7 +3289,7 @@ static int __devinit hme_sbus_probe(struct of_device *dev, const struct of_devic | |||
3297 | { | 3289 | { |
3298 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | 3290 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); |
3299 | struct device_node *dp = dev->node; | 3291 | struct device_node *dp = dev->node; |
3300 | char *model = of_get_property(dp, "model", NULL); | 3292 | const char *model = of_get_property(dp, "model", NULL); |
3301 | int is_qfe = (match->data != NULL); | 3293 | int is_qfe = (match->data != NULL); |
3302 | 3294 | ||
3303 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | 3295 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 38383e4e07a1..9488f49ea569 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -47,10 +47,9 @@ | |||
47 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
48 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
49 | 49 | ||
50 | #ifdef CONFIG_SPARC64 | 50 | #ifdef CONFIG_SPARC |
51 | #include <asm/idprom.h> | 51 | #include <asm/idprom.h> |
52 | #include <asm/oplib.h> | 52 | #include <asm/prom.h> |
53 | #include <asm/pbm.h> | ||
54 | #endif | 53 | #endif |
55 | 54 | ||
56 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
@@ -10987,24 +10986,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10987 | return err; | 10986 | return err; |
10988 | } | 10987 | } |
10989 | 10988 | ||
10990 | #ifdef CONFIG_SPARC64 | 10989 | #ifdef CONFIG_SPARC |
10991 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) | 10990 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) |
10992 | { | 10991 | { |
10993 | struct net_device *dev = tp->dev; | 10992 | struct net_device *dev = tp->dev; |
10994 | struct pci_dev *pdev = tp->pdev; | 10993 | struct pci_dev *pdev = tp->pdev; |
10995 | struct pcidev_cookie *pcp = pdev->sysdata; | 10994 | struct device_node *dp = pci_device_to_OF_node(pdev); |
10996 | 10995 | const unsigned char *addr; | |
10997 | if (pcp != NULL) { | 10996 | int len; |
10998 | unsigned char *addr; | 10997 | |
10999 | int len; | 10998 | addr = of_get_property(dp, "local-mac-address", &len); |
11000 | 10999 | if (addr && len == 6) { | |
11001 | addr = of_get_property(pcp->prom_node, "local-mac-address", | 11000 | memcpy(dev->dev_addr, addr, 6); |
11002 | &len); | 11001 | memcpy(dev->perm_addr, dev->dev_addr, 6); |
11003 | if (addr && len == 6) { | 11002 | return 0; |
11004 | memcpy(dev->dev_addr, addr, 6); | ||
11005 | memcpy(dev->perm_addr, dev->dev_addr, 6); | ||
11006 | return 0; | ||
11007 | } | ||
11008 | } | 11003 | } |
11009 | return -ENODEV; | 11004 | return -ENODEV; |
11010 | } | 11005 | } |
@@ -11025,7 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
11025 | u32 hi, lo, mac_offset; | 11020 | u32 hi, lo, mac_offset; |
11026 | int addr_ok = 0; | 11021 | int addr_ok = 0; |
11027 | 11022 | ||
11028 | #ifdef CONFIG_SPARC64 | 11023 | #ifdef CONFIG_SPARC |
11029 | if (!tg3_get_macaddr_sparc(tp)) | 11024 | if (!tg3_get_macaddr_sparc(tp)) |
11030 | return 0; | 11025 | return 0; |
11031 | #endif | 11026 | #endif |
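The driver-side conversions share one shape: the pcidev_cookie detour is gone, the OF node comes straight from pci_device_to_OF_node(), and the property pointer is const because of_get_property() now returns const. A condensed sketch of that pattern as used by the sungem, sunhme and tg3 changes above (and repeated in the tulip changes below); the helper name is invented:

/* Sketch of the common "MAC address from the OF tree" pattern. */
static int example_of_mac(struct pci_dev *pdev, unsigned char *mac)
{
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        int len;

        if (!dp)
                return -ENODEV;

        addr = of_get_property(dp, "local-mac-address", &len);
        if (!addr || len != 6)
                return -ENODEV;

        memcpy(mac, addr, 6);
        return 0;
}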
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index d19f8568440f..861729806dc1 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); | |||
63 | 63 | ||
64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
66 | || defined(__sparc__) || defined(__ia64__) \ | 66 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
67 | || defined(__sh__) || defined(__mips__) | 67 | || defined(__sh__) || defined(__mips__) |
68 | static int rx_copybreak = 1518; | 68 | static int rx_copybreak = 1518; |
69 | #else | 69 | #else |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index e40ddb869583..62143f92c231 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -1160,7 +1160,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); | 1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); |
1161 | 1161 | ||
1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); | 1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); |
1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY) | 1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) |
1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; | 1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; |
1165 | #endif | 1165 | #endif |
1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, | 1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, |
@@ -1175,7 +1175,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1175 | ** Set up the RX descriptor ring (Intels) | 1175 | ** Set up the RX descriptor ring (Intels) |
1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) | 1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) |
1177 | */ | 1177 | */ |
1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
1179 | for (i=0; i<NUM_RX_DESC; i++) { | 1179 | for (i=0; i<NUM_RX_DESC; i++) { |
1180 | lp->rx_ring[i].status = 0; | 1180 | lp->rx_ring[i].status = 0; |
1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); | 1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); |
@@ -1252,11 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1252 | mii_get_phy(dev); | 1252 | mii_get_phy(dev); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | #ifndef __sparc_v9__ | ||
1256 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, | 1255 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, |
1257 | #else | ||
1258 | printk(" and requires IRQ%x (provided by %s).\n", dev->irq, | ||
1259 | #endif | ||
1260 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); | 1256 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); |
1261 | } | 1257 | } |
1262 | 1258 | ||
@@ -3627,7 +3623,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3627 | struct de4x5_private *lp = netdev_priv(dev); | 3623 | struct de4x5_private *lp = netdev_priv(dev); |
3628 | struct sk_buff *p; | 3624 | struct sk_buff *p; |
3629 | 3625 | ||
3630 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 3626 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
3631 | struct sk_buff *ret; | 3627 | struct sk_buff *ret; |
3632 | u_long i=0, tmp; | 3628 | u_long i=0, tmp; |
3633 | 3629 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index e3774a522372..e9bf526ec534 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -36,8 +36,8 @@ | |||
36 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | 38 | ||
39 | #ifdef __sparc__ | 39 | #ifdef CONFIG_SPARC |
40 | #include <asm/pbm.h> | 40 | #include <asm/prom.h> |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static char version[] __devinitdata = | 43 | static char version[] __devinitdata = |
@@ -67,7 +67,7 @@ const char * const medianame[32] = { | |||
67 | 67 | ||
68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
70 | || defined(__sparc__) || defined(__ia64__) \ | 70 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
71 | || defined(__sh__) || defined(__mips__) | 71 | || defined(__sh__) || defined(__mips__) |
72 | static int rx_copybreak = 1518; | 72 | static int rx_copybreak = 1518; |
73 | #else | 73 | #else |
@@ -91,7 +91,7 @@ static int rx_copybreak = 100; | |||
91 | static int csr0 = 0x01A00000 | 0xE000; | 91 | static int csr0 = 0x01A00000 | 0xE000; |
92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) | 92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) |
93 | static int csr0 = 0x01A00000 | 0x8000; | 93 | static int csr0 = 0x01A00000 | 0x8000; |
94 | #elif defined(__sparc__) || defined(__hppa__) | 94 | #elif defined(CONFIG_SPARC) || defined(__hppa__) |
95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte | 95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte |
96 | * crossing anyways so it makes no sense to tell Tulip to burst | 96 | * crossing anyways so it makes no sense to tell Tulip to burst |
97 | * any more than that. | 97 | * any more than that. |
@@ -1315,7 +1315,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ | 1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ |
1316 | if (tulip_uli_dm_quirk(pdev)) { | 1316 | if (tulip_uli_dm_quirk(pdev)) { |
1317 | csr0 &= ~0x01f100ff; | 1317 | csr0 &= ~0x01f100ff; |
1318 | #if defined(__sparc__) | 1318 | #if defined(CONFIG_SPARC) |
1319 | csr0 = (csr0 & ~0xff00) | 0xe000; | 1319 | csr0 = (csr0 & ~0xff00) | 0xe000; |
1320 | #endif | 1320 | #endif |
1321 | } | 1321 | } |
@@ -1535,23 +1535,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct | 1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct |
1536 | that here as well. */ | 1536 | that here as well. */ |
1537 | if (sum == 0 || sum == 6*0xff) { | 1537 | if (sum == 0 || sum == 6*0xff) { |
1538 | #if defined(__sparc__) | 1538 | #if defined(CONFIG_SPARC) |
1539 | struct pcidev_cookie *pcp = pdev->sysdata; | 1539 | struct device_node *dp = pci_device_to_OF_node(pdev); |
1540 | const unsigned char *addr; | ||
1541 | int len; | ||
1540 | #endif | 1542 | #endif |
1541 | eeprom_missing = 1; | 1543 | eeprom_missing = 1; |
1542 | for (i = 0; i < 5; i++) | 1544 | for (i = 0; i < 5; i++) |
1543 | dev->dev_addr[i] = last_phys_addr[i]; | 1545 | dev->dev_addr[i] = last_phys_addr[i]; |
1544 | dev->dev_addr[i] = last_phys_addr[i] + 1; | 1546 | dev->dev_addr[i] = last_phys_addr[i] + 1; |
1545 | #if defined(__sparc__) | 1547 | #if defined(CONFIG_SPARC) |
1546 | if (pcp) { | 1548 | addr = of_get_property(dp, "local-mac-address", &len); |
1547 | unsigned char *addr; | 1549 | if (addr && len == 6) |
1548 | int len; | 1550 | memcpy(dev->dev_addr, addr, 6); |
1549 | |||
1550 | addr = of_get_property(pcp->prom_node, | ||
1551 | "local-mac-address", &len); | ||
1552 | if (addr && len == 6) | ||
1553 | memcpy(dev->dev_addr, addr, 6); | ||
1554 | } | ||
1555 | #endif | 1551 | #endif |
1556 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ | 1552 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1557 | if (last_irq) | 1553 | if (last_irq) |
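The hunk above drops the sparc-only pcidev_cookie access in favour of the generic pci_device_to_OF_node()/of_get_property() pair. A minimal sketch of that lookup, assuming CONFIG_SPARC; the helper name is hypothetical, and the defensive NULL check is added for illustration (the driver itself relies on the node being present).

```c
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/prom.h>	/* pci_device_to_OF_node(), of_get_property() */

/* Hypothetical helper: fetch the firmware MAC address for a PCI device. */
static int sparc_read_mac_from_prom(struct pci_dev *pdev, u8 *dev_addr)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	if (!dp)
		return -ENODEV;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (!addr || len != 6)
		return -ENODEV;

	memcpy(dev_addr, addr, 6);
	return 0;
}
```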
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index d74fa871de11..5b71ac78bca2 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -902,7 +902,7 @@ static void init_registers(struct net_device *dev) | |||
902 | } | 902 | } |
903 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) | 903 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) |
904 | i |= 0xE000; | 904 | i |= 0xE000; |
905 | #elif defined(__sparc__) || defined (CONFIG_PARISC) | 905 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) |
906 | i |= 0x4800; | 906 | i |= 0x4800; |
907 | #else | 907 | #else |
908 | #warning Processor architecture undefined | 908 | #warning Processor architecture undefined |
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index 696b3b8aac8e..f64172927377 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c | |||
@@ -65,7 +65,7 @@ static int rx_copybreak = 100; | |||
65 | static int csr0 = 0x01A00000 | 0xE000; | 65 | static int csr0 = 0x01A00000 | 0xE000; |
66 | #elif defined(__powerpc__) | 66 | #elif defined(__powerpc__) |
67 | static int csr0 = 0x01B00000 | 0x8000; | 67 | static int csr0 = 0x01B00000 | 0x8000; |
68 | #elif defined(__sparc__) | 68 | #elif defined(CONFIG_SPARC) |
69 | static int csr0 = 0x01B00080 | 0x8000; | 69 | static int csr0 = 0x01B00080 | 0x8000; |
70 | #elif defined(__i386__) | 70 | #elif defined(__i386__) |
71 | static int csr0 = 0x01A00000 | 0x8000; | 71 | static int csr0 = 0x01A00000 | 0x8000; |
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 2cea4f5d2084..f2be2ead8742 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
@@ -726,7 +726,7 @@ static struct miscdevice envctrl_dev = { | |||
726 | * Return: None. | 726 | * Return: None. |
727 | */ | 727 | */ |
728 | static void envctrl_set_mon(struct i2c_child_t *pchild, | 728 | static void envctrl_set_mon(struct i2c_child_t *pchild, |
729 | char *chnl_desc, | 729 | const char *chnl_desc, |
730 | int chnl_no) | 730 | int chnl_no) |
731 | { | 731 | { |
732 | /* Firmware only has temperature type. It does not distinguish | 732 | /* Firmware only has temperature type. It does not distinguish |
@@ -763,8 +763,8 @@ static void envctrl_set_mon(struct i2c_child_t *pchild, | |||
763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) | 763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) |
764 | { | 764 | { |
765 | int i = 0, len; | 765 | int i = 0, len; |
766 | char *pos; | 766 | const char *pos; |
767 | unsigned int *pval; | 767 | const unsigned int *pval; |
768 | 768 | ||
769 | /* Firmware describe channels into a stream separated by a '\0'. */ | 769 | /* Firmware describe channels into a stream separated by a '\0'. */ |
770 | pos = of_get_property(dp, "channels-description", &len); | 770 | pos = of_get_property(dp, "channels-description", &len); |
@@ -859,7 +859,7 @@ static void envctrl_init_i2c_child(struct linux_ebus_child *edev_child, | |||
859 | { | 859 | { |
860 | int len, i, tbls_size = 0; | 860 | int len, i, tbls_size = 0; |
861 | struct device_node *dp = edev_child->prom_node; | 861 | struct device_node *dp = edev_child->prom_node; |
862 | void *pval; | 862 | const void *pval; |
863 | 863 | ||
864 | /* Get device address. */ | 864 | /* Get device address. */ |
865 | pval = of_get_property(dp, "reg", &len); | 865 | pval = of_get_property(dp, "reg", &len); |
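The envctrl, flash, openprom and sbus hunks in this series are largely const-correctness fallout: of_get_property() now returns const void *, so every cached pointer must be declared const. A minimal sketch of a conforming caller; the property name matches the hunk above, but the walk itself is illustrative rather than the driver's exact loop.

```c
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/prom.h>

/* Walk a '\0'-separated string property with const-correct pointers. */
static void dump_channel_descriptions(struct device_node *dp)
{
	const char *pos;
	int len;

	pos = of_get_property(dp, "channels-description", &len);
	if (!pos)
		return;

	while (len > 0) {
		int n = strlen(pos) + 1;	/* include the terminating '\0' */

		printk(KERN_INFO "channel: %s\n", pos);
		pos += n;
		len -= n;
	}
}
```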
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 6e99507aeb12..262f01e68592 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c | |||
@@ -190,7 +190,7 @@ static int __init flash_init(void) | |||
190 | } | 190 | } |
191 | if (!sdev) { | 191 | if (!sdev) { |
192 | #ifdef CONFIG_PCI | 192 | #ifdef CONFIG_PCI |
193 | struct linux_prom_registers *ebus_regs; | 193 | const struct linux_prom_registers *ebus_regs; |
194 | 194 | ||
195 | for_each_ebus(ebus) { | 195 | for_each_ebus(ebus) { |
196 | for_each_ebusdev(edev, ebus) { | 196 | for_each_ebusdev(edev, ebus) { |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 5041c9dfbe3b..fbfeb89a6f32 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/openpromio.h> | 44 | #include <asm/openpromio.h> |
45 | #ifdef CONFIG_PCI | 45 | #ifdef CONFIG_PCI |
46 | #include <linux/pci.h> | 46 | #include <linux/pci.h> |
47 | #include <asm/pbm.h> | ||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); | 49 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); |
@@ -141,7 +140,7 @@ static int copyout(void __user *info, struct openpromio *opp, int len) | |||
141 | 140 | ||
142 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) | 141 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) |
143 | { | 142 | { |
144 | void *pval; | 143 | const void *pval; |
145 | int len; | 144 | int len; |
146 | 145 | ||
147 | if (!dp || | 146 | if (!dp || |
@@ -248,18 +247,17 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp | |||
248 | if (bufsize >= 2*sizeof(int)) { | 247 | if (bufsize >= 2*sizeof(int)) { |
249 | #ifdef CONFIG_PCI | 248 | #ifdef CONFIG_PCI |
250 | struct pci_dev *pdev; | 249 | struct pci_dev *pdev; |
251 | struct pcidev_cookie *pcp; | 250 | struct device_node *dp; |
251 | |||
252 | pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], | 252 | pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], |
253 | ((int *) op->oprom_array)[1]); | 253 | ((int *) op->oprom_array)[1]); |
254 | 254 | ||
255 | pcp = pdev->sysdata; | 255 | dp = pci_device_to_OF_node(pdev); |
256 | if (pcp != NULL) { | 256 | data->current_node = dp; |
257 | dp = pcp->prom_node; | 257 | *((int *)op->oprom_array) = dp->node; |
258 | data->current_node = dp; | 258 | op->oprom_size = sizeof(int); |
259 | *((int *)op->oprom_array) = dp->node; | 259 | err = copyout(argp, op, bufsize + sizeof(int)); |
260 | op->oprom_size = sizeof(int); | 260 | |
261 | err = copyout(argp, op, bufsize + sizeof(int)); | ||
262 | } | ||
263 | pci_dev_put(pdev); | 261 | pci_dev_put(pdev); |
264 | #endif | 262 | #endif |
265 | } | 263 | } |
@@ -410,7 +408,7 @@ static int opiocget(void __user *argp, DATA *data) | |||
410 | struct opiocdesc op; | 408 | struct opiocdesc op; |
411 | struct device_node *dp; | 409 | struct device_node *dp; |
412 | char *str; | 410 | char *str; |
413 | void *pval; | 411 | const void *pval; |
414 | int err, len; | 412 | int err, len; |
415 | 413 | ||
416 | if (copy_from_user(&op, argp, sizeof(op))) | 414 | if (copy_from_user(&op, argp, sizeof(op))) |
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c index 6349dd617f85..eee590a51d8a 100644 --- a/drivers/sbus/sbus.c +++ b/drivers/sbus/sbus.c | |||
@@ -35,7 +35,7 @@ struct sbus_bus *sbus_root; | |||
35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) | 35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) |
36 | { | 36 | { |
37 | unsigned long base; | 37 | unsigned long base; |
38 | void *pval; | 38 | const void *pval; |
39 | int len, err; | 39 | int len, err; |
40 | 40 | ||
41 | sdev->prom_node = dp->node; | 41 | sdev->prom_node = dp->node; |
@@ -86,7 +86,7 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde | |||
86 | 86 | ||
87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) | 87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) |
88 | { | 88 | { |
89 | void *pval; | 89 | const void *pval; |
90 | int len; | 90 | int len; |
91 | 91 | ||
92 | pval = of_get_property(dp, "ranges", &len); | 92 | pval = of_get_property(dp, "ranges", &len); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4cd280e86966..fcc4cb6c7f46 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1763,9 +1763,15 @@ config SUN3X_ESP | |||
1763 | The ESP was an on-board SCSI controller used on Sun 3/80 | 1763 | The ESP was an on-board SCSI controller used on Sun 3/80 |
1764 | machines. Say Y here to compile in support for it. | 1764 | machines. Say Y here to compile in support for it. |
1765 | 1765 | ||
1766 | config SCSI_ESP_CORE | ||
1767 | tristate "ESP Scsi Driver Core" | ||
1768 | depends on SCSI | ||
1769 | select SCSI_SPI_ATTRS | ||
1770 | |||
1766 | config SCSI_SUNESP | 1771 | config SCSI_SUNESP |
1767 | tristate "Sparc ESP Scsi Driver" | 1772 | tristate "Sparc ESP Scsi Driver" |
1768 | depends on SBUS && SCSI | 1773 | depends on SBUS && SCSI |
1774 | select SCSI_ESP_CORE | ||
1769 | help | 1775 | help |
1770 | This is the driver for the Sun ESP SCSI host adapter. The ESP | 1776 | This is the driver for the Sun ESP SCSI host adapter. The ESP |
1771 | chipset is present in most SPARC SBUS-based computers. | 1777 | chipset is present in most SPARC SBUS-based computers. |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 79ecf4ebe6eb..70cff4c599d7 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o | |||
106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ | 106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ |
107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ | 107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ |
108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o | 108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o |
109 | obj-$(CONFIG_SCSI_SUNESP) += esp.o | 109 | obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o |
110 | obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o | ||
110 | obj-$(CONFIG_SCSI_GDTH) += gdth.o | 111 | obj-$(CONFIG_SCSI_GDTH) += gdth.o |
111 | obj-$(CONFIG_SCSI_INITIO) += initio.o | 112 | obj-$(CONFIG_SCSI_INITIO) += initio.o |
112 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o | 113 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o |
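The Kconfig and Makefile hunks above split the old monolithic esp.o into a shared core (esp_scsi.o, built for SCSI_ESP_CORE) and an SBUS front-end (sun_esp.o, which selects the core). A generic, minimal sketch of how such a core/front-end split is wired at the symbol level; the function name is purely illustrative and is not the new driver's actual API.

```c
/* Core module side (the CONFIG_SCSI_ESP_CORE object) -- illustrative only. */
#include <linux/module.h>

int example_esp_core_probe_chip(void)	/* hypothetical exported routine */
{
	return 0;
}
EXPORT_SYMBOL(example_esp_core_probe_chip);

MODULE_LICENSE("GPL");

/*
 * A bus front-end (the CONFIG_SCSI_SUNESP object) would call
 * example_esp_core_probe_chip(); "select SCSI_ESP_CORE" in Kconfig ensures
 * the exporting core is always built whenever the front-end is enabled.
 */
```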
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c deleted file mode 100644 index 2c2fe80bc42a..000000000000 --- a/drivers/scsi/esp.c +++ /dev/null | |||
@@ -1,4394 +0,0 @@ | |||
1 | /* esp.c: ESP Sun SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | /* TODO: | ||
7 | * | ||
8 | * 1) Maybe disable parity checking in config register one for SCSI1 | ||
9 | * targets. (Gilmore says parity error on the SBus can lock up | ||
10 | * old sun4c's) | ||
11 | * 2) Add support for DMA2 pipelining. | ||
12 | * 3) Add tagged queueing. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/proc_fs.h> | ||
22 | #include <linux/stat.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include "esp.h" | ||
29 | |||
30 | #include <asm/sbus.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/ptrace.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/irq.h> | ||
38 | #ifndef __sparc_v9__ | ||
39 | #include <asm/machines.h> | ||
40 | #include <asm/idprom.h> | ||
41 | #endif | ||
42 | |||
43 | #include <scsi/scsi.h> | ||
44 | #include <scsi/scsi_cmnd.h> | ||
45 | #include <scsi/scsi_device.h> | ||
46 | #include <scsi/scsi_eh.h> | ||
47 | #include <scsi/scsi_host.h> | ||
48 | #include <scsi/scsi_tcq.h> | ||
49 | |||
50 | #define DRV_VERSION "1.101" | ||
51 | |||
52 | #define DEBUG_ESP | ||
53 | /* #define DEBUG_ESP_HME */ | ||
54 | /* #define DEBUG_ESP_DATA */ | ||
55 | /* #define DEBUG_ESP_QUEUE */ | ||
56 | /* #define DEBUG_ESP_DISCONNECT */ | ||
57 | /* #define DEBUG_ESP_STATUS */ | ||
58 | /* #define DEBUG_ESP_PHASES */ | ||
59 | /* #define DEBUG_ESP_WORKBUS */ | ||
60 | /* #define DEBUG_STATE_MACHINE */ | ||
61 | /* #define DEBUG_ESP_CMDS */ | ||
62 | /* #define DEBUG_ESP_IRQS */ | ||
63 | /* #define DEBUG_SDTR */ | ||
64 | /* #define DEBUG_ESP_SG */ | ||
65 | |||
66 | /* Use the following to sprinkle debugging messages in a way which | ||
67 | * suits you if combinations of the above become too verbose when | ||
68 | * trying to track down a specific problem. | ||
69 | */ | ||
70 | /* #define DEBUG_ESP_MISC */ | ||
71 | |||
72 | #if defined(DEBUG_ESP) | ||
73 | #define ESPLOG(foo) printk foo | ||
74 | #else | ||
75 | #define ESPLOG(foo) | ||
76 | #endif /* (DEBUG_ESP) */ | ||
77 | |||
78 | #if defined(DEBUG_ESP_HME) | ||
79 | #define ESPHME(foo) printk foo | ||
80 | #else | ||
81 | #define ESPHME(foo) | ||
82 | #endif | ||
83 | |||
84 | #if defined(DEBUG_ESP_DATA) | ||
85 | #define ESPDATA(foo) printk foo | ||
86 | #else | ||
87 | #define ESPDATA(foo) | ||
88 | #endif | ||
89 | |||
90 | #if defined(DEBUG_ESP_QUEUE) | ||
91 | #define ESPQUEUE(foo) printk foo | ||
92 | #else | ||
93 | #define ESPQUEUE(foo) | ||
94 | #endif | ||
95 | |||
96 | #if defined(DEBUG_ESP_DISCONNECT) | ||
97 | #define ESPDISC(foo) printk foo | ||
98 | #else | ||
99 | #define ESPDISC(foo) | ||
100 | #endif | ||
101 | |||
102 | #if defined(DEBUG_ESP_STATUS) | ||
103 | #define ESPSTAT(foo) printk foo | ||
104 | #else | ||
105 | #define ESPSTAT(foo) | ||
106 | #endif | ||
107 | |||
108 | #if defined(DEBUG_ESP_PHASES) | ||
109 | #define ESPPHASE(foo) printk foo | ||
110 | #else | ||
111 | #define ESPPHASE(foo) | ||
112 | #endif | ||
113 | |||
114 | #if defined(DEBUG_ESP_WORKBUS) | ||
115 | #define ESPBUS(foo) printk foo | ||
116 | #else | ||
117 | #define ESPBUS(foo) | ||
118 | #endif | ||
119 | |||
120 | #if defined(DEBUG_ESP_IRQS) | ||
121 | #define ESPIRQ(foo) printk foo | ||
122 | #else | ||
123 | #define ESPIRQ(foo) | ||
124 | #endif | ||
125 | |||
126 | #if defined(DEBUG_SDTR) | ||
127 | #define ESPSDTR(foo) printk foo | ||
128 | #else | ||
129 | #define ESPSDTR(foo) | ||
130 | #endif | ||
131 | |||
132 | #if defined(DEBUG_ESP_MISC) | ||
133 | #define ESPMISC(foo) printk foo | ||
134 | #else | ||
135 | #define ESPMISC(foo) | ||
136 | #endif | ||
137 | |||
138 | /* Command phase enumeration. */ | ||
139 | enum { | ||
140 | not_issued = 0x00, /* Still in the issue_SC queue. */ | ||
141 | |||
142 | /* Various forms of selecting a target. */ | ||
143 | #define in_slct_mask 0x10 | ||
144 | in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */ | ||
145 | in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */ | ||
146 | in_slct_msg = 0x12, /* select, then send a message */ | ||
147 | in_slct_tag = 0x13, /* select and send tagged queue msg */ | ||
148 | in_slct_sneg = 0x14, /* select and acquire sync capabilities */ | ||
149 | |||
150 | /* Any post selection activity. */ | ||
151 | #define in_phases_mask 0x20 | ||
152 | in_datain = 0x20, /* Data is transferring from the bus */ | ||
153 | in_dataout = 0x21, /* Data is transferring to the bus */ | ||
154 | in_data_done = 0x22, /* Last DMA data operation done (maybe) */ | ||
155 | in_msgin = 0x23, /* Eating message from target */ | ||
156 | in_msgincont = 0x24, /* Eating more msg bytes from target */ | ||
157 | in_msgindone = 0x25, /* Decide what to do with what we got */ | ||
158 | in_msgout = 0x26, /* Sending message to target */ | ||
159 | in_msgoutdone = 0x27, /* Done sending msg out */ | ||
160 | in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */ | ||
161 | in_cmdend = 0x29, /* Done sending slow cmd */ | ||
162 | in_status = 0x2a, /* Was in status phase, finishing cmd */ | ||
163 | in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */ | ||
164 | in_the_dark = 0x2c, /* Don't know what bus phase we are in */ | ||
165 | |||
166 | /* Special states, ie. not normal bus transitions... */ | ||
167 | #define in_spec_mask 0x80 | ||
168 | in_abortone = 0x80, /* Aborting one command currently */ | ||
169 | in_abortall = 0x81, /* Blowing away all commands we have */ | ||
170 | in_resetdev = 0x82, /* SCSI target reset in progress */ | ||
171 | in_resetbus = 0x83, /* SCSI bus reset in progress */ | ||
172 | in_tgterror = 0x84, /* Target did something stupid */ | ||
173 | }; | ||
174 | |||
175 | enum { | ||
176 | /* Zero has special meaning, see skipahead[12]. */ | ||
177 | /*0*/ do_never, | ||
178 | |||
179 | /*1*/ do_phase_determine, | ||
180 | /*2*/ do_reset_bus, | ||
181 | /*3*/ do_reset_complete, | ||
182 | /*4*/ do_work_bus, | ||
183 | /*5*/ do_intr_end | ||
184 | }; | ||
185 | |||
186 | /* Forward declarations. */ | ||
187 | static irqreturn_t esp_intr(int irq, void *dev_id); | ||
188 | |||
189 | /* Debugging routines */ | ||
190 | struct esp_cmdstrings { | ||
191 | u8 cmdchar; | ||
192 | char *text; | ||
193 | } esp_cmd_strings[] = { | ||
194 | /* Miscellaneous */ | ||
195 | { ESP_CMD_NULL, "ESP_NOP", }, | ||
196 | { ESP_CMD_FLUSH, "FIFO_FLUSH", }, | ||
197 | { ESP_CMD_RC, "RSTESP", }, | ||
198 | { ESP_CMD_RS, "RSTSCSI", }, | ||
199 | /* Disconnected State Group */ | ||
200 | { ESP_CMD_RSEL, "RESLCTSEQ", }, | ||
201 | { ESP_CMD_SEL, "SLCTNATN", }, | ||
202 | { ESP_CMD_SELA, "SLCTATN", }, | ||
203 | { ESP_CMD_SELAS, "SLCTATNSTOP", }, | ||
204 | { ESP_CMD_ESEL, "ENSLCTRESEL", }, | ||
205 | { ESP_CMD_DSEL, "DISSELRESEL", }, | ||
206 | { ESP_CMD_SA3, "SLCTATN3", }, | ||
207 | { ESP_CMD_RSEL3, "RESLCTSEQ", }, | ||
208 | /* Target State Group */ | ||
209 | { ESP_CMD_SMSG, "SNDMSG", }, | ||
210 | { ESP_CMD_SSTAT, "SNDSTATUS", }, | ||
211 | { ESP_CMD_SDATA, "SNDDATA", }, | ||
212 | { ESP_CMD_DSEQ, "DISCSEQ", }, | ||
213 | { ESP_CMD_TSEQ, "TERMSEQ", }, | ||
214 | { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", }, | ||
215 | { ESP_CMD_DCNCT, "DISC", }, | ||
216 | { ESP_CMD_RMSG, "RCVMSG", }, | ||
217 | { ESP_CMD_RCMD, "RCVCMD", }, | ||
218 | { ESP_CMD_RDATA, "RCVDATA", }, | ||
219 | { ESP_CMD_RCSEQ, "RCVCMDSEQ", }, | ||
220 | /* Initiator State Group */ | ||
221 | { ESP_CMD_TI, "TRANSINFO", }, | ||
222 | { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", }, | ||
223 | { ESP_CMD_MOK, "MSGACCEPTED", }, | ||
224 | { ESP_CMD_TPAD, "TPAD", }, | ||
225 | { ESP_CMD_SATN, "SATN", }, | ||
226 | { ESP_CMD_RATN, "RATN", }, | ||
227 | }; | ||
228 | #define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings))) | ||
229 | |||
230 | /* Print textual representation of an ESP command */ | ||
231 | static inline void esp_print_cmd(u8 espcmd) | ||
232 | { | ||
233 | u8 dma_bit = espcmd & ESP_CMD_DMA; | ||
234 | int i; | ||
235 | |||
236 | espcmd &= ~dma_bit; | ||
237 | for (i = 0; i < NUM_ESP_COMMANDS; i++) | ||
238 | if (esp_cmd_strings[i].cmdchar == espcmd) | ||
239 | break; | ||
240 | if (i == NUM_ESP_COMMANDS) | ||
241 | printk("ESP_Unknown"); | ||
242 | else | ||
243 | printk("%s%s", esp_cmd_strings[i].text, | ||
244 | ((dma_bit) ? "+DMA" : "")); | ||
245 | } | ||
246 | |||
247 | /* Print the status register's value */ | ||
248 | static inline void esp_print_statreg(u8 statreg) | ||
249 | { | ||
250 | u8 phase; | ||
251 | |||
252 | printk("STATUS<"); | ||
253 | phase = statreg & ESP_STAT_PMASK; | ||
254 | printk("%s,", (phase == ESP_DOP ? "DATA-OUT" : | ||
255 | (phase == ESP_DIP ? "DATA-IN" : | ||
256 | (phase == ESP_CMDP ? "COMMAND" : | ||
257 | (phase == ESP_STATP ? "STATUS" : | ||
258 | (phase == ESP_MOP ? "MSG-OUT" : | ||
259 | (phase == ESP_MIP ? "MSG_IN" : | ||
260 | "unknown"))))))); | ||
261 | if (statreg & ESP_STAT_TDONE) | ||
262 | printk("TRANS_DONE,"); | ||
263 | if (statreg & ESP_STAT_TCNT) | ||
264 | printk("TCOUNT_ZERO,"); | ||
265 | if (statreg & ESP_STAT_PERR) | ||
266 | printk("P_ERROR,"); | ||
267 | if (statreg & ESP_STAT_SPAM) | ||
268 | printk("SPAM,"); | ||
269 | if (statreg & ESP_STAT_INTR) | ||
270 | printk("IRQ,"); | ||
271 | printk(">"); | ||
272 | } | ||
273 | |||
274 | /* Print the interrupt register's value */ | ||
275 | static inline void esp_print_ireg(u8 intreg) | ||
276 | { | ||
277 | printk("INTREG< "); | ||
278 | if (intreg & ESP_INTR_S) | ||
279 | printk("SLCT_NATN "); | ||
280 | if (intreg & ESP_INTR_SATN) | ||
281 | printk("SLCT_ATN "); | ||
282 | if (intreg & ESP_INTR_RSEL) | ||
283 | printk("RSLCT "); | ||
284 | if (intreg & ESP_INTR_FDONE) | ||
285 | printk("FDONE "); | ||
286 | if (intreg & ESP_INTR_BSERV) | ||
287 | printk("BSERV "); | ||
288 | if (intreg & ESP_INTR_DC) | ||
289 | printk("DISCNCT "); | ||
290 | if (intreg & ESP_INTR_IC) | ||
291 | printk("ILL_CMD "); | ||
292 | if (intreg & ESP_INTR_SR) | ||
293 | printk("SCSI_BUS_RESET "); | ||
294 | printk(">"); | ||
295 | } | ||
296 | |||
297 | /* Print the sequence step registers contents */ | ||
298 | static inline void esp_print_seqreg(u8 stepreg) | ||
299 | { | ||
300 | stepreg &= ESP_STEP_VBITS; | ||
301 | printk("STEP<%s>", | ||
302 | (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" : | ||
303 | (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" : | ||
304 | (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" : | ||
305 | (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" : | ||
306 | (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" : | ||
307 | "UNKNOWN")))))); | ||
308 | } | ||
309 | |||
310 | static char *phase_string(int phase) | ||
311 | { | ||
312 | switch (phase) { | ||
313 | case not_issued: | ||
314 | return "UNISSUED"; | ||
315 | case in_slct_norm: | ||
316 | return "SLCTNORM"; | ||
317 | case in_slct_stop: | ||
318 | return "SLCTSTOP"; | ||
319 | case in_slct_msg: | ||
320 | return "SLCTMSG"; | ||
321 | case in_slct_tag: | ||
322 | return "SLCTTAG"; | ||
323 | case in_slct_sneg: | ||
324 | return "SLCTSNEG"; | ||
325 | case in_datain: | ||
326 | return "DATAIN"; | ||
327 | case in_dataout: | ||
328 | return "DATAOUT"; | ||
329 | case in_data_done: | ||
330 | return "DATADONE"; | ||
331 | case in_msgin: | ||
332 | return "MSGIN"; | ||
333 | case in_msgincont: | ||
334 | return "MSGINCONT"; | ||
335 | case in_msgindone: | ||
336 | return "MSGINDONE"; | ||
337 | case in_msgout: | ||
338 | return "MSGOUT"; | ||
339 | case in_msgoutdone: | ||
340 | return "MSGOUTDONE"; | ||
341 | case in_cmdbegin: | ||
342 | return "CMDBEGIN"; | ||
343 | case in_cmdend: | ||
344 | return "CMDEND"; | ||
345 | case in_status: | ||
346 | return "STATUS"; | ||
347 | case in_freeing: | ||
348 | return "FREEING"; | ||
349 | case in_the_dark: | ||
350 | return "CLUELESS"; | ||
351 | case in_abortone: | ||
352 | return "ABORTONE"; | ||
353 | case in_abortall: | ||
354 | return "ABORTALL"; | ||
355 | case in_resetdev: | ||
356 | return "RESETDEV"; | ||
357 | case in_resetbus: | ||
358 | return "RESETBUS"; | ||
359 | case in_tgterror: | ||
360 | return "TGTERROR"; | ||
361 | default: | ||
362 | return "UNKNOWN"; | ||
363 | }; | ||
364 | } | ||
365 | |||
366 | #ifdef DEBUG_STATE_MACHINE | ||
367 | static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase) | ||
368 | { | ||
369 | ESPLOG(("<%s>", phase_string(newphase))); | ||
370 | s->SCp.sent_command = s->SCp.phase; | ||
371 | s->SCp.phase = newphase; | ||
372 | } | ||
373 | #else | ||
374 | #define esp_advance_phase(__s, __newphase) \ | ||
375 | (__s)->SCp.sent_command = (__s)->SCp.phase; \ | ||
376 | (__s)->SCp.phase = (__newphase); | ||
377 | #endif | ||
378 | |||
379 | #ifdef DEBUG_ESP_CMDS | ||
380 | static inline void esp_cmd(struct esp *esp, u8 cmd) | ||
381 | { | ||
382 | esp->espcmdlog[esp->espcmdent] = cmd; | ||
383 | esp->espcmdent = (esp->espcmdent + 1) & 31; | ||
384 | sbus_writeb(cmd, esp->eregs + ESP_CMD); | ||
385 | } | ||
386 | #else | ||
387 | #define esp_cmd(__esp, __cmd) \ | ||
388 | sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD) | ||
389 | #endif | ||
390 | |||
391 | #define ESP_INTSOFF(__dregs) \ | ||
392 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR) | ||
393 | #define ESP_INTSON(__dregs) \ | ||
394 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR) | ||
395 | #define ESP_IRQ_P(__dregs) \ | ||
396 | (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR)) | ||
397 | |||
398 | /* How we use the various Linux SCSI data structures for operation. | ||
399 | * | ||
400 | * struct scsi_cmnd: | ||
401 | * | ||
402 | * We keep track of the synchronous capabilities of a target | ||
403 | * in the device member, using sync_min_period and | ||
404 | * sync_max_offset. These are the values we directly write | ||
405 | * into the ESP registers while running a command. If offset | ||
406 | * is zero the ESP will use asynchronous transfers. | ||
407 | * If the borken flag is set we assume we shouldn't even bother | ||
408 | * trying to negotiate for synchronous transfer as this target | ||
409 | * is really stupid. If we notice the target is dropping the | ||
410 | * bus, and we have been allowing it to disconnect, we clear | ||
411 | * the disconnect flag. | ||
412 | */ | ||
413 | |||
414 | |||
415 | /* Manipulation of the ESP command queues. Thanks to the aha152x driver | ||
416 | * and its author, Juergen E. Fischer, for the methods used here. | ||
417 | * Note that these are per-ESP queues, not global queues like | ||
418 | * the aha152x driver uses. | ||
419 | */ | ||
420 | static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
421 | { | ||
422 | struct scsi_cmnd *end; | ||
423 | |||
424 | new_SC->host_scribble = (unsigned char *) NULL; | ||
425 | if (!*SC) | ||
426 | *SC = new_SC; | ||
427 | else { | ||
428 | for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble) | ||
429 | ; | ||
430 | end->host_scribble = (unsigned char *) new_SC; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
435 | { | ||
436 | new_SC->host_scribble = (unsigned char *) *SC; | ||
437 | *SC = new_SC; | ||
438 | } | ||
439 | |||
440 | static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC) | ||
441 | { | ||
442 | struct scsi_cmnd *ptr; | ||
443 | ptr = *SC; | ||
444 | if (ptr) | ||
445 | *SC = (struct scsi_cmnd *) (*SC)->host_scribble; | ||
446 | return ptr; | ||
447 | } | ||
448 | |||
449 | static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun) | ||
450 | { | ||
451 | struct scsi_cmnd *ptr, *prev; | ||
452 | |||
453 | for (ptr = *SC, prev = NULL; | ||
454 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); | ||
455 | prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble) | ||
456 | ; | ||
457 | if (ptr) { | ||
458 | if (prev) | ||
459 | prev->host_scribble=ptr->host_scribble; | ||
460 | else | ||
461 | *SC=(struct scsi_cmnd *)ptr->host_scribble; | ||
462 | } | ||
463 | return ptr; | ||
464 | } | ||
465 | |||
466 | /* Resetting various pieces of the ESP scsi driver chipset/buses. */ | ||
467 | static void esp_reset_dma(struct esp *esp) | ||
468 | { | ||
469 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
470 | int can_do_sbus64; | ||
471 | u32 tmp; | ||
472 | |||
473 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
474 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
475 | can_do_burst64 = 0; | ||
476 | can_do_sbus64 = 0; | ||
477 | if (sbus_can_dma_64bit(esp->sdev)) | ||
478 | can_do_sbus64 = 1; | ||
479 | if (sbus_can_burst64(esp->sdev)) | ||
480 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
481 | |||
482 | /* Punt the DVMA into a known state. */ | ||
483 | if (esp->dma->revision != dvmahme) { | ||
484 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
485 | sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
486 | sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
487 | } | ||
488 | switch (esp->dma->revision) { | ||
489 | case dvmahme: | ||
490 | /* This is the HME DVMA gate array. */ | ||
491 | |||
492 | sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR); | ||
493 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
494 | |||
495 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB); | ||
496 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ); | ||
497 | |||
498 | if (can_do_burst64) | ||
499 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
500 | else if (can_do_burst32) | ||
501 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
502 | |||
503 | if (can_do_sbus64) { | ||
504 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
505 | sbus_set_sbus64(esp->sdev, esp->bursts); | ||
506 | } | ||
507 | |||
508 | /* This chip is horrible. */ | ||
509 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ) | ||
510 | udelay(1); | ||
511 | |||
512 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
513 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
514 | |||
515 | /* This is necessary to avoid having the SCSI channel | ||
516 | * engine lock up on us. | ||
517 | */ | ||
518 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
519 | |||
520 | break; | ||
521 | case dvmarev2: | ||
522 | /* This is the gate array found in the sun4m | ||
523 | * NCR SBUS I/O subsystem. | ||
524 | */ | ||
525 | if (esp->erev != esp100) { | ||
526 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
527 | sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR); | ||
528 | } | ||
529 | break; | ||
530 | case dvmarev3: | ||
531 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
532 | tmp &= ~DMA_3CLKS; | ||
533 | tmp |= DMA_2CLKS; | ||
534 | if (can_do_burst32) { | ||
535 | tmp &= ~DMA_BRST_SZ; | ||
536 | tmp |= DMA_BRST32; | ||
537 | } | ||
538 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
539 | break; | ||
540 | case dvmaesc1: | ||
541 | /* This is the DMA unit found on SCSI/Ether cards. */ | ||
542 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
543 | tmp |= DMA_ADD_ENABLE; | ||
544 | tmp &= ~DMA_BCNT_ENAB; | ||
545 | if (!can_do_burst32 && can_do_burst16) { | ||
546 | tmp |= DMA_ESC_BURST; | ||
547 | } else { | ||
548 | tmp &= ~(DMA_ESC_BURST); | ||
549 | } | ||
550 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
551 | break; | ||
552 | default: | ||
553 | break; | ||
554 | }; | ||
555 | ESP_INTSON(esp->dregs); | ||
556 | } | ||
557 | |||
558 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
559 | static void __init esp_reset_esp(struct esp *esp) | ||
560 | { | ||
561 | u8 family_code, version; | ||
562 | int i; | ||
563 | |||
564 | /* Now reset the ESP chip */ | ||
565 | esp_cmd(esp, ESP_CMD_RC); | ||
566 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
567 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
568 | |||
569 | /* Reload the configuration registers */ | ||
570 | sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT); | ||
571 | esp->prev_stp = 0; | ||
572 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
573 | esp->prev_soff = 0; | ||
574 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
575 | sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO); | ||
576 | |||
577 | /* This is the only point at which it is reliable to read | ||
578 | * the ID-code for a fast ESP chip variants. | ||
579 | */ | ||
580 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
581 | if (esp->erev == fast) { | ||
582 | version = sbus_readb(esp->eregs + ESP_UID); | ||
583 | family_code = (version & 0xf8) >> 3; | ||
584 | if (family_code == 0x02) | ||
585 | esp->erev = fas236; | ||
586 | else if (family_code == 0x0a) | ||
587 | esp->erev = fashme; /* Version is usually '5'. */ | ||
588 | else | ||
589 | esp->erev = fas100a; | ||
590 | ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n", | ||
591 | esp->esp_id, | ||
592 | (esp->erev == fas236) ? "fas236" : | ||
593 | ((esp->erev == fas100a) ? "fas100a" : | ||
594 | "fasHME"), family_code, (version & 7))); | ||
595 | |||
596 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
597 | } else { | ||
598 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
599 | } | ||
600 | esp->max_period = (esp->max_period + 3)>>2; | ||
601 | esp->min_period = (esp->min_period + 3)>>2; | ||
602 | |||
603 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
604 | switch (esp->erev) { | ||
605 | case esp100: | ||
606 | /* nothing to do */ | ||
607 | break; | ||
608 | case esp100a: | ||
609 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
610 | break; | ||
611 | case esp236: | ||
612 | /* Slow 236 */ | ||
613 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
614 | esp->prev_cfg3 = esp->config3[0]; | ||
615 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
616 | break; | ||
617 | case fashme: | ||
618 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
619 | /* fallthrough... */ | ||
620 | case fas236: | ||
621 | /* Fast 236 or HME */ | ||
622 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
623 | for (i = 0; i < 16; i++) { | ||
624 | if (esp->erev == fashme) { | ||
625 | u8 cfg3; | ||
626 | |||
627 | cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
628 | if (esp->scsi_id >= 8) | ||
629 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
630 | esp->config3[i] |= cfg3; | ||
631 | } else { | ||
632 | esp->config3[i] |= ESP_CONFIG3_FCLK; | ||
633 | } | ||
634 | } | ||
635 | esp->prev_cfg3 = esp->config3[0]; | ||
636 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
637 | if (esp->erev == fashme) { | ||
638 | esp->radelay = 80; | ||
639 | } else { | ||
640 | if (esp->diff) | ||
641 | esp->radelay = 0; | ||
642 | else | ||
643 | esp->radelay = 96; | ||
644 | } | ||
645 | break; | ||
646 | case fas100a: | ||
647 | /* Fast 100a */ | ||
648 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
649 | for (i = 0; i < 16; i++) | ||
650 | esp->config3[i] |= ESP_CONFIG3_FCLOCK; | ||
651 | esp->prev_cfg3 = esp->config3[0]; | ||
652 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
653 | esp->radelay = 32; | ||
654 | break; | ||
655 | default: | ||
656 | panic("esp: what could it be... I wonder..."); | ||
657 | break; | ||
658 | }; | ||
659 | |||
660 | /* Eat any bitrot in the chip */ | ||
661 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
662 | udelay(100); | ||
663 | } | ||
664 | |||
665 | /* This places the ESP into a known state at boot time. */ | ||
666 | static void __init esp_bootup_reset(struct esp *esp) | ||
667 | { | ||
668 | u8 tmp; | ||
669 | |||
670 | /* Reset the DMA */ | ||
671 | esp_reset_dma(esp); | ||
672 | |||
673 | /* Reset the ESP */ | ||
674 | esp_reset_esp(esp); | ||
675 | |||
676 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
677 | tmp = sbus_readb(esp->eregs + ESP_CFG1); | ||
678 | tmp |= ESP_CONFIG1_SRRDISAB; | ||
679 | sbus_writeb(tmp, esp->eregs + ESP_CFG1); | ||
680 | |||
681 | esp_cmd(esp, ESP_CMD_RS); | ||
682 | udelay(400); | ||
683 | |||
684 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
685 | |||
686 | /* Eat any bitrot in the chip and we are done... */ | ||
687 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
688 | } | ||
689 | |||
690 | static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
691 | { | ||
692 | struct sbus_dev *sdev = esp->sdev; | ||
693 | struct sbus_dma *dma; | ||
694 | |||
695 | if (dma_sdev != NULL) { | ||
696 | for_each_dvma(dma) { | ||
697 | if (dma->sdev == dma_sdev) | ||
698 | break; | ||
699 | } | ||
700 | } else { | ||
701 | for_each_dvma(dma) { | ||
702 | /* If allocated already, can't use it. */ | ||
703 | if (dma->allocated) | ||
704 | continue; | ||
705 | |||
706 | if (dma->sdev == NULL) | ||
707 | break; | ||
708 | |||
709 | /* If bus + slot are the same and it has the | ||
710 | * correct OBP name, it's ours. | ||
711 | */ | ||
712 | if (sdev->bus == dma->sdev->bus && | ||
713 | sdev->slot == dma->sdev->slot && | ||
714 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
715 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | /* If we don't know how to handle the dvma, | ||
721 | * do not use this device. | ||
722 | */ | ||
723 | if (dma == NULL) { | ||
724 | printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id); | ||
725 | return -1; | ||
726 | } | ||
727 | if (dma->allocated) { | ||
728 | printk("esp%d: can't use my espdma\n", esp->esp_id); | ||
729 | return -1; | ||
730 | } | ||
731 | dma->allocated = 1; | ||
732 | esp->dma = dma; | ||
733 | esp->dregs = dma->regs; | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int __init esp_map_regs(struct esp *esp, int hme) | ||
739 | { | ||
740 | struct sbus_dev *sdev = esp->sdev; | ||
741 | struct resource *res; | ||
742 | |||
743 | /* On HME, two reg sets exist, first is DVMA, | ||
744 | * second is ESP registers. | ||
745 | */ | ||
746 | if (hme) | ||
747 | res = &sdev->resource[1]; | ||
748 | else | ||
749 | res = &sdev->resource[0]; | ||
750 | |||
751 | esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers"); | ||
752 | |||
753 | if (esp->eregs == 0) | ||
754 | return -1; | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static int __init esp_map_cmdarea(struct esp *esp) | ||
759 | { | ||
760 | struct sbus_dev *sdev = esp->sdev; | ||
761 | |||
762 | esp->esp_command = sbus_alloc_consistent(sdev, 16, | ||
763 | &esp->esp_command_dvma); | ||
764 | if (esp->esp_command == NULL || | ||
765 | esp->esp_command_dvma == 0) | ||
766 | return -1; | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | static int __init esp_register_irq(struct esp *esp) | ||
771 | { | ||
772 | esp->ehost->irq = esp->irq = esp->sdev->irqs[0]; | ||
773 | |||
774 | /* We used to try various overly-clever things to | ||
775 | * reduce the interrupt processing overhead on | ||
776 | * sun4c/sun4m when multiple ESP's shared the | ||
777 | * same IRQ. It was too complex and messy to | ||
778 | * sanely maintain. | ||
779 | */ | ||
780 | if (request_irq(esp->ehost->irq, esp_intr, | ||
781 | IRQF_SHARED, "ESP SCSI", esp)) { | ||
782 | printk("esp%d: Cannot acquire irq line\n", | ||
783 | esp->esp_id); | ||
784 | return -1; | ||
785 | } | ||
786 | |||
787 | printk("esp%d: IRQ %d ", esp->esp_id, | ||
788 | esp->ehost->irq); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
792 | |||
793 | static void __init esp_get_scsi_id(struct esp *esp) | ||
794 | { | ||
795 | struct sbus_dev *sdev = esp->sdev; | ||
796 | struct device_node *dp = sdev->ofdev.node; | ||
797 | |||
798 | esp->scsi_id = of_getintprop_default(dp, | ||
799 | "initiator-id", | ||
800 | -1); | ||
801 | if (esp->scsi_id == -1) | ||
802 | esp->scsi_id = of_getintprop_default(dp, | ||
803 | "scsi-initiator-id", | ||
804 | -1); | ||
805 | if (esp->scsi_id == -1) | ||
806 | esp->scsi_id = (sdev->bus == NULL) ? 7 : | ||
807 | of_getintprop_default(sdev->bus->ofdev.node, | ||
808 | "scsi-initiator-id", | ||
809 | 7); | ||
810 | esp->ehost->this_id = esp->scsi_id; | ||
811 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
812 | |||
813 | } | ||
814 | |||
815 | static void __init esp_get_clock_params(struct esp *esp) | ||
816 | { | ||
817 | struct sbus_dev *sdev = esp->sdev; | ||
818 | int prom_node = esp->prom_node; | ||
819 | int sbus_prom_node; | ||
820 | unsigned int fmhz; | ||
821 | u8 ccf; | ||
822 | |||
823 | if (sdev != NULL && sdev->bus != NULL) | ||
824 | sbus_prom_node = sdev->bus->prom_node; | ||
825 | else | ||
826 | sbus_prom_node = 0; | ||
827 | |||
828 | /* This is getting messy but it has to be done | ||
829 | * correctly or else you get weird behavior all | ||
830 | * over the place. We are trying to basically | ||
831 | * figure out three pieces of information. | ||
832 | * | ||
833 | * a) Clock Conversion Factor | ||
834 | * | ||
835 | * This is a representation of the input | ||
836 | * crystal clock frequency going into the | ||
837 | * ESP on this machine. Any operation whose | ||
838 | * timing is longer than 400ns depends on this | ||
839 | * value being correct. For example, you'll | ||
840 | * get blips for arbitration/selection during | ||
841 | * high load or with multiple targets if this | ||
842 | * is not set correctly. | ||
843 | * | ||
844 | * b) Selection Time-Out | ||
845 | * | ||
846 | * The ESP isn't very bright and will arbitrate | ||
847 | * for the bus and try to select a target | ||
848 | * forever if you let it. This value tells | ||
849 | * the ESP when it has taken too long to | ||
850 | * negotiate and that it should interrupt | ||
851 | * the CPU so we can see what happened. | ||
852 | * The value is computed as follows (from | ||
853 | * NCR/Symbios chip docs). | ||
854 | * | ||
855 | * (Time Out Period) * (Input Clock) | ||
856 | * STO = ---------------------------------- | ||
857 | * (8192) * (Clock Conversion Factor) | ||
858 | * | ||
859 | * You usually want the time out period to be | ||
860 | * around 250ms, I think we'll set it a little | ||
861 | * bit higher to account for fully loaded SCSI | ||
862 | * bus's and slow devices that don't respond so | ||
863 | * quickly to selection attempts. (yeah, I know | ||
864 | * this is out of spec. but there is a lot of | ||
865 | * buggy pieces of firmware out there so bite me) | ||
866 | * | ||
867 | * c) Empirical constants for synchronous offset | ||
868 | * and transfer period register values | ||
869 | * | ||
870 | * This entails the smallest and largest sync | ||
871 | * period we could ever handle on this ESP. | ||
872 | */ | ||
873 | |||
874 | fmhz = prom_getintdefault(prom_node, "clock-frequency", -1); | ||
875 | if (fmhz == -1) | ||
876 | fmhz = (!sbus_prom_node) ? 0 : | ||
877 | prom_getintdefault(sbus_prom_node, "clock-frequency", -1); | ||
878 | |||
879 | if (fmhz <= (5000000)) | ||
880 | ccf = 0; | ||
881 | else | ||
882 | ccf = (((5000000 - 1) + (fmhz))/(5000000)); | ||
883 | |||
884 | if (!ccf || ccf > 8) { | ||
885 | /* If we can't find anything reasonable, | ||
886 | * just assume 20MHZ. This is the clock | ||
887 | * frequency of the older sun4c's where I've | ||
888 | * been unable to find the clock-frequency | ||
889 | * PROM property. All other machines provide | ||
890 | * useful values it seems. | ||
891 | */ | ||
892 | ccf = ESP_CCF_F4; | ||
893 | fmhz = (20000000); | ||
894 | } | ||
895 | |||
896 | if (ccf == (ESP_CCF_F7 + 1)) | ||
897 | esp->cfact = ESP_CCF_F0; | ||
898 | else if (ccf == ESP_CCF_NEVER) | ||
899 | esp->cfact = ESP_CCF_F2; | ||
900 | else | ||
901 | esp->cfact = ccf; | ||
902 | esp->raw_cfact = ccf; | ||
903 | |||
904 | esp->cfreq = fmhz; | ||
905 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
906 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
907 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
908 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
909 | |||
910 | printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ", | ||
911 | esp->scsi_id, (fmhz / 1000000), | ||
912 | (int)esp->ccycle, (int)ccf, (int) esp->neg_defp); | ||
913 | } | ||
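The long comment in esp_get_clock_params() above quotes the selection time-out relation STO = (time-out period x input clock) / (8192 x clock conversion factor). A standalone worked example with assumed values (20 MHz crystal, CCF of 4, 250 ms time-out); it only illustrates the arithmetic, not the exact register value the driver programs.

```c
#include <stdio.h>

/* Worked example of the STO formula quoted in the deleted comment above. */
int main(void)
{
	double timeout_s = 0.250;	/* desired selection time-out (assumed) */
	double clock_hz = 20000000.0;	/* input crystal frequency (assumed)   */
	double ccf = 4.0;		/* clock conversion factor (assumed)   */

	double sto = (timeout_s * clock_hz) / (8192.0 * ccf);

	printf("STO ~= %.0f\n", sto);	/* prints ~153 */
	return 0;
}
```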
914 | |||
915 | static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
916 | { | ||
917 | struct sbus_dev *sdev = esp->sdev; | ||
918 | u8 bursts; | ||
919 | |||
920 | bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff); | ||
921 | |||
922 | if (dma) { | ||
923 | u8 tmp = prom_getintdefault(dma->prom_node, | ||
924 | "burst-sizes", 0xff); | ||
925 | if (tmp != 0xff) | ||
926 | bursts &= tmp; | ||
927 | } | ||
928 | |||
929 | if (sdev->bus) { | ||
930 | u8 tmp = prom_getintdefault(sdev->bus->prom_node, | ||
931 | "burst-sizes", 0xff); | ||
932 | if (tmp != 0xff) | ||
933 | bursts &= tmp; | ||
934 | } | ||
935 | |||
936 | if (bursts == 0xff || | ||
937 | (bursts & DMA_BURST16) == 0 || | ||
938 | (bursts & DMA_BURST32) == 0) | ||
939 | bursts = (DMA_BURST32 - 1); | ||
940 | |||
941 | esp->bursts = bursts; | ||
942 | } | ||
943 | |||
944 | static void __init esp_get_revision(struct esp *esp) | ||
945 | { | ||
946 | u8 tmp; | ||
947 | |||
948 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
949 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
950 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
951 | |||
952 | tmp = sbus_readb(esp->eregs + ESP_CFG2); | ||
953 | tmp &= ~ESP_CONFIG2_MAGIC; | ||
954 | if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
955 | /* If what we write to cfg2 does not come back, cfg2 | ||
956 | * is not implemented, therefore this must be a plain | ||
957 | * esp100. | ||
958 | */ | ||
959 | esp->erev = esp100; | ||
960 | printk("NCR53C90(esp100)\n"); | ||
961 | } else { | ||
962 | esp->config2 = 0; | ||
963 | esp->prev_cfg3 = esp->config3[0] = 5; | ||
964 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
965 | sbus_writeb(0, esp->eregs + ESP_CFG3); | ||
966 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
967 | |||
968 | tmp = sbus_readb(esp->eregs + ESP_CFG3); | ||
969 | if (tmp != 5) { | ||
970 | /* The cfg2 register is implemented, however | ||
971 | * cfg3 is not, must be esp100a. | ||
972 | */ | ||
973 | esp->erev = esp100a; | ||
974 | printk("NCR53C90A(esp100a)\n"); | ||
975 | } else { | ||
976 | int target; | ||
977 | |||
978 | for (target = 0; target < 16; target++) | ||
979 | esp->config3[target] = 0; | ||
980 | esp->prev_cfg3 = 0; | ||
981 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
982 | |||
983 | /* All of cfg{1,2,3} implemented, must be one of | ||
984 | * the fas variants, figure out which one. | ||
985 | */ | ||
986 | if (esp->raw_cfact > ESP_CCF_F5) { | ||
987 | esp->erev = fast; | ||
988 | esp->sync_defp = SYNC_DEFP_FAST; | ||
989 | printk("NCR53C9XF(espfast)\n"); | ||
990 | } else { | ||
991 | esp->erev = esp236; | ||
992 | printk("NCR53C9x(esp236)\n"); | ||
993 | } | ||
994 | esp->config2 = 0; | ||
995 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | static void __init esp_init_swstate(struct esp *esp) | ||
1001 | { | ||
1002 | int i; | ||
1003 | |||
1004 | /* Command queues... */ | ||
1005 | esp->current_SC = NULL; | ||
1006 | esp->disconnected_SC = NULL; | ||
1007 | esp->issue_SC = NULL; | ||
1008 | |||
1009 | /* Target and current command state... */ | ||
1010 | esp->targets_present = 0; | ||
1011 | esp->resetting_bus = 0; | ||
1012 | esp->snip = 0; | ||
1013 | |||
1014 | init_waitqueue_head(&esp->reset_queue); | ||
1015 | |||
1016 | /* Debugging... */ | ||
1017 | for(i = 0; i < 32; i++) | ||
1018 | esp->espcmdlog[i] = 0; | ||
1019 | esp->espcmdent = 0; | ||
1020 | |||
1021 | /* MSG phase state... */ | ||
1022 | for(i = 0; i < 16; i++) { | ||
1023 | esp->cur_msgout[i] = 0; | ||
1024 | esp->cur_msgin[i] = 0; | ||
1025 | } | ||
1026 | esp->prevmsgout = esp->prevmsgin = 0; | ||
1027 | esp->msgout_len = esp->msgin_len = 0; | ||
1028 | |||
1029 | /* Clear the one behind caches to hold unmatchable values. */ | ||
1030 | esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff; | ||
1031 | esp->prev_hme_dmacsr = 0xffffffff; | ||
1032 | } | ||
1033 | |||
1034 | static int __init detect_one_esp(struct scsi_host_template *tpnt, | ||
1035 | struct device *dev, | ||
1036 | struct sbus_dev *esp_dev, | ||
1037 | struct sbus_dev *espdma, | ||
1038 | struct sbus_bus *sbus, | ||
1039 | int hme) | ||
1040 | { | ||
1041 | static int instance; | ||
1042 | struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
1043 | struct esp *esp; | ||
1044 | |||
1045 | if (!esp_host) | ||
1046 | return -ENOMEM; | ||
1047 | |||
1048 | if (hme) | ||
1049 | esp_host->max_id = 16; | ||
1050 | esp = (struct esp *) esp_host->hostdata; | ||
1051 | esp->ehost = esp_host; | ||
1052 | esp->sdev = esp_dev; | ||
1053 | esp->esp_id = instance; | ||
1054 | esp->prom_node = esp_dev->prom_node; | ||
1055 | prom_getstring(esp->prom_node, "name", esp->prom_name, | ||
1056 | sizeof(esp->prom_name)); | ||
1057 | |||
1058 | if (esp_find_dvma(esp, espdma) < 0) | ||
1059 | goto fail_unlink; | ||
1060 | if (esp_map_regs(esp, hme) < 0) { | ||
1061 | printk("ESP registers unmappable"); | ||
1062 | goto fail_dvma_release; | ||
1063 | } | ||
1064 | if (esp_map_cmdarea(esp) < 0) { | ||
1065 | printk("ESP DVMA transport area unmappable"); | ||
1066 | goto fail_unmap_regs; | ||
1067 | } | ||
1068 | if (esp_register_irq(esp) < 0) | ||
1069 | goto fail_unmap_cmdarea; | ||
1070 | |||
1071 | esp_get_scsi_id(esp); | ||
1072 | |||
1073 | esp->diff = prom_getbool(esp->prom_node, "differential"); | ||
1074 | if (esp->diff) | ||
1075 | printk("Differential "); | ||
1076 | |||
1077 | esp_get_clock_params(esp); | ||
1078 | esp_get_bursts(esp, espdma); | ||
1079 | esp_get_revision(esp); | ||
1080 | esp_init_swstate(esp); | ||
1081 | |||
1082 | esp_bootup_reset(esp); | ||
1083 | |||
1084 | if (scsi_add_host(esp_host, dev)) | ||
1085 | goto fail_free_irq; | ||
1086 | |||
1087 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
1088 | |||
1089 | scsi_scan_host(esp_host); | ||
1090 | instance++; | ||
1091 | |||
1092 | return 0; | ||
1093 | |||
1094 | fail_free_irq: | ||
1095 | free_irq(esp->ehost->irq, esp); | ||
1096 | |||
1097 | fail_unmap_cmdarea: | ||
1098 | sbus_free_consistent(esp->sdev, 16, | ||
1099 | (void *) esp->esp_command, | ||
1100 | esp->esp_command_dvma); | ||
1101 | |||
1102 | fail_unmap_regs: | ||
1103 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1104 | |||
1105 | fail_dvma_release: | ||
1106 | esp->dma->allocated = 0; | ||
1107 | |||
1108 | fail_unlink: | ||
1109 | scsi_host_put(esp_host); | ||
1110 | return -1; | ||
1111 | } | ||
1112 | |||
1113 | /* Detecting ESP chips on the machine. This is the simple and easy | ||
1114 | * version. | ||
1115 | */ | ||
1116 | static int __devexit esp_remove_common(struct esp *esp) | ||
1117 | { | ||
1118 | unsigned int irq = esp->ehost->irq; | ||
1119 | |||
1120 | scsi_remove_host(esp->ehost); | ||
1121 | |||
1122 | ESP_INTSOFF(esp->dregs); | ||
1123 | #if 0 | ||
1124 | esp_reset_dma(esp); | ||
1125 | esp_reset_esp(esp); | ||
1126 | #endif | ||
1127 | |||
1128 | free_irq(irq, esp); | ||
1129 | sbus_free_consistent(esp->sdev, 16, | ||
1130 | (void *) esp->esp_command, esp->esp_command_dvma); | ||
1131 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1132 | esp->dma->allocated = 0; | ||
1133 | |||
1134 | scsi_host_put(esp->ehost); | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1139 | |||
1140 | #ifdef CONFIG_SUN4 | ||
1141 | |||
1142 | #include <asm/sun4paddr.h> | ||
1143 | |||
1144 | static struct sbus_dev sun4_esp_dev; | ||
1145 | |||
1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | ||
1147 | { | ||
1148 | if (sun4_esp_physaddr) { | ||
1149 | memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); | ||
1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; | ||
1151 | sun4_esp_dev.irqs[0] = 4; | ||
1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; | ||
1153 | sun4_esp_dev.resource[0].end = | ||
1154 | sun4_esp_physaddr + ESP_REG_SIZE - 1; | ||
1155 | sun4_esp_dev.resource[0].flags = IORESOURCE_IO; | ||
1156 | |||
1157 | return detect_one_esp(tpnt, NULL, | ||
1158 | &sun4_esp_dev, NULL, NULL, 0); | ||
1159 | } | ||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1163 | static int __devexit esp_sun4_remove(void) | ||
1164 | { | ||
1165 | struct of_device *dev = &sun4_esp_dev.ofdev; | ||
1166 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1167 | |||
1168 | return esp_remove_common(esp); | ||
1169 | } | ||
1170 | |||
1171 | #else /* !CONFIG_SUN4 */ | ||
1172 | |||
1173 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
1174 | { | ||
1175 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
1176 | struct device_node *dp = dev->node; | ||
1177 | struct sbus_dev *dma_sdev = NULL; | ||
1178 | int hme = 0; | ||
1179 | |||
1180 | if (dp->parent && | ||
1181 | (!strcmp(dp->parent->name, "espdma") || | ||
1182 | !strcmp(dp->parent->name, "dma"))) | ||
1183 | dma_sdev = sdev->parent; | ||
1184 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
1185 | dma_sdev = sdev; | ||
1186 | hme = 1; | ||
1187 | } | ||
1188 | |||
1189 | return detect_one_esp(match->data, &dev->dev, | ||
1190 | sdev, dma_sdev, sdev->bus, hme); | ||
1191 | } | ||
1192 | |||
1193 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
1194 | { | ||
1195 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1196 | |||
1197 | return esp_remove_common(esp); | ||
1198 | } | ||
1199 | |||
1200 | #endif /* !CONFIG_SUN4 */ | ||
1201 | |||
1202 | /* The info function will return whatever useful | ||
1203 | * information the developer sees fit. If not provided, then | ||
1204 | * the name field will be used instead. | ||
1205 | */ | ||
1206 | static const char *esp_info(struct Scsi_Host *host) | ||
1207 | { | ||
1208 | struct esp *esp; | ||
1209 | |||
1210 | esp = (struct esp *) host->hostdata; | ||
1211 | switch (esp->erev) { | ||
1212 | case esp100: | ||
1213 | return "Sparc ESP100 (NCR53C90)"; | ||
1214 | case esp100a: | ||
1215 | return "Sparc ESP100A (NCR53C90A)"; | ||
1216 | case esp236: | ||
1217 | return "Sparc ESP236"; | ||
1218 | case fas236: | ||
1219 | return "Sparc ESP236-FAST"; | ||
1220 | case fashme: | ||
1221 | return "Sparc ESP366-HME"; | ||
1222 | case fas100a: | ||
1223 | return "Sparc ESP100A-FAST"; | ||
1224 | default: | ||
1225 | return "Bogon ESP revision"; | ||
1226 | }; | ||
1227 | } | ||
1228 | |||
1229 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ | ||
1230 | struct info_str | ||
1231 | { | ||
1232 | char *buffer; | ||
1233 | int length; | ||
1234 | int offset; | ||
1235 | int pos; | ||
1236 | }; | ||
1237 | |||
1238 | static void copy_mem_info(struct info_str *info, char *data, int len) | ||
1239 | { | ||
1240 | if (info->pos + len > info->length) | ||
1241 | len = info->length - info->pos; | ||
1242 | |||
1243 | if (info->pos + len < info->offset) { | ||
1244 | info->pos += len; | ||
1245 | return; | ||
1246 | } | ||
1247 | if (info->pos < info->offset) { | ||
1248 | data += (info->offset - info->pos); | ||
1249 | len -= (info->offset - info->pos); | ||
1250 | } | ||
1251 | |||
1252 | if (len > 0) { | ||
1253 | memcpy(info->buffer + info->pos, data, len); | ||
1254 | info->pos += len; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | static int copy_info(struct info_str *info, char *fmt, ...) | ||
1259 | { | ||
1260 | va_list args; | ||
1261 | char buf[81]; | ||
1262 | int len; | ||
1263 | |||
1264 | va_start(args, fmt); | ||
1265 | len = vsprintf(buf, fmt, args); | ||
1266 | va_end(args); | ||
1267 | |||
1268 | copy_mem_info(info, buf, len); | ||
1269 | return len; | ||
1270 | } | ||
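
The copy_mem_info()/copy_info() pair above is the usual /proc read-window plumbing: info->pos counts every byte of output generated, but only the slice that falls inside the window the caller asked for ends up in the buffer. A minimal standalone sketch of that idea, deliberately simplified rather than byte-for-byte identical to the driver's clipping; struct win and emit() are invented names for illustration:

#include <stdio.h>

/* Simplified model of the proc-info windowing idea: all output is
 * generated, but only bytes whose global position falls inside
 * [offset, offset + length) are stored in the caller's buffer.
 */
struct win {
	char *buf;
	int length;	/* size of the caller's buffer        */
	int offset;	/* first global byte the caller wants */
	int pos;	/* global bytes generated so far      */
	int stored;	/* bytes actually placed into buf     */
};

static void emit(struct win *w, const char *data, int len)
{
	int i;

	for (i = 0; i < len; i++, w->pos++) {
		if (w->pos >= w->offset && w->stored < w->length)
			w->buf[w->stored++] = data[i];
	}
}

int main(void)
{
	char buf[8];
	struct win w = { buf, sizeof(buf), 4, 0, 0 };

	emit(&w, "Sparc ESP Host Adapter:\n", 24);
	printf("%.*s\n", w.stored, buf);	/* prints "c ESP Ho" */
	return 0;
}

Compiled on its own this prints "c ESP Ho": the first four generated bytes are skipped because they lie before offset 4, and storage stops once the 8-byte buffer is full.
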
1271 | |||
1272 | static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len) | ||
1273 | { | ||
1274 | struct scsi_device *sdev; | ||
1275 | struct info_str info; | ||
1276 | int i; | ||
1277 | |||
1278 | info.buffer = ptr; | ||
1279 | info.length = len; | ||
1280 | info.offset = offset; | ||
1281 | info.pos = 0; | ||
1282 | |||
1283 | copy_info(&info, "Sparc ESP Host Adapter:\n"); | ||
1284 | copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node); | ||
1285 | copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name); | ||
1286 | copy_info(&info, "\tESP Model\t\t"); | ||
1287 | switch (esp->erev) { | ||
1288 | case esp100: | ||
1289 | copy_info(&info, "ESP100\n"); | ||
1290 | break; | ||
1291 | case esp100a: | ||
1292 | copy_info(&info, "ESP100A\n"); | ||
1293 | break; | ||
1294 | case esp236: | ||
1295 | copy_info(&info, "ESP236\n"); | ||
1296 | break; | ||
1297 | case fas236: | ||
1298 | copy_info(&info, "FAS236\n"); | ||
1299 | break; | ||
1300 | case fas100a: | ||
1301 | copy_info(&info, "FAS100A\n"); | ||
1302 | break; | ||
1303 | case fast: | ||
1304 | copy_info(&info, "FAST\n"); | ||
1305 | break; | ||
1306 | case fashme: | ||
1307 | copy_info(&info, "Happy Meal FAS\n"); | ||
1308 | break; | ||
1309 | case espunknown: | ||
1310 | default: | ||
1311 | copy_info(&info, "Unknown!\n"); | ||
1312 | break; | ||
1313 | }; | ||
1314 | copy_info(&info, "\tDMA Revision\t\t"); | ||
1315 | switch (esp->dma->revision) { | ||
1316 | case dvmarev0: | ||
1317 | copy_info(&info, "Rev 0\n"); | ||
1318 | break; | ||
1319 | case dvmaesc1: | ||
1320 | copy_info(&info, "ESC Rev 1\n"); | ||
1321 | break; | ||
1322 | case dvmarev1: | ||
1323 | copy_info(&info, "Rev 1\n"); | ||
1324 | break; | ||
1325 | case dvmarev2: | ||
1326 | copy_info(&info, "Rev 2\n"); | ||
1327 | break; | ||
1328 | case dvmarev3: | ||
1329 | copy_info(&info, "Rev 3\n"); | ||
1330 | break; | ||
1331 | case dvmarevplus: | ||
1332 | copy_info(&info, "Rev 1+\n"); | ||
1333 | break; | ||
1334 | case dvmahme: | ||
1335 | copy_info(&info, "Rev HME/FAS\n"); | ||
1336 | break; | ||
1337 | default: | ||
1338 | copy_info(&info, "Unknown!\n"); | ||
1339 | break; | ||
1340 | }; | ||
1341 | copy_info(&info, "\tLive Targets\t\t[ "); | ||
1342 | for (i = 0; i < 15; i++) { | ||
1343 | if (esp->targets_present & (1 << i)) | ||
1344 | copy_info(&info, "%d ", i); | ||
1345 | } | ||
1346 | copy_info(&info, "]\n\n"); | ||
1347 | |||
1348 | /* Now describe the state of each existing target. */ | ||
1349 | copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n"); | ||
1350 | |||
1351 | shost_for_each_device(sdev, esp->ehost) { | ||
1352 | struct esp_device *esp_dev = sdev->hostdata; | ||
1353 | uint id = sdev->id; | ||
1354 | |||
1355 | if (!(esp->targets_present & (1 << id))) | ||
1356 | continue; | ||
1357 | |||
1358 | copy_info(&info, "%d\t\t", id); | ||
1359 | copy_info(&info, "%08lx\t", esp->config3[id]); | ||
1360 | copy_info(&info, "[%02lx,%02lx]\t\t\t", | ||
1361 | esp_dev->sync_max_offset, | ||
1362 | esp_dev->sync_min_period); | ||
1363 | copy_info(&info, "%s\t\t", | ||
1364 | esp_dev->disconnect ? "yes" : "no"); | ||
1365 | copy_info(&info, "%s\n", | ||
1366 | (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no"); | ||
1367 | } | ||
1368 | return info.pos > info.offset? info.pos - info.offset : 0; | ||
1369 | } | ||
1370 | |||
1371 | /* ESP proc filesystem code. */ | ||
1372 | static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, | ||
1373 | int length, int inout) | ||
1374 | { | ||
1375 | struct esp *esp = (struct esp *) host->hostdata; | ||
1376 | |||
1377 | if (inout) | ||
1378 | return -EINVAL; /* not yet */ | ||
1379 | |||
1380 | if (start) | ||
1381 | *start = buffer; | ||
1382 | |||
1383 | return esp_host_info(esp, buffer, offset, length); | ||
1384 | } | ||
1385 | |||
1386 | static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1387 | { | ||
1388 | if (sp->use_sg == 0) { | ||
1389 | sp->SCp.this_residual = sp->request_bufflen; | ||
1390 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1391 | sp->SCp.buffers_residual = 0; | ||
1392 | if (sp->request_bufflen) { | ||
1393 | sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer, | ||
1394 | sp->SCp.this_residual, | ||
1395 | sp->sc_data_direction); | ||
1396 | sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in); | ||
1397 | } else { | ||
1398 | sp->SCp.ptr = NULL; | ||
1399 | } | ||
1400 | } else { | ||
1401 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1402 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, | ||
1403 | sp->SCp.buffer, | ||
1404 | sp->use_sg, | ||
1405 | sp->sc_data_direction); | ||
1406 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
1407 | sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1412 | { | ||
1413 | if (sp->use_sg) { | ||
1414 | sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, | ||
1415 | sp->sc_data_direction); | ||
1416 | } else if (sp->request_bufflen) { | ||
1417 | sbus_unmap_single(esp->sdev, | ||
1418 | sp->SCp.have_data_in, | ||
1419 | sp->request_bufflen, | ||
1420 | sp->sc_data_direction); | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1424 | static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1425 | { | ||
1426 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1427 | |||
1428 | sp->SCp.ptr = ep->saved_ptr; | ||
1429 | sp->SCp.buffer = ep->saved_buffer; | ||
1430 | sp->SCp.this_residual = ep->saved_this_residual; | ||
1431 | sp->SCp.buffers_residual = ep->saved_buffers_residual; | ||
1432 | } | ||
1433 | |||
1434 | static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1435 | { | ||
1436 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1437 | |||
1438 | ep->saved_ptr = sp->SCp.ptr; | ||
1439 | ep->saved_buffer = sp->SCp.buffer; | ||
1440 | ep->saved_this_residual = sp->SCp.this_residual; | ||
1441 | ep->saved_buffers_residual = sp->SCp.buffers_residual; | ||
1442 | } | ||
1443 | |||
1444 | /* Some rules: | ||
1445 | * | ||
1446 | * 1) Never ever panic while something is live on the bus. | ||
1447 | * If there is to be any chance of syncing the disks this | ||
1448 | * rule is to be obeyed. | ||
1449 | * | ||
1450 | * 2) Any target that causes a foul condition will no longer | ||
1451 | * have synchronous transfers done to it, no questions | ||
1452 | * asked. | ||
1453 | * | ||
1454 | * 3) Keep register accesses to a minimum. Think about some | ||
1455 | * day when we have Xbus machines this is running on and | ||
1456 | * the ESP chip is on the other end of the machine on a | ||
1457 | * different board from the cpu where this is running. | ||
1458 | */ | ||
1459 | |||
1460 | /* Fire off a command. We assume the bus is free and that the only | ||
1461 | * case where we could see an interrupt is where we have disconnected | ||
1462 | * commands active and they are trying to reselect us. | ||
1463 | */ | ||
1464 | static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp) | ||
1465 | { | ||
1466 | switch (sp->cmd_len) { | ||
1467 | case 6: | ||
1468 | case 10: | ||
1469 | case 12: | ||
1470 | esp->esp_slowcmd = 0; | ||
1471 | break; | ||
1472 | |||
1473 | default: | ||
1474 | esp->esp_slowcmd = 1; | ||
1475 | esp->esp_scmdleft = sp->cmd_len; | ||
1476 | esp->esp_scmdp = &sp->cmnd[0]; | ||
1477 | break; | ||
1478 | }; | ||
1479 | } | ||
1480 | |||
1481 | static inline void build_sync_nego_msg(struct esp *esp, int period, int offset) | ||
1482 | { | ||
1483 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1484 | esp->cur_msgout[1] = 3; | ||
1485 | esp->cur_msgout[2] = EXTENDED_SDTR; | ||
1486 | esp->cur_msgout[3] = period; | ||
1487 | esp->cur_msgout[4] = offset; | ||
1488 | esp->msgout_len = 5; | ||
1489 | } | ||
1490 | |||
1491 | /* SIZE is in bits, currently HME only supports 16 bit wide transfers. */ | ||
1492 | static inline void build_wide_nego_msg(struct esp *esp, int size) | ||
1493 | { | ||
1494 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1495 | esp->cur_msgout[1] = 2; | ||
1496 | esp->cur_msgout[2] = EXTENDED_WDTR; | ||
1497 | switch (size) { | ||
1498 | case 32: | ||
1499 | esp->cur_msgout[3] = 2; | ||
1500 | break; | ||
1501 | case 16: | ||
1502 | esp->cur_msgout[3] = 1; | ||
1503 | break; | ||
1504 | case 8: | ||
1505 | default: | ||
1506 | esp->cur_msgout[3] = 0; | ||
1507 | break; | ||
1508 | }; | ||
1509 | |||
1510 | esp->msgout_len = 4; | ||
1511 | } | ||
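
Both helpers above emit SCSI-2 extended messages: a 0x01 EXTENDED_MESSAGE header, a length byte, the extended message code, then the parameters (SDTR carries a period factor and a REQ/ACK offset, WDTR a width exponent). A standalone sketch that lays out the same bytes, with the message codes written numerically here instead of taken from the kernel headers:

#include <stdio.h>

/* SCSI-2 extended messages, values written out by hand for this sketch:
 * EXTENDED_MESSAGE = 0x01, EXTENDED_SDTR = 0x01, EXTENDED_WDTR = 0x03.
 */
static int build_sdtr(unsigned char *msg, int period, int offset)
{
	msg[0] = 0x01;		/* EXTENDED_MESSAGE                */
	msg[1] = 3;		/* three bytes follow              */
	msg[2] = 0x01;		/* EXTENDED_SDTR                   */
	msg[3] = period;	/* period factor, units of 4 ns    */
	msg[4] = offset;	/* REQ/ACK offset, 0 == async      */
	return 5;
}

static int build_wdtr(unsigned char *msg, int width_exp)
{
	msg[0] = 0x01;		/* EXTENDED_MESSAGE                */
	msg[1] = 2;		/* two bytes follow                */
	msg[2] = 0x03;		/* EXTENDED_WDTR                   */
	msg[3] = width_exp;	/* 0 = 8bit, 1 = 16bit, 2 = 32bit  */
	return 4;
}

int main(void)
{
	unsigned char msg[5];
	int i, n;

	n = build_sdtr(msg, 0x19, 15);	/* example period factor, offset 15 */
	for (i = 0; i < n; i++)
		printf("%02x ", msg[i]);
	printf("\n");

	n = build_wdtr(msg, 1);		/* 16-bit wide, as for the FAS366 */
	for (i = 0; i < n; i++)
		printf("%02x ", msg[i]);
	printf("\n");
	return 0;
}

A period factor of 0x19 (25) means 25 * 4 ns = 100 ns, i.e. 10 MHz fast SCSI; an offset of 0 requests asynchronous transfers, which is how the code below disables sync for troublesome devices.
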
1512 | |||
1513 | static void esp_exec_cmd(struct esp *esp) | ||
1514 | { | ||
1515 | struct scsi_cmnd *SCptr; | ||
1516 | struct scsi_device *SDptr; | ||
1517 | struct esp_device *esp_dev; | ||
1518 | volatile u8 *cmdp = esp->esp_command; | ||
1519 | u8 the_esp_command; | ||
1520 | int lun, target; | ||
1521 | int i; | ||
1522 | |||
1523 | /* Hold off if we have disconnected commands and | ||
1524 | * an IRQ is showing... | ||
1525 | */ | ||
1526 | if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs)) | ||
1527 | return; | ||
1528 | |||
1529 | /* Grab first member of the issue queue. */ | ||
1530 | SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC); | ||
1531 | |||
1532 | /* Safe to panic here because current_SC is null. */ | ||
1533 | if (!SCptr) | ||
1534 | panic("esp: esp_exec_cmd and issue queue is NULL"); | ||
1535 | |||
1536 | SDptr = SCptr->device; | ||
1537 | esp_dev = SDptr->hostdata; | ||
1538 | lun = SCptr->device->lun; | ||
1539 | target = SCptr->device->id; | ||
1540 | |||
1541 | esp->snip = 0; | ||
1542 | esp->msgout_len = 0; | ||
1543 | |||
1544 | /* Send it out whole, or piece by piece? The ESP | ||
1545 | * only knows how to automatically send out 6, 10, | ||
1546 | * and 12 byte commands. I used to think that the | ||
1547 | * Linux SCSI code would never throw anything other | ||
1548 | * than that to us, but then again there is the | ||
1549 | * SCSI generic driver which can send us anything. | ||
1550 | */ | ||
1551 | esp_check_cmd(esp, SCptr); | ||
1552 | |||
1553 | /* If arbitration/selection is successful, the ESP will leave | ||
1554 | * ATN asserted, causing the target to go into message out | ||
1555 | * phase. The ESP will feed the target the identify and then | ||
1556 | * the target can only legally go to one of command, | ||
1557 | * datain/out, status, or message in phase, or stay in message | ||
1558 | * out phase (should we be trying to send a sync negotiation | ||
1559 | * message after the identify). It is not allowed to drop | ||
1560 | * BSY, but some buggy targets do and we check for this | ||
1561 | * condition in the selection complete code. Most of the time | ||
1562 | * we'll make the command bytes available to the ESP and it | ||
1563 | * will not interrupt us until it finishes command phase; we | ||
1564 | * cannot do this for command sizes the ESP does not | ||
1565 | * understand, and in that case we'll get interrupted right | ||
1566 | * when the target goes into command phase. | ||
1567 | * | ||
1568 | * It is absolutely _illegal_ in the presence of SCSI-2 devices | ||
1569 | * to use the ESP select w/o ATN command. When SCSI-2 devices are | ||
1570 | * present on the bus we _must_ always go straight to message out | ||
1571 | * phase with an identify message for the target. Being that | ||
1572 | * selection attempts in SCSI-1 w/o ATN was an option, doing SCSI-2 | ||
1573 | * selections should not confuse SCSI-1 we hope. | ||
1574 | */ | ||
1575 | |||
1576 | if (esp_dev->sync) { | ||
1577 | /* this target's sync is known */ | ||
1578 | #ifndef __sparc_v9__ | ||
1579 | do_sync_known: | ||
1580 | #endif | ||
1581 | if (esp_dev->disconnect) | ||
1582 | *cmdp++ = IDENTIFY(1, lun); | ||
1583 | else | ||
1584 | *cmdp++ = IDENTIFY(0, lun); | ||
1585 | |||
1586 | if (esp->esp_slowcmd) { | ||
1587 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1588 | esp_advance_phase(SCptr, in_slct_stop); | ||
1589 | } else { | ||
1590 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1591 | esp_advance_phase(SCptr, in_slct_norm); | ||
1592 | } | ||
1593 | } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) { | ||
1594 | /* After the bootup SCSI code sends both the | ||
1595 | * TEST_UNIT_READY and INQUIRY commands we want | ||
1596 | * to at least attempt allowing the device to | ||
1597 | * disconnect. | ||
1598 | */ | ||
1599 | ESPMISC(("esp: Selecting device for first time. target=%d " | ||
1600 | "lun=%d\n", target, SCptr->device->lun)); | ||
1601 | if (!SDptr->borken && !esp_dev->disconnect) | ||
1602 | esp_dev->disconnect = 1; | ||
1603 | |||
1604 | *cmdp++ = IDENTIFY(0, lun); | ||
1605 | esp->prevmsgout = NOP; | ||
1606 | esp_advance_phase(SCptr, in_slct_norm); | ||
1607 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1608 | |||
1609 | /* Take no chances... */ | ||
1610 | esp_dev->sync_max_offset = 0; | ||
1611 | esp_dev->sync_min_period = 0; | ||
1612 | } else { | ||
1613 | /* Sorry, I have had way too many problems with | ||
1614 | * various CDROM devices on ESP. -DaveM | ||
1615 | */ | ||
1616 | int cdrom_hwbug_wkaround = 0; | ||
1617 | |||
1618 | #ifndef __sparc_v9__ | ||
1619 | /* Never allow disconnects or synchronous transfers on | ||
1620 | * SparcStation1 and SparcStation1+. Allowing those | ||
1621 | * to be enabled seems to lock up the machine completely. | ||
1622 | */ | ||
1623 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
1624 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
1625 | /* But we are nice and allow tapes and removable | ||
1626 | * disks (but not CDROMs) to disconnect. | ||
1627 | */ | ||
1628 | if(SDptr->type == TYPE_TAPE || | ||
1629 | (SDptr->type != TYPE_ROM && SDptr->removable)) | ||
1630 | esp_dev->disconnect = 1; | ||
1631 | else | ||
1632 | esp_dev->disconnect = 0; | ||
1633 | esp_dev->sync_max_offset = 0; | ||
1634 | esp_dev->sync_min_period = 0; | ||
1635 | esp_dev->sync = 1; | ||
1636 | esp->snip = 0; | ||
1637 | goto do_sync_known; | ||
1638 | } | ||
1639 | #endif /* !(__sparc_v9__) */ | ||
1640 | |||
1641 | /* We've talked to this guy before, | ||
1642 | * but never negotiated. Let's try; we | ||
1643 | * need to attempt WIDE first, before | ||
1644 | * sync nego, as per SCSI 2 standard. | ||
1645 | */ | ||
1646 | if (esp->erev == fashme && !esp_dev->wide) { | ||
1647 | if (!SDptr->borken && | ||
1648 | SDptr->type != TYPE_ROM && | ||
1649 | SDptr->removable == 0) { | ||
1650 | build_wide_nego_msg(esp, 16); | ||
1651 | esp_dev->wide = 1; | ||
1652 | esp->wnip = 1; | ||
1653 | goto after_nego_msg_built; | ||
1654 | } else { | ||
1655 | esp_dev->wide = 1; | ||
1656 | /* Fall through and try sync. */ | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | if (!SDptr->borken) { | ||
1661 | if ((SDptr->type == TYPE_ROM)) { | ||
1662 | /* Nice try sucker... */ | ||
1663 | ESPMISC(("esp%d: Disabling sync for buggy " | ||
1664 | "CDROM.\n", esp->esp_id)); | ||
1665 | cdrom_hwbug_wkaround = 1; | ||
1666 | build_sync_nego_msg(esp, 0, 0); | ||
1667 | } else if (SDptr->removable != 0) { | ||
1668 | ESPMISC(("esp%d: Not negotiating sync/wide but " | ||
1669 | "allowing disconnect for removable media.\n", | ||
1670 | esp->esp_id)); | ||
1671 | build_sync_nego_msg(esp, 0, 0); | ||
1672 | } else { | ||
1673 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
1674 | } | ||
1675 | } else { | ||
1676 | build_sync_nego_msg(esp, 0, 0); | ||
1677 | } | ||
1678 | esp_dev->sync = 1; | ||
1679 | esp->snip = 1; | ||
1680 | |||
1681 | after_nego_msg_built: | ||
1682 | /* A fix for broken SCSI1 targets, when they disconnect | ||
1683 | * they lock up the bus and confuse ESP. So disallow | ||
1684 | * disconnects for SCSI1 targets for now until we | ||
1685 | * find a better fix. | ||
1686 | * | ||
1687 | * Addendum: This is funny, I figured out what was going | ||
1688 | * on. The blotzed SCSI1 target would disconnect, | ||
1689 | * one of the other SCSI2 targets or both would be | ||
1690 | * disconnected as well. The SCSI1 target would | ||
1691 | * stay disconnected long enough that we start | ||
1692 | * up a command on one of the SCSI2 targets. As | ||
1693 | * the ESP is arbitrating for the bus the SCSI1 | ||
1694 | * target begins to arbitrate as well to reselect | ||
1695 | * the ESP. The SCSI1 target refuses to drop its | ||
1696 | * ID bit on the data bus even though the ESP is | ||
1697 | * at ID 7 and is the obvious winner for any | ||
1698 | * arbitration. The ESP is a poor sport and refuses | ||
1699 | * to lose arbitration, it will continue indefinitely | ||
1700 | * trying to arbitrate for the bus and can only be | ||
1701 | * stopped via a chip reset or SCSI bus reset. | ||
1702 | * Therefore _no_ disconnects for SCSI1 targets | ||
1703 | * thank you very much. ;-) | ||
1704 | */ | ||
1705 | if(((SDptr->scsi_level < 3) && | ||
1706 | (SDptr->type != TYPE_TAPE) && | ||
1707 | SDptr->removable == 0) || | ||
1708 | cdrom_hwbug_wkaround || SDptr->borken) { | ||
1709 | ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d " | ||
1710 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
1711 | esp_dev->disconnect = 0; | ||
1712 | *cmdp++ = IDENTIFY(0, lun); | ||
1713 | } else { | ||
1714 | *cmdp++ = IDENTIFY(1, lun); | ||
1715 | } | ||
1716 | |||
1717 | /* ESP fifo is only so big... | ||
1718 | * Make this look like a slow command. | ||
1719 | */ | ||
1720 | esp->esp_slowcmd = 1; | ||
1721 | esp->esp_scmdleft = SCptr->cmd_len; | ||
1722 | esp->esp_scmdp = &SCptr->cmnd[0]; | ||
1723 | |||
1724 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1725 | esp_advance_phase(SCptr, in_slct_msg); | ||
1726 | } | ||
1727 | |||
1728 | if (!esp->esp_slowcmd) | ||
1729 | for (i = 0; i < SCptr->cmd_len; i++) | ||
1730 | *cmdp++ = SCptr->cmnd[i]; | ||
1731 | |||
1732 | /* HME sucks... */ | ||
1733 | if (esp->erev == fashme) | ||
1734 | sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT), | ||
1735 | esp->eregs + ESP_BUSID); | ||
1736 | else | ||
1737 | sbus_writeb(target & 7, esp->eregs + ESP_BUSID); | ||
1738 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
1739 | esp->prev_stp != esp_dev->sync_min_period || | ||
1740 | (esp->erev > esp100a && | ||
1741 | esp->prev_cfg3 != esp->config3[target])) { | ||
1742 | esp->prev_soff = esp_dev->sync_max_offset; | ||
1743 | esp->prev_stp = esp_dev->sync_min_period; | ||
1744 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
1745 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
1746 | if (esp->erev > esp100a) { | ||
1747 | esp->prev_cfg3 = esp->config3[target]; | ||
1748 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
1749 | } | ||
1750 | } | ||
1751 | i = (cmdp - esp->esp_command); | ||
1752 | |||
1753 | if (esp->erev == fashme) { | ||
1754 | esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */ | ||
1755 | |||
1756 | /* Set up the DMA and HME counters */ | ||
1757 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1758 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1759 | sbus_writeb(0, esp->eregs + FAS_RLO); | ||
1760 | sbus_writeb(0, esp->eregs + FAS_RHI); | ||
1761 | esp_cmd(esp, the_esp_command); | ||
1762 | |||
1763 | /* Talk about touchy hardware... */ | ||
1764 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
1765 | (DMA_SCSI_DISAB | DMA_ENABLE)) & | ||
1766 | ~(DMA_ST_WRITE)); | ||
1767 | sbus_writel(16, esp->dregs + DMA_COUNT); | ||
1768 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1769 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
1770 | } else { | ||
1771 | u32 tmp; | ||
1772 | |||
1773 | /* Set up the DMA and ESP counters */ | ||
1774 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1775 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1776 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
1777 | tmp &= ~DMA_ST_WRITE; | ||
1778 | tmp |= DMA_ENABLE; | ||
1779 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
1780 | if (esp->dma->revision == dvmaesc1) { | ||
1781 | if (i) /* Workaround ESC gate array SBUS rerun bug. */ | ||
1782 | sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT); | ||
1783 | } | ||
1784 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1785 | |||
1786 | /* Tell ESP to "go". */ | ||
1787 | esp_cmd(esp, the_esp_command); | ||
1788 | } | ||
1789 | } | ||
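
The IDENTIFY() values placed at the start of the command buffer above are one-byte SCSI identify messages: bit 7 is always set, bit 6 grants the target disconnect privilege, and the low three bits carry the LUN. A standalone sketch (make_identify() is an invented name; the driver presumably gets IDENTIFY() from the SCSI headers):

#include <stdio.h>

/* SCSI IDENTIFY message: 1 d 0 0 0 l l l
 *   bit 7     always 1 (marks the byte as an identify message)
 *   bit 6     set when the initiator allows the target to disconnect
 *   bits 2..0 logical unit number
 */
static unsigned char make_identify(int can_disconnect, int lun)
{
	return 0x80 | (can_disconnect ? 0x40 : 0x00) | (lun & 0x07);
}

int main(void)
{
	printf("IDENTIFY(1, 0) = %02x\n", make_identify(1, 0));	/* c0 */
	printf("IDENTIFY(0, 2) = %02x\n", make_identify(0, 2));	/* 82 */
	return 0;
}
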
1790 | |||
1791 | /* Queue a SCSI command delivered from the mid-level Linux SCSI code. */ | ||
1792 | static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | ||
1793 | { | ||
1794 | struct esp *esp; | ||
1795 | |||
1796 | /* Set up func ptr and initial driver cmd-phase. */ | ||
1797 | SCpnt->scsi_done = done; | ||
1798 | SCpnt->SCp.phase = not_issued; | ||
1799 | |||
1800 | /* We use the scratch area. */ | ||
1801 | ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun)); | ||
1802 | ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun)); | ||
1803 | |||
1804 | esp = (struct esp *) SCpnt->device->host->hostdata; | ||
1805 | esp_get_dmabufs(esp, SCpnt); | ||
1806 | esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */ | ||
1807 | |||
1808 | SCpnt->SCp.Status = CHECK_CONDITION; | ||
1809 | SCpnt->SCp.Message = 0xff; | ||
1810 | SCpnt->SCp.sent_command = 0; | ||
1811 | |||
1812 | /* Place into our queue. */ | ||
1813 | if (SCpnt->cmnd[0] == REQUEST_SENSE) { | ||
1814 | ESPQUEUE(("RQSENSE\n")); | ||
1815 | prepend_SC(&esp->issue_SC, SCpnt); | ||
1816 | } else { | ||
1817 | ESPQUEUE(("\n")); | ||
1818 | append_SC(&esp->issue_SC, SCpnt); | ||
1819 | } | ||
1820 | |||
1821 | /* Run it now if we can. */ | ||
1822 | if (!esp->current_SC && !esp->resetting_bus) | ||
1823 | esp_exec_cmd(esp); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | /* Dump driver state. */ | ||
1829 | static void esp_dump_cmd(struct scsi_cmnd *SCptr) | ||
1830 | { | ||
1831 | ESPLOG(("[tgt<%02x> lun<%02x> " | ||
1832 | "pphase<%s> cphase<%s>]", | ||
1833 | SCptr->device->id, SCptr->device->lun, | ||
1834 | phase_string(SCptr->SCp.sent_command), | ||
1835 | phase_string(SCptr->SCp.phase))); | ||
1836 | } | ||
1837 | |||
1838 | static void esp_dump_state(struct esp *esp) | ||
1839 | { | ||
1840 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
1841 | #ifdef DEBUG_ESP_CMDS | ||
1842 | int i; | ||
1843 | #endif | ||
1844 | |||
1845 | ESPLOG(("esp%d: dumping state\n", esp->esp_id)); | ||
1846 | ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n", | ||
1847 | esp->esp_id, | ||
1848 | sbus_readl(esp->dregs + DMA_CSR), | ||
1849 | sbus_readl(esp->dregs + DMA_ADDR))); | ||
1850 | ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1851 | esp->esp_id, esp->sreg, esp->seqreg, esp->ireg)); | ||
1852 | ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1853 | esp->esp_id, | ||
1854 | sbus_readb(esp->eregs + ESP_STATUS), | ||
1855 | sbus_readb(esp->eregs + ESP_SSTEP), | ||
1856 | sbus_readb(esp->eregs + ESP_INTRPT))); | ||
1857 | #ifdef DEBUG_ESP_CMDS | ||
1858 | printk("esp%d: last ESP cmds [", esp->esp_id); | ||
1859 | i = (esp->espcmdent - 1) & 31; | ||
1860 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1861 | i = (i - 1) & 31; | ||
1862 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1863 | i = (i - 1) & 31; | ||
1864 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1865 | i = (i - 1) & 31; | ||
1866 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1867 | printk("]\n"); | ||
1868 | #endif /* (DEBUG_ESP_CMDS) */ | ||
1869 | |||
1870 | if (SCptr) { | ||
1871 | ESPLOG(("esp%d: current command ", esp->esp_id)); | ||
1872 | esp_dump_cmd(SCptr); | ||
1873 | } | ||
1874 | ESPLOG(("\n")); | ||
1875 | SCptr = esp->disconnected_SC; | ||
1876 | ESPLOG(("esp%d: disconnected ", esp->esp_id)); | ||
1877 | while (SCptr) { | ||
1878 | esp_dump_cmd(SCptr); | ||
1879 | SCptr = (struct scsi_cmnd *) SCptr->host_scribble; | ||
1880 | } | ||
1881 | ESPLOG(("\n")); | ||
1882 | } | ||
1883 | |||
1884 | /* Abort a command. The host_lock is acquired by caller. */ | ||
1885 | static int esp_abort(struct scsi_cmnd *SCptr) | ||
1886 | { | ||
1887 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
1888 | int don; | ||
1889 | |||
1890 | ESPLOG(("esp%d: Aborting command\n", esp->esp_id)); | ||
1891 | esp_dump_state(esp); | ||
1892 | |||
1893 | /* Wheee, if this is the current command on the bus, the | ||
1894 | * best we can do is assert ATN and wait for msgout phase. | ||
1895 | * This should even fix a hung SCSI bus when we lose state | ||
1896 | * in the driver and timeout because the eventual phase change | ||
1897 | * will cause the ESP to (eventually) give an interrupt. | ||
1898 | */ | ||
1899 | if (esp->current_SC == SCptr) { | ||
1900 | esp->cur_msgout[0] = ABORT; | ||
1901 | esp->msgout_len = 1; | ||
1902 | esp->msgout_ctr = 0; | ||
1903 | esp_cmd(esp, ESP_CMD_SATN); | ||
1904 | return SUCCESS; | ||
1905 | } | ||
1906 | |||
1907 | /* If it is still in the issue queue then we can safely | ||
1908 | * call the completion routine and report abort success. | ||
1909 | */ | ||
1910 | don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB); | ||
1911 | if (don) { | ||
1912 | ESP_INTSOFF(esp->dregs); | ||
1913 | } | ||
1914 | if (esp->issue_SC) { | ||
1915 | struct scsi_cmnd **prev, *this; | ||
1916 | for (prev = (&esp->issue_SC), this = esp->issue_SC; | ||
1917 | this != NULL; | ||
1918 | prev = (struct scsi_cmnd **) &(this->host_scribble), | ||
1919 | this = (struct scsi_cmnd *) this->host_scribble) { | ||
1920 | |||
1921 | if (this == SCptr) { | ||
1922 | *prev = (struct scsi_cmnd *) this->host_scribble; | ||
1923 | this->host_scribble = NULL; | ||
1924 | |||
1925 | esp_release_dmabufs(esp, this); | ||
1926 | this->result = DID_ABORT << 16; | ||
1927 | this->scsi_done(this); | ||
1928 | |||
1929 | if (don) | ||
1930 | ESP_INTSON(esp->dregs); | ||
1931 | |||
1932 | return SUCCESS; | ||
1933 | } | ||
1934 | } | ||
1935 | } | ||
1936 | |||
1937 | /* Yuck, the command to abort is disconnected, it is not | ||
1938 | * worth trying to abort it now if something else is live | ||
1939 | * on the bus at this time. So, we let the SCSI code wait | ||
1940 | * a little bit and try again later. | ||
1941 | */ | ||
1942 | if (esp->current_SC) { | ||
1943 | if (don) | ||
1944 | ESP_INTSON(esp->dregs); | ||
1945 | return FAILED; | ||
1946 | } | ||
1947 | |||
1948 | /* It's disconnected, we have to reconnect to re-establish | ||
1949 | * the nexus and tell the device to abort. However, we really | ||
1950 | * cannot 'reconnect' per se. Don't try to be fancy, just | ||
1951 | * indicate failure, which causes our caller to reset the whole | ||
1952 | * bus. | ||
1953 | */ | ||
1954 | |||
1955 | if (don) | ||
1956 | ESP_INTSON(esp->dregs); | ||
1957 | |||
1958 | return FAILED; | ||
1959 | } | ||
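
The issue-queue scan in esp_abort() above uses the pointer-to-pointer idiom: prev always refers to the link field that points at the current node, so unlinking is the same single assignment whether the match is the head of the list or an interior node. A generic standalone sketch of the same idiom, with struct node and unlink_node() invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct node {
	int id;
	struct node *next;
};

/* Remove the first node whose id matches and return it (or NULL).
 * 'prev' points at whichever pointer currently links to 'this',
 * so *prev = this->next unlinks head and interior nodes alike.
 */
static struct node *unlink_node(struct node **head, int id)
{
	struct node **prev, *this;

	for (prev = head, this = *head; this != NULL;
	     prev = &this->next, this = this->next) {
		if (this->id == id) {
			*prev = this->next;
			this->next = NULL;
			return this;
		}
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	unlink_node(&head, 1);			/* removes the head   */
	printf("new head id = %d\n", head->id);	/* prints 2           */
	return 0;
}
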
1960 | |||
1961 | /* We've sent ESP_CMD_RS to the ESP, the interrupt had just | ||
1962 | * arrived indicating the end of the SCSI bus reset. Our job | ||
1963 | * is to clean out the command queues and begin re-execution | ||
1964 | * of SCSI commands once more. | ||
1965 | */ | ||
1966 | static int esp_finish_reset(struct esp *esp) | ||
1967 | { | ||
1968 | struct scsi_cmnd *sp = esp->current_SC; | ||
1969 | |||
1970 | /* Clean up currently executing command, if any. */ | ||
1971 | if (sp != NULL) { | ||
1972 | esp->current_SC = NULL; | ||
1973 | |||
1974 | esp_release_dmabufs(esp, sp); | ||
1975 | sp->result = (DID_RESET << 16); | ||
1976 | |||
1977 | sp->scsi_done(sp); | ||
1978 | } | ||
1979 | |||
1980 | /* Clean up disconnected queue, they have been invalidated | ||
1981 | * by the bus reset. | ||
1982 | */ | ||
1983 | if (esp->disconnected_SC) { | ||
1984 | while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) { | ||
1985 | esp_release_dmabufs(esp, sp); | ||
1986 | sp->result = (DID_RESET << 16); | ||
1987 | |||
1988 | sp->scsi_done(sp); | ||
1989 | } | ||
1990 | } | ||
1991 | |||
1992 | /* SCSI bus reset is complete. */ | ||
1993 | esp->resetting_bus = 0; | ||
1994 | wake_up(&esp->reset_queue); | ||
1995 | |||
1996 | /* Ok, now it is safe to get commands going once more. */ | ||
1997 | if (esp->issue_SC) | ||
1998 | esp_exec_cmd(esp); | ||
1999 | |||
2000 | return do_intr_end; | ||
2001 | } | ||
2002 | |||
2003 | static int esp_do_resetbus(struct esp *esp) | ||
2004 | { | ||
2005 | ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id)); | ||
2006 | esp->resetting_bus = 1; | ||
2007 | esp_cmd(esp, ESP_CMD_RS); | ||
2008 | |||
2009 | return do_intr_end; | ||
2010 | } | ||
2011 | |||
2012 | /* Reset ESP chip, reset hanging bus, then kill active and | ||
2013 | * disconnected commands for targets without soft reset. | ||
2014 | * | ||
2015 | * The host_lock is acquired by caller. | ||
2016 | */ | ||
2017 | static int esp_reset(struct scsi_cmnd *SCptr) | ||
2018 | { | ||
2019 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
2020 | |||
2021 | spin_lock_irq(esp->ehost->host_lock); | ||
2022 | (void) esp_do_resetbus(esp); | ||
2023 | spin_unlock_irq(esp->ehost->host_lock); | ||
2024 | |||
2025 | wait_event(esp->reset_queue, (esp->resetting_bus == 0)); | ||
2026 | |||
2027 | return SUCCESS; | ||
2028 | } | ||
2029 | |||
2030 | /* Internal ESP done function. */ | ||
2031 | static void esp_done(struct esp *esp, int error) | ||
2032 | { | ||
2033 | struct scsi_cmnd *done_SC = esp->current_SC; | ||
2034 | |||
2035 | esp->current_SC = NULL; | ||
2036 | |||
2037 | esp_release_dmabufs(esp, done_SC); | ||
2038 | done_SC->result = error; | ||
2039 | |||
2040 | done_SC->scsi_done(done_SC); | ||
2041 | |||
2042 | /* Bus is free, issue any commands in the queue. */ | ||
2043 | if (esp->issue_SC && !esp->current_SC) | ||
2044 | esp_exec_cmd(esp); | ||
2045 | |||
2046 | } | ||
2047 | |||
2048 | /* Wheee, ESP interrupt engine. */ | ||
2049 | |||
2050 | /* Forward declarations. */ | ||
2051 | static int esp_do_phase_determine(struct esp *esp); | ||
2052 | static int esp_do_data_finale(struct esp *esp); | ||
2053 | static int esp_select_complete(struct esp *esp); | ||
2054 | static int esp_do_status(struct esp *esp); | ||
2055 | static int esp_do_msgin(struct esp *esp); | ||
2056 | static int esp_do_msgindone(struct esp *esp); | ||
2057 | static int esp_do_msgout(struct esp *esp); | ||
2058 | static int esp_do_cmdbegin(struct esp *esp); | ||
2059 | |||
2060 | #define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP) | ||
2061 | #define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP) | ||
2062 | |||
2063 | /* Read any bytes found in the FAS366 fifo, storing them into | ||
2064 | * the ESP driver software state structure. | ||
2065 | */ | ||
2066 | static void hme_fifo_read(struct esp *esp) | ||
2067 | { | ||
2068 | u8 count = 0; | ||
2069 | u8 status = esp->sreg; | ||
2070 | |||
2071 | /* Cannot safely frob the fifo in the following cases, but | ||
2072 | * we must always read the fifo when the reselect interrupt | ||
2073 | * is pending. | ||
2074 | */ | ||
2075 | if (((esp->ireg & ESP_INTR_RSEL) == 0) && | ||
2076 | (sreg_datainp(status) || | ||
2077 | sreg_dataoutp(status) || | ||
2078 | (esp->current_SC && | ||
2079 | esp->current_SC->SCp.phase == in_data_done))) { | ||
2080 | ESPHME(("<wkaround_skipped>")); | ||
2081 | } else { | ||
2082 | unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES; | ||
2083 | |||
2084 | /* The HME stores bytes in multiples of 2 in the fifo. */ | ||
2085 | ESPHME(("hme_fifo[fcnt=%d", (int)fcnt)); | ||
2086 | while (fcnt) { | ||
2087 | esp->hme_fifo_workaround_buffer[count++] = | ||
2088 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2089 | esp->hme_fifo_workaround_buffer[count++] = | ||
2090 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2091 | ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1])); | ||
2092 | fcnt--; | ||
2093 | } | ||
2094 | if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) { | ||
2095 | ESPHME(("<poke_byte>")); | ||
2096 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2097 | esp->hme_fifo_workaround_buffer[count++] = | ||
2098 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2099 | ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1])); | ||
2100 | ESPHME(("CMD_FLUSH")); | ||
2101 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2102 | } else { | ||
2103 | ESPHME(("no_xtra_byte")); | ||
2104 | } | ||
2105 | } | ||
2106 | ESPHME(("wkarnd_cnt=%d]", (int)count)); | ||
2107 | esp->hme_fifo_workaround_count = count; | ||
2108 | } | ||
2109 | |||
2110 | static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count) | ||
2111 | { | ||
2112 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2113 | while (count) { | ||
2114 | u8 tmp = *bytes++; | ||
2115 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
2116 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2117 | count--; | ||
2118 | } | ||
2119 | } | ||
2120 | |||
2121 | /* We try to avoid some interrupts by jumping ahead and see if the ESP | ||
2122 | * has gotten far enough yet. Hence the following. | ||
2123 | */ | ||
2124 | static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp, | ||
2125 | int prev_phase, int new_phase) | ||
2126 | { | ||
2127 | if (scp->SCp.sent_command != prev_phase) | ||
2128 | return 0; | ||
2129 | if (ESP_IRQ_P(esp->dregs)) { | ||
2130 | /* Yes, we are able to save an interrupt. */ | ||
2131 | if (esp->erev == fashme) | ||
2132 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2133 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2134 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2135 | if (esp->erev == fashme) { | ||
2136 | /* This chip is really losing. */ | ||
2137 | ESPHME(("HME[")); | ||
2138 | /* Must latch fifo before reading the interrupt | ||
2139 | * register else garbage ends up in the FIFO | ||
2140 | * which confuses the driver utterly. | ||
2141 | * Happy Meal indeed.... | ||
2142 | */ | ||
2143 | ESPHME(("fifo_workaround]")); | ||
2144 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2145 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2146 | hme_fifo_read(esp); | ||
2147 | } | ||
2148 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2149 | return 0; | ||
2150 | else | ||
2151 | return do_reset_complete; | ||
2152 | } | ||
2153 | /* Ho hum, target is taking forever... */ | ||
2154 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2155 | return do_intr_end; | ||
2156 | } | ||
2157 | |||
2158 | static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp, | ||
2159 | int prev_phase1, int prev_phase2, int new_phase) | ||
2160 | { | ||
2161 | if (scp->SCp.sent_command != prev_phase1 && | ||
2162 | scp->SCp.sent_command != prev_phase2) | ||
2163 | return 0; | ||
2164 | if (ESP_IRQ_P(esp->dregs)) { | ||
2165 | /* Yes, we are able to save an interrupt. */ | ||
2166 | if (esp->erev == fashme) | ||
2167 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2168 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2170 | if (esp->erev == fashme) { | ||
2171 | /* This chip is really losing. */ | ||
2172 | ESPHME(("HME[")); | ||
2173 | |||
2174 | /* Must latch fifo before reading the interrupt | ||
2175 | * register else garbage ends up in the FIFO | ||
2176 | * which confuses the driver utterly. | ||
2177 | * Happy Meal indeed.... | ||
2178 | */ | ||
2179 | ESPHME(("fifo_workaround]")); | ||
2180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2182 | hme_fifo_read(esp); | ||
2183 | } | ||
2184 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2185 | return 0; | ||
2186 | else | ||
2187 | return do_reset_complete; | ||
2188 | } | ||
2189 | /* Ho hum, target is taking forever... */ | ||
2190 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2191 | return do_intr_end; | ||
2192 | } | ||
2193 | |||
2194 | /* Now some dma helpers. */ | ||
2195 | static void dma_setup(struct esp *esp, __u32 addr, int count, int write) | ||
2196 | { | ||
2197 | u32 nreg = sbus_readl(esp->dregs + DMA_CSR); | ||
2198 | |||
2199 | if (write) | ||
2200 | nreg |= DMA_ST_WRITE; | ||
2201 | else | ||
2202 | nreg &= ~(DMA_ST_WRITE); | ||
2203 | nreg |= DMA_ENABLE; | ||
2204 | sbus_writel(nreg, esp->dregs + DMA_CSR); | ||
2205 | if (esp->dma->revision == dvmaesc1) { | ||
2206 | /* This ESC gate array sucks! */ | ||
2207 | __u32 src = addr; | ||
2208 | __u32 dest = src + count; | ||
2209 | |||
2210 | if (dest & (PAGE_SIZE - 1)) | ||
2211 | count = PAGE_ALIGN(count); | ||
2212 | sbus_writel(count, esp->dregs + DMA_COUNT); | ||
2213 | } | ||
2214 | sbus_writel(addr, esp->dregs + DMA_ADDR); | ||
2215 | } | ||
2216 | |||
2217 | static void dma_drain(struct esp *esp) | ||
2218 | { | ||
2219 | u32 tmp; | ||
2220 | |||
2221 | if (esp->dma->revision == dvmahme) | ||
2222 | return; | ||
2223 | if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) { | ||
2224 | switch (esp->dma->revision) { | ||
2225 | default: | ||
2226 | tmp |= DMA_FIFO_STDRAIN; | ||
2227 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2228 | |||
2229 | case dvmarev3: | ||
2230 | case dvmaesc1: | ||
2231 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) | ||
2232 | udelay(1); | ||
2233 | }; | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | static void dma_invalidate(struct esp *esp) | ||
2238 | { | ||
2239 | u32 tmp; | ||
2240 | |||
2241 | if (esp->dma->revision == dvmahme) { | ||
2242 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
2243 | |||
2244 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
2245 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
2246 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
2247 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
2248 | |||
2249 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
2250 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2251 | |||
2252 | /* This is necessary to avoid having the SCSI channel | ||
2253 | * engine lock up on us. | ||
2254 | */ | ||
2255 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
2256 | } else { | ||
2257 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
2258 | udelay(1); | ||
2259 | |||
2260 | tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
2261 | tmp |= DMA_FIFO_INV; | ||
2262 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2263 | tmp &= ~DMA_FIFO_INV; | ||
2264 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2265 | } | ||
2266 | } | ||
2267 | |||
2268 | static inline void dma_flashclear(struct esp *esp) | ||
2269 | { | ||
2270 | dma_drain(esp); | ||
2271 | dma_invalidate(esp); | ||
2272 | } | ||
2273 | |||
2274 | static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp) | ||
2275 | { | ||
2276 | __u32 base, end, sz; | ||
2277 | |||
2278 | if (esp->dma->revision == dvmarev3) { | ||
2279 | sz = sp->SCp.this_residual; | ||
2280 | if (sz > 0x1000000) | ||
2281 | sz = 0x1000000; | ||
2282 | } else { | ||
2283 | base = ((__u32)((unsigned long)sp->SCp.ptr)); | ||
2284 | base &= (0x1000000 - 1); | ||
2285 | end = (base + sp->SCp.this_residual); | ||
2286 | if (end > 0x1000000) | ||
2287 | end = 0x1000000; | ||
2288 | sz = (end - base); | ||
2289 | } | ||
2290 | return sz; | ||
2291 | } | ||
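
For the non-rev3 DMA revisions, dma_can_transfer() above clamps the transfer so it never crosses a 16 MB (0x1000000) boundary: only the low 24 bits of the address matter, and the transfer may run at most to the end of the 16 MB window the buffer starts in. A small standalone sketch of the same arithmetic (clamp_to_16mb() is an invented name):

#include <stdio.h>
#include <stdint.h>

/* Clamp 'len' so that [addr, addr + len) stays inside the 16 MB
 * window the address currently sits in (window size 0x1000000).
 */
static uint32_t clamp_to_16mb(uint32_t addr, uint32_t len)
{
	uint32_t base = addr & (0x1000000 - 1);	/* offset inside the window */
	uint32_t end = base + len;

	if (end > 0x1000000)
		end = 0x1000000;
	return end - base;
}

int main(void)
{
	/* 1 MB below a window boundary: a 4 MB request is cut to 1 MB. */
	printf("%u\n", (unsigned) clamp_to_16mb(0x00f00000, 0x400000));	/* 1048576 */
	/* Well inside a window: the request passes through unchanged.  */
	printf("%u\n", (unsigned) clamp_to_16mb(0x00100000, 0x400000));	/* 4194304 */
	return 0;
}
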
2292 | |||
2293 | /* Misc. esp helper macros. */ | ||
2294 | #define esp_setcount(__eregs, __cnt, __hme) \ | ||
2295 | sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \ | ||
2296 | sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \ | ||
2297 | if (__hme) { \ | ||
2298 | sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \ | ||
2299 | sbus_writeb(0, (__eregs) + FAS_RHI); \ | ||
2300 | } | ||
2301 | |||
2302 | #define esp_getcount(__eregs, __hme) \ | ||
2303 | ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \ | ||
2304 | ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \ | ||
2305 | ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0)) | ||
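
Judging from the two macros above, the transfer count is simply split across the chip's 8-bit counter registers: TCLOW takes bits 0..7, TCMED bits 8..15, and on the FAS366/HME a third register takes bits 16..23. A standalone sketch of the same packing with a plain array standing in for the registers (set_count() and get_count() are invented names):

#include <stdio.h>
#include <stdint.h>

/* regs[0..2] stand in for TCLOW, TCMED and the HME high-count byte. */
static void set_count(uint8_t regs[3], uint32_t cnt, int hme)
{
	regs[0] = cnt & 0xff;			/* TCLOW:  bits 0..7       */
	regs[1] = (cnt >> 8) & 0xff;		/* TCMED:  bits 8..15      */
	regs[2] = hme ? (cnt >> 16) & 0xff : 0;	/* HME only: bits 16..23   */
}

static uint32_t get_count(const uint8_t regs[3], int hme)
{
	return regs[0] | (regs[1] << 8) | (hme ? regs[2] << 16 : 0);
}

int main(void)
{
	uint8_t regs[3];

	set_count(regs, 0x123456, 1);
	printf("%06x\n", (unsigned) get_count(regs, 1));	/* 123456           */
	set_count(regs, 0x123456, 0);
	printf("%06x\n", (unsigned) get_count(regs, 0));	/* 003456 (16-bit)  */
	return 0;
}
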
2306 | |||
2307 | #define fcount(__esp) \ | ||
2308 | (((__esp)->erev == fashme) ? \ | ||
2309 | (__esp)->hme_fifo_workaround_count : \ | ||
2310 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES) | ||
2311 | |||
2312 | #define fnzero(__esp) \ | ||
2313 | (((__esp)->erev == fashme) ? 0 : \ | ||
2314 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO) | ||
2315 | |||
2316 | /* XXX speculative nops unnecessary when continuing amidst a data phase | ||
2317 | * XXX even on esp100!!! another case of flooding the bus with I/O reg | ||
2318 | * XXX writes... | ||
2319 | */ | ||
2320 | #define esp_maybe_nop(__esp) \ | ||
2321 | if ((__esp)->erev == esp100) \ | ||
2322 | esp_cmd((__esp), ESP_CMD_NULL) | ||
2323 | |||
2324 | #define sreg_to_dataphase(__sreg) \ | ||
2325 | ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain) | ||
2326 | |||
2327 | /* The ESP100 when in synchronous data phase, can mistake a long final | ||
2328 | * REQ pulse from the target as an extra byte, it places whatever is on | ||
2329 | * the data lines into the fifo. For now, we will assume when this | ||
2330 | * happens that the target is a bit quirky and we don't want to | ||
2331 | * be talking synchronously to it anyways. Regardless, we need to | ||
2332 | * tell the ESP to eat the extraneous byte so that we can proceed | ||
2333 | * to the next phase. | ||
2334 | */ | ||
2335 | static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt) | ||
2336 | { | ||
2337 | /* Do not touch this piece of code. */ | ||
2338 | if ((!(esp->erev == esp100)) || | ||
2339 | (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) && | ||
2340 | !fifocnt) && | ||
2341 | !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) { | ||
2342 | if (sp->SCp.phase == in_dataout) | ||
2343 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2344 | return 0; | ||
2345 | } else { | ||
2346 | /* Async mode for this guy. */ | ||
2347 | build_sync_nego_msg(esp, 0, 0); | ||
2348 | |||
2349 | /* Ack the bogus byte, but set ATN first. */ | ||
2350 | esp_cmd(esp, ESP_CMD_SATN); | ||
2351 | esp_cmd(esp, ESP_CMD_MOK); | ||
2352 | return 1; | ||
2353 | } | ||
2354 | } | ||
2355 | |||
2356 | /* This closes the window during a selection with a reselect pending: because | ||
2357 | * we use DMA for the selection process, the FIFO should hold the correct | ||
2358 | * contents if we get reselected during this process. So we just need to | ||
2359 | * ack the possible illegal cmd interrupt pending on the esp100. | ||
2360 | */ | ||
2361 | static inline int esp100_reconnect_hwbug(struct esp *esp) | ||
2362 | { | ||
2363 | u8 tmp; | ||
2364 | |||
2365 | if (esp->erev != esp100) | ||
2366 | return 0; | ||
2367 | tmp = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2368 | if (tmp & ESP_INTR_SR) | ||
2369 | return 1; | ||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
2373 | /* This verifies the BUSID bits during a reselection so that we know which | ||
2374 | * target is talking to us. | ||
2375 | */ | ||
2376 | static inline int reconnect_target(struct esp *esp) | ||
2377 | { | ||
2378 | int it, me = esp->scsi_id_mask, targ = 0; | ||
2379 | |||
2380 | if (2 != fcount(esp)) | ||
2381 | return -1; | ||
2382 | if (esp->erev == fashme) { | ||
2383 | /* HME does not latch its own BUS ID bits during | ||
2384 | * a reselection. Also the target number is given | ||
2385 | * as an unsigned char, not as a sole bit number | ||
2386 | * like the other ESP's do. | ||
2387 | * Happy Meal indeed.... | ||
2388 | */ | ||
2389 | targ = esp->hme_fifo_workaround_buffer[0]; | ||
2390 | } else { | ||
2391 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
2392 | if (!(it & me)) | ||
2393 | return -1; | ||
2394 | it &= ~me; | ||
2395 | if (it & (it - 1)) | ||
2396 | return -1; | ||
2397 | while (!(it & 1)) | ||
2398 | targ++, it >>= 1; | ||
2399 | } | ||
2400 | return targ; | ||
2401 | } | ||
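
On the non-HME chips, reconnect_target() above decodes the reselecting target from the raw data-bus byte latched in the FIFO: the byte must contain our own ID bit plus exactly one other bit, and the position of that other bit is the target number. A standalone sketch of the decode (decode_resel_id() is an invented name):

#include <stdio.h>

/* Return the reselecting target's ID, or -1 if the byte is not a
 * valid reselection pattern (our bit missing, or not exactly one
 * other bit set).
 */
static int decode_resel_id(unsigned char bus_byte, unsigned char my_mask)
{
	int targ = 0;

	if (!(bus_byte & my_mask))
		return -1;		/* our own ID must be asserted   */
	bus_byte &= ~my_mask;
	if (bus_byte == 0 || (bus_byte & (bus_byte - 1)))
		return -1;		/* need exactly one other bit    */
	while (!(bus_byte & 1)) {
		targ++;
		bus_byte >>= 1;
	}
	return targ;
}

int main(void)
{
	/* Host at ID 7 (mask 0x80), target 3 reselecting: bits 7 and 3. */
	printf("%d\n", decode_resel_id(0x88, 0x80));	/*  3 */
	printf("%d\n", decode_resel_id(0x88, 0x40));	/* -1 */
	return 0;
}
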
2402 | |||
2403 | /* This verifies the identify from the target so that we know which lun is | ||
2404 | * being reconnected. | ||
2405 | */ | ||
2406 | static inline int reconnect_lun(struct esp *esp) | ||
2407 | { | ||
2408 | int lun; | ||
2409 | |||
2410 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) | ||
2411 | return -1; | ||
2412 | if (esp->erev == fashme) | ||
2413 | lun = esp->hme_fifo_workaround_buffer[1]; | ||
2414 | else | ||
2415 | lun = sbus_readb(esp->eregs + ESP_FDATA); | ||
2416 | |||
2417 | /* Yes, you read this correctly. We report a lun of zero | ||
2418 | * if we see a parity error. The ESP reports a parity error for | ||
2419 | * the lun byte, and this is the only way to hope to recover | ||
2420 | * because the target is connected. | ||
2421 | */ | ||
2422 | if (esp->sreg & ESP_STAT_PERR) | ||
2423 | return 0; | ||
2424 | |||
2425 | /* Check for illegal bits being set in the lun. */ | ||
2426 | if ((lun & 0x40) || !(lun & 0x80)) | ||
2427 | return -1; | ||
2428 | |||
2429 | return lun & 7; | ||
2430 | } | ||
2431 | |||
2432 | /* This puts the driver in a state where it can revitalize a command that | ||
2433 | * is being continued due to reselection. | ||
2434 | */ | ||
2435 | static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp) | ||
2436 | { | ||
2437 | struct esp_device *esp_dev = sp->device->hostdata; | ||
2438 | |||
2439 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
2440 | esp->prev_stp != esp_dev->sync_min_period || | ||
2441 | (esp->erev > esp100a && | ||
2442 | esp->prev_cfg3 != esp->config3[sp->device->id])) { | ||
2443 | esp->prev_soff = esp_dev->sync_max_offset; | ||
2444 | esp->prev_stp = esp_dev->sync_min_period; | ||
2445 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
2446 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
2447 | if (esp->erev > esp100a) { | ||
2448 | esp->prev_cfg3 = esp->config3[sp->device->id]; | ||
2449 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
2450 | } | ||
2451 | } | ||
2452 | esp->current_SC = sp; | ||
2453 | } | ||
2454 | |||
2455 | /* This will place the current working command back into the issue queue | ||
2456 | * if we are to receive a reselection amidst a selection attempt. | ||
2457 | */ | ||
2458 | static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp) | ||
2459 | { | ||
2460 | if (!esp->disconnected_SC) | ||
2461 | ESPLOG(("esp%d: Weird, being reselected but disconnected " | ||
2462 | "command queue is empty.\n", esp->esp_id)); | ||
2463 | esp->snip = 0; | ||
2464 | esp->current_SC = NULL; | ||
2465 | sp->SCp.phase = not_issued; | ||
2466 | append_SC(&esp->issue_SC, sp); | ||
2467 | } | ||
2468 | |||
2469 | /* Begin message in phase. */ | ||
2470 | static int esp_do_msgin(struct esp *esp) | ||
2471 | { | ||
2472 | /* Must be very careful with the fifo on the HME */ | ||
2473 | if ((esp->erev != fashme) || | ||
2474 | !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY)) | ||
2475 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2476 | esp_maybe_nop(esp); | ||
2477 | esp_cmd(esp, ESP_CMD_TI); | ||
2478 | esp->msgin_len = 1; | ||
2479 | esp->msgin_ctr = 0; | ||
2480 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
2481 | return do_work_bus; | ||
2482 | } | ||
2483 | |||
2484 | /* This uses various DMA csr fields and the fifo flags count value to | ||
2485 | * determine how many bytes were successfully sent/received by the ESP. | ||
2486 | */ | ||
2487 | static inline int esp_bytes_sent(struct esp *esp, int fifo_count) | ||
2488 | { | ||
2489 | int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma; | ||
2490 | |||
2491 | if (esp->dma->revision == dvmarev1) | ||
2492 | rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11)); | ||
2493 | return rval - fifo_count; | ||
2494 | } | ||
2495 | |||
2496 | static inline void advance_sg(struct scsi_cmnd *sp) | ||
2497 | { | ||
2498 | ++sp->SCp.buffer; | ||
2499 | --sp->SCp.buffers_residual; | ||
2500 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
2501 | sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
2502 | } | ||
2503 | |||
2504 | /* Please note that the way I've coded these routines is that I _always_ | ||
2505 | * check for a disconnect during any and all information transfer | ||
2506 | * phases. The SCSI standard states that the target _can_ cause a BUS | ||
2507 | * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note | ||
2508 | * that during information transfer phases the target controls every | ||
2509 | * change in phase, the only thing the initiator can do is "ask" for | ||
2510 | * a message out phase by driving ATN true. The target can, and sometimes | ||
2511 | * will, completely ignore this request so we cannot assume anything when | ||
2512 | * we try to force a message out phase to abort/reset a target. Most of | ||
2513 | * the time the target will eventually be nice and go to message out, so | ||
2514 | * we may have to hold on to our state about what we want to tell the target | ||
2515 | * for some period of time. | ||
2516 | */ | ||
2517 | |||
2518 | /* I think I have things working here correctly. Even partial transfers | ||
2519 | * within a buffer or sub-buffer should not upset us at all no matter | ||
2520 | * how bad the target and/or ESP fucks things up. | ||
2521 | */ | ||
2522 | static int esp_do_data(struct esp *esp) | ||
2523 | { | ||
2524 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2525 | int thisphase, hmuch; | ||
2526 | |||
2527 | ESPDATA(("esp_do_data: ")); | ||
2528 | esp_maybe_nop(esp); | ||
2529 | thisphase = sreg_to_dataphase(esp->sreg); | ||
2530 | esp_advance_phase(SCptr, thisphase); | ||
2531 | ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT")); | ||
2532 | hmuch = dma_can_transfer(esp, SCptr); | ||
2533 | if (hmuch > (64 * 1024) && (esp->erev != fashme)) | ||
2534 | hmuch = (64 * 1024); | ||
2535 | ESPDATA(("hmuch<%d> ", hmuch)); | ||
2536 | esp->current_transfer_size = hmuch; | ||
2537 | |||
2538 | if (esp->erev == fashme) { | ||
2539 | u32 tmp = esp->prev_hme_dmacsr; | ||
2540 | |||
2541 | /* Always set the ESP count registers first. */ | ||
2542 | esp_setcount(esp->eregs, hmuch, 1); | ||
2543 | |||
2544 | /* Get the DMA csr computed. */ | ||
2545 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
2546 | if (thisphase == in_datain) | ||
2547 | tmp |= DMA_ST_WRITE; | ||
2548 | else | ||
2549 | tmp &= ~(DMA_ST_WRITE); | ||
2550 | esp->prev_hme_dmacsr = tmp; | ||
2551 | |||
2552 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2553 | if (thisphase == in_datain) { | ||
2554 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2555 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2556 | } else { | ||
2557 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2558 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2559 | } | ||
2560 | sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR); | ||
2561 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2562 | } else { | ||
2563 | esp_setcount(esp->eregs, hmuch, 0); | ||
2564 | dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)), | ||
2565 | hmuch, (thisphase == in_datain)); | ||
2566 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2567 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2568 | } | ||
2569 | return do_intr_end; | ||
2570 | } | ||
2571 | |||
2572 | /* See how successful the data transfer was. */ | ||
2573 | static int esp_do_data_finale(struct esp *esp) | ||
2574 | { | ||
2575 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2576 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2577 | int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0; | ||
2578 | |||
2579 | ESPDATA(("esp_do_data_finale: ")); | ||
2580 | |||
2581 | if (SCptr->SCp.phase == in_datain) { | ||
2582 | if (esp->sreg & ESP_STAT_PERR) { | ||
2583 | /* Yuck, parity error. The ESP asserts ATN | ||
2584 | * so that we can go to message out phase | ||
2585 | * immediately and inform the target that | ||
2586 | * something bad happened. | ||
2587 | */ | ||
2588 | ESPLOG(("esp%d: data bad parity detected.\n", | ||
2589 | esp->esp_id)); | ||
2590 | esp->cur_msgout[0] = INITIATOR_ERROR; | ||
2591 | esp->msgout_len = 1; | ||
2592 | } | ||
2593 | dma_drain(esp); | ||
2594 | } | ||
2595 | dma_invalidate(esp); | ||
2596 | |||
2597 | /* This could happen for the above parity error case. */ | ||
2598 | if (esp->ireg != ESP_INTR_BSERV) { | ||
2599 | /* Please go to msgout phase, please please please... */ | ||
2600 | ESPLOG(("esp%d: !BSERV after data, probably to msgout\n", | ||
2601 | esp->esp_id)); | ||
2602 | return esp_do_phase_determine(esp); | ||
2603 | } | ||
2604 | |||
2605 | /* Check for partial transfers and other horrible events. | ||
2606 | * Note, here we read the real fifo flags register even | ||
2607 | * on HME broken adapters because we skip the HME fifo | ||
2608 | * workaround code in esp_handle() if we are doing data | ||
2609 | * phase things. We don't want to fuck directly with | ||
2610 | * the fifo like that, especially if doing synchronous | ||
2611 | * transfers! Also, will need to double the count on | ||
2612 | * HME if we are doing wide transfers, as the HME fifo | ||
2613 | * will move and count 16-bit quantities during wide data. | ||
2614 | * SMCC _and_ Qlogic can both bite me. | ||
2615 | */ | ||
2616 | fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
2617 | if (esp->erev != fashme) | ||
2618 | ecount = esp_getcount(esp->eregs, 0); | ||
2619 | bytes_sent = esp->current_transfer_size; | ||
2620 | |||
2621 | ESPDATA(("trans_sz(%d), ", bytes_sent)); | ||
2622 | if (esp->erev == fashme) { | ||
2623 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
2624 | ecount = esp_getcount(esp->eregs, 1); | ||
2625 | bytes_sent -= ecount; | ||
2626 | } | ||
2627 | |||
2628 | /* Always subtract any cruft remaining in the FIFO. */ | ||
2629 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
2630 | fifocnt <<= 1; | ||
2631 | if (SCptr->SCp.phase == in_dataout) | ||
2632 | bytes_sent -= fifocnt; | ||
2633 | |||
2634 | /* I have an IBM disk which exhibits the following | ||
2635 | * behavior during writes to it. It disconnects in | ||
2636 | * the middle of a partial transfer, the current sglist | ||
2637 | * buffer is 1024 bytes, the disk stops data transfer | ||
2638 | * at 512 bytes. | ||
2639 | * | ||
2640 | * However the FAS366 reports that 32 more bytes were | ||
2641 | * transferred than really were. This is precisely | ||
2642 | * the size of a fully loaded FIFO in wide scsi mode. | ||
2643 | * The FIFO state recorded indicates that it is empty. | ||
2644 | * | ||
2645 | * I have no idea if this is a bug in the FAS366 chip | ||
2646 | * or a bug in the firmware on this IBM disk. In any | ||
2647 | * event the following seems to be a good workaround. -DaveM | ||
2648 | */ | ||
2649 | if (bytes_sent != esp->current_transfer_size && | ||
2650 | SCptr->SCp.phase == in_dataout) { | ||
2651 | int mask = (64 - 1); | ||
2652 | |||
2653 | if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0) | ||
2654 | mask >>= 1; | ||
2655 | |||
2656 | if (bytes_sent & mask) | ||
2657 | bytes_sent -= (bytes_sent & mask); | ||
2658 | } | ||
2659 | } else { | ||
2660 | if (!(esp->sreg & ESP_STAT_TCNT)) | ||
2661 | bytes_sent -= ecount; | ||
2662 | if (SCptr->SCp.phase == in_dataout) | ||
2663 | bytes_sent -= fifocnt; | ||
2664 | } | ||
2665 | |||
2666 | ESPDATA(("bytes_sent(%d), ", bytes_sent)); | ||
2667 | |||
2668 | /* If we were in synchronous mode, check for peculiarities. */ | ||
2669 | if (esp->erev == fashme) { | ||
2670 | if (esp_dev->sync_max_offset) { | ||
2671 | if (SCptr->SCp.phase == in_dataout) | ||
2672 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2673 | } else { | ||
2674 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2675 | } | ||
2676 | } else { | ||
2677 | if (esp_dev->sync_max_offset) | ||
2678 | bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt); | ||
2679 | else | ||
2680 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2681 | } | ||
2682 | |||
2683 | /* Until we are sure of what has happened, we are certainly | ||
2684 | * in the dark. | ||
2685 | */ | ||
2686 | esp_advance_phase(SCptr, in_the_dark); | ||
2687 | |||
2688 | if (bytes_sent < 0) { | ||
2689 | /* I've seen this happen due to lost state in this | ||
2690 | * driver. No idea why it happened, but allowing | ||
2691 | * this value to be negative caused things to | ||
2692 | * lock up. This allows greater chance of recovery. | ||
2693 | * In fact every time I've seen this, it has been | ||
2694 | * a driver bug without question. | ||
2695 | */ | ||
2696 | ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id)); | ||
2697 | ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n", | ||
2698 | esp->esp_id, | ||
2699 | esp->current_transfer_size, fifocnt, ecount)); | ||
2700 | ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n", | ||
2701 | esp->esp_id, | ||
2702 | SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual)); | ||
2703 | ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, | ||
2704 | SCptr->device->id)); | ||
2705 | SCptr->device->borken = 1; | ||
2706 | esp_dev->sync = 0; | ||
2707 | bytes_sent = 0; | ||
2708 | } | ||
2709 | |||
2710 | /* Update the state of our transfer. */ | ||
2711 | SCptr->SCp.ptr += bytes_sent; | ||
2712 | SCptr->SCp.this_residual -= bytes_sent; | ||
2713 | if (SCptr->SCp.this_residual < 0) { | ||
2714 | /* shit */ | ||
2715 | ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id)); | ||
2716 | SCptr->SCp.this_residual = 0; | ||
2717 | } | ||
2718 | |||
2719 | /* Maybe continue. */ | ||
2720 | if (!bogus_data) { | ||
2721 | ESPDATA(("!bogus_data, ")); | ||
2722 | |||
2723 | /* NO MATTER WHAT, we advance the scatterlist; | ||
2724 | * if the target should decide to disconnect | ||
2725 | * in between scatter chunks (which is common) | ||
2726 | * we could otherwise die horribly! I used to have | ||
2727 | * the sg advance occur only if we were going back | ||
2728 | * into (or staying in) a data phase; you can | ||
2729 | * imagine the hell I went through trying to | ||
2730 | * figure this out. | ||
2731 | */ | ||
2732 | if (SCptr->use_sg && !SCptr->SCp.this_residual) | ||
2733 | advance_sg(SCptr); | ||
2734 | if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) { | ||
2735 | ESPDATA(("to more data\n")); | ||
2736 | return esp_do_data(esp); | ||
2737 | } | ||
2738 | ESPDATA(("to new phase\n")); | ||
2739 | return esp_do_phase_determine(esp); | ||
2740 | } | ||
2741 | /* Bogus data, just wait for next interrupt. */ | ||
2742 | ESPLOG(("esp%d: bogus_data during end of data phase\n", | ||
2743 | esp->esp_id)); | ||
2744 | return do_intr_end; | ||
2745 | } | ||
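The FIFO-boundary workaround above is easier to follow in isolation. Below is a minimal standalone sketch of the same rounding; the helper name and the wrapper around it are illustrative, not part of the driver.

#include <stdio.h>

/* Round a partially completed data-out transfer down to the nearest
 * FIFO-size boundary: 64 bytes in wide mode, 32 bytes in narrow mode,
 * mirroring the FAS366 over-report workaround in esp_do_data_finale().
 */
static int round_to_fifo_boundary(int bytes_sent, int wide)
{
	int mask = wide ? (64 - 1) : (32 - 1);

	return bytes_sent - (bytes_sent & mask);
}

int main(void)
{
	/* The IBM-disk case from the comment above: the disk stopped at
	 * 512 bytes but the chip reported 32 extra (stale FIFO) bytes.
	 */
	printf("%d\n", round_to_fifo_boundary(512 + 32, 1));	/* prints 512 */
	return 0;
}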
2746 | |||
2747 | /* We received a non-good status return at the end of | ||
2748 | * running a SCSI command. This is used to decide if | ||
2749 | * we should clear our synchronous transfer state for | ||
2750 | * such a device when that happens. | ||
2751 | * | ||
2752 | * The idea is that when spinning up a disk or rewinding | ||
2753 | * a tape, we don't want to go into a loop re-negotiating | ||
2754 | * synchronous capabilities over and over. | ||
2755 | */ | ||
2756 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | ||
2757 | { | ||
2758 | u8 cmd = sp->cmnd[0]; | ||
2759 | |||
2760 | /* These cases are for spinning up a disk and | ||
2761 | * waiting for that spinup to complete. | ||
2762 | */ | ||
2763 | if (cmd == START_STOP) | ||
2764 | return 0; | ||
2765 | |||
2766 | if (cmd == TEST_UNIT_READY) | ||
2767 | return 0; | ||
2768 | |||
2769 | /* One more special case for SCSI tape drives, | ||
2770 | * this is what is used to probe the device for | ||
2771 | * completion of a rewind or tape load operation. | ||
2772 | */ | ||
2773 | if (sp->device->type == TYPE_TAPE) { | ||
2774 | if (cmd == MODE_SENSE) | ||
2775 | return 0; | ||
2776 | } | ||
2777 | |||
2778 | return 1; | ||
2779 | } | ||
2780 | |||
2781 | /* Either a command is completing or a target is dropping off the bus | ||
2782 | * to continue the command in the background so we can do other work. | ||
2783 | */ | ||
2784 | static int esp_do_freebus(struct esp *esp) | ||
2785 | { | ||
2786 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2787 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2788 | int rval; | ||
2789 | |||
2790 | rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing); | ||
2791 | if (rval) | ||
2792 | return rval; | ||
2793 | if (esp->ireg != ESP_INTR_DC) { | ||
2794 | ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id)); | ||
2795 | return do_reset_bus; /* target will not drop BSY... */ | ||
2796 | } | ||
2797 | esp->msgout_len = 0; | ||
2798 | esp->prevmsgout = NOP; | ||
2799 | if (esp->prevmsgin == COMMAND_COMPLETE) { | ||
2800 | /* Normal end of nexus. */ | ||
2801 | if (esp->disconnected_SC || (esp->erev == fashme)) | ||
2802 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2803 | |||
2804 | if (SCptr->SCp.Status != GOOD && | ||
2805 | SCptr->SCp.Status != CONDITION_GOOD && | ||
2806 | ((1<<SCptr->device->id) & esp->targets_present) && | ||
2807 | esp_dev->sync && | ||
2808 | esp_dev->sync_max_offset) { | ||
2809 | /* SCSI standard says that the synchronous capabilities | ||
2810 | * should be renegotiated at this point. Most likely | ||
2811 | * we are about to request sense from this target | ||
2812 | * in which case we want to avoid using sync | ||
2813 | * transfers until we are sure of the current target | ||
2814 | * state. | ||
2815 | */ | ||
2816 | ESPMISC(("esp: Status <%d> for target %d lun %d\n", | ||
2817 | SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun)); | ||
2818 | |||
2819 | /* But don't do this when spinning up a disk at | ||
2820 | * boot time while we poll for completion as it | ||
2821 | * fills up the console with messages. Also, tapes | ||
2822 | * can report not ready many times right after | ||
2823 | * loading up a tape. | ||
2824 | */ | ||
2825 | if (esp_should_clear_sync(SCptr) != 0) | ||
2826 | esp_dev->sync = 0; | ||
2827 | } | ||
2828 | ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2829 | esp_done(esp, ((SCptr->SCp.Status & 0xff) | | ||
2830 | ((SCptr->SCp.Message & 0xff)<<8) | | ||
2831 | (DID_OK << 16))); | ||
2832 | } else if (esp->prevmsgin == DISCONNECT) { | ||
2833 | /* Normal disconnect. */ | ||
2834 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2835 | ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2836 | append_SC(&esp->disconnected_SC, SCptr); | ||
2837 | esp->current_SC = NULL; | ||
2838 | if (esp->issue_SC) | ||
2839 | esp_exec_cmd(esp); | ||
2840 | } else { | ||
2841 | /* Driver bug, we do not expect a disconnect here | ||
2842 | * and should not have advanced the state engine | ||
2843 | * to in_freeing. | ||
2844 | */ | ||
2845 | ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n", | ||
2846 | esp->esp_id)); | ||
2847 | return do_reset_bus; | ||
2848 | } | ||
2849 | return do_intr_end; | ||
2850 | } | ||
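The completion value handed to esp_done() above packs three byte fields into one word. A small standalone illustration of that layout follows; the helper is illustrative, and DID_OK is the usual 0x00 host code from <scsi/scsi.h>.

#include <stdio.h>

/* How the 32-bit result passed to esp_done() is laid out: SCSI status
 * in bits 0-7, the message byte in bits 8-15, and the host byte
 * (e.g. DID_OK) in bits 16-23.
 */
#define DID_OK 0x00

static unsigned int pack_result(unsigned char status,
				unsigned char message,
				unsigned char host_byte)
{
	return (status & 0xff) | ((message & 0xff) << 8) | (host_byte << 16);
}

int main(void)
{
	unsigned int res = pack_result(0x02 /* CHECK CONDITION */, 0x00, DID_OK);

	printf("status=%02x msg=%02x host=%02x\n",
	       res & 0xff, (res >> 8) & 0xff, (res >> 16) & 0xff);
	return 0;
}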
2851 | |||
2852 | /* When a reselect occurs, and we cannot find the command to | ||
2853 | * reconnect to in our queues, we do this. | ||
2854 | */ | ||
2855 | static int esp_bad_reconnect(struct esp *esp) | ||
2856 | { | ||
2857 | struct scsi_cmnd *sp; | ||
2858 | |||
2859 | ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n", | ||
2860 | esp->esp_id)); | ||
2861 | ESPLOG(("QUEUE DUMP\n")); | ||
2862 | sp = esp->issue_SC; | ||
2863 | ESPLOG(("esp%d: issue_SC[", esp->esp_id)); | ||
2864 | while (sp) { | ||
2865 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2866 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2867 | } | ||
2868 | ESPLOG(("]\n")); | ||
2869 | sp = esp->current_SC; | ||
2870 | ESPLOG(("esp%d: current_SC[", esp->esp_id)); | ||
2871 | if (sp) | ||
2872 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2873 | else | ||
2874 | ESPLOG(("<NULL>")); | ||
2875 | ESPLOG(("]\n")); | ||
2876 | sp = esp->disconnected_SC; | ||
2877 | ESPLOG(("esp%d: disconnected_SC[", esp->esp_id)); | ||
2878 | while (sp) { | ||
2879 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2880 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2881 | } | ||
2882 | ESPLOG(("]\n")); | ||
2883 | return do_reset_bus; | ||
2884 | } | ||
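The queue dumps above rely on the driver threading its command queues through each command's host_scribble pointer. A standalone sketch of that traversal idiom, using a stand-in struct rather than the real scsi_cmnd:

#include <stdio.h>
#include <stddef.h>

/* 'struct cmd' stands in for scsi_cmnd; the issue/disconnected queues
 * are singly linked through the generic host_scribble pointer.
 */
struct cmd {
	int id, lun;
	char *host_scribble;	/* next command in the queue */
};

static void dump_queue(const char *name, struct cmd *sp)
{
	printf("%s[", name);
	while (sp) {
		printf("<%02x,%02x>", sp->id, sp->lun);
		sp = (struct cmd *) sp->host_scribble;
	}
	printf("]\n");
}

int main(void)
{
	struct cmd b = { 3, 0, NULL };
	struct cmd a = { 1, 0, (char *) &b };

	dump_queue("issue_SC", &a);	/* issue_SC[<01,00><03,00>] */
	return 0;
}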
2885 | |||
2886 | /* Do the needful when a target tries to reconnect to us. */ | ||
2887 | static int esp_do_reconnect(struct esp *esp) | ||
2888 | { | ||
2889 | int lun, target; | ||
2890 | struct scsi_cmnd *SCptr; | ||
2891 | |||
2892 | /* Check for all bogus conditions first. */ | ||
2893 | target = reconnect_target(esp); | ||
2894 | if (target < 0) { | ||
2895 | ESPDISC(("bad bus bits\n")); | ||
2896 | return do_reset_bus; | ||
2897 | } | ||
2898 | lun = reconnect_lun(esp); | ||
2899 | if (lun < 0) { | ||
2900 | ESPDISC(("target=%2x, bad identify msg\n", target)); | ||
2901 | return do_reset_bus; | ||
2902 | } | ||
2903 | |||
2904 | /* Things look ok... */ | ||
2905 | ESPDISC(("R<%02x,%02x>", target, lun)); | ||
2906 | |||
2907 | /* Must not flush FIFO or DVMA on HME. */ | ||
2908 | if (esp->erev != fashme) { | ||
2909 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2910 | if (esp100_reconnect_hwbug(esp)) | ||
2911 | return do_reset_bus; | ||
2912 | esp_cmd(esp, ESP_CMD_NULL); | ||
2913 | } | ||
2914 | |||
2915 | SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun); | ||
2916 | if (!SCptr) | ||
2917 | return esp_bad_reconnect(esp); | ||
2918 | |||
2919 | esp_connect(esp, SCptr); | ||
2920 | esp_cmd(esp, ESP_CMD_MOK); | ||
2921 | |||
2922 | if (esp->erev == fashme) | ||
2923 | sbus_writeb(((SCptr->device->id & 0xf) | | ||
2924 | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)), | ||
2925 | esp->eregs + ESP_BUSID); | ||
2926 | |||
2927 | /* Reconnect implies a restore pointers operation. */ | ||
2928 | esp_restore_pointers(esp, SCptr); | ||
2929 | |||
2930 | esp->snip = 0; | ||
2931 | esp_advance_phase(SCptr, in_the_dark); | ||
2932 | return do_intr_end; | ||
2933 | } | ||
2934 | |||
2935 | /* End of NEXUS (hopefully), pick up status + message byte then leave if | ||
2936 | * all goes well. | ||
2937 | */ | ||
2938 | static int esp_do_status(struct esp *esp) | ||
2939 | { | ||
2940 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2941 | int intr, rval; | ||
2942 | |||
2943 | rval = skipahead1(esp, SCptr, in_the_dark, in_status); | ||
2944 | if (rval) | ||
2945 | return rval; | ||
2946 | intr = esp->ireg; | ||
2947 | ESPSTAT(("esp_do_status: ")); | ||
2948 | if (intr != ESP_INTR_DC) { | ||
2949 | int message_out = 0; /* for parity problems */ | ||
2950 | |||
2951 | /* Ack the message. */ | ||
2952 | ESPSTAT(("ack msg, ")); | ||
2953 | esp_cmd(esp, ESP_CMD_MOK); | ||
2954 | |||
2955 | if (esp->erev != fashme) { | ||
2956 | dma_flashclear(esp); | ||
2957 | |||
2958 | /* Wait till the first bits settle. */ | ||
2959 | while (esp->esp_command[0] == 0xff) | ||
2960 | udelay(1); | ||
2961 | } else { | ||
2962 | esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0]; | ||
2963 | esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1]; | ||
2964 | } | ||
2965 | |||
2966 | ESPSTAT(("got something, ")); | ||
2967 | /* ESP chimes in with one of | ||
2968 | * | ||
2969 | * 1) function done interrupt: | ||
2970 | * both status and message in bytes | ||
2971 | * are available | ||
2972 | * | ||
2973 | * 2) bus service interrupt: | ||
2974 | * only status byte was acquired | ||
2975 | * | ||
2976 | * 3) Anything else: | ||
2977 | * can't happen, but we test for it | ||
2978 | * anyway | ||
2979 | * | ||
2980 | * ALSO: If bad parity was detected on either | ||
2981 | * the status _or_ the message byte then | ||
2982 | * the ESP has asserted ATN on the bus | ||
2983 | * and we must therefore wait for the | ||
2984 | * next phase change. | ||
2985 | */ | ||
2986 | if (intr & ESP_INTR_FDONE) { | ||
2987 | /* We got it all, hallelujah. */ | ||
2988 | ESPSTAT(("got both, ")); | ||
2989 | SCptr->SCp.Status = esp->esp_command[0]; | ||
2990 | SCptr->SCp.Message = esp->esp_command[1]; | ||
2991 | esp->prevmsgin = SCptr->SCp.Message; | ||
2992 | esp->cur_msgin[0] = SCptr->SCp.Message; | ||
2993 | if (esp->sreg & ESP_STAT_PERR) { | ||
2994 | /* There was bad parity for the | ||
2995 | * message byte, the status byte | ||
2996 | * was ok. | ||
2997 | */ | ||
2998 | message_out = MSG_PARITY_ERROR; | ||
2999 | } | ||
3000 | } else if (intr == ESP_INTR_BSERV) { | ||
3001 | /* Only got status byte. */ | ||
3002 | ESPLOG(("esp%d: got status only, ", esp->esp_id)); | ||
3003 | if (!(esp->sreg & ESP_STAT_PERR)) { | ||
3004 | SCptr->SCp.Status = esp->esp_command[0]; | ||
3005 | SCptr->SCp.Message = 0xff; | ||
3006 | } else { | ||
3007 | /* The status byte had bad parity. | ||
3008 | * We leave the scsi_pointer Status | ||
3009 | * field alone as we set it to a default | ||
3010 | * of CHECK_CONDITION in esp_queue. | ||
3011 | */ | ||
3012 | message_out = INITIATOR_ERROR; | ||
3013 | } | ||
3014 | } else { | ||
3015 | /* This shouldn't happen ever. */ | ||
3016 | ESPSTAT(("got bolixed\n")); | ||
3017 | esp_advance_phase(SCptr, in_the_dark); | ||
3018 | return esp_do_phase_determine(esp); | ||
3019 | } | ||
3020 | |||
3021 | if (!message_out) { | ||
3022 | ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status, | ||
3023 | SCptr->SCp.Message)); | ||
3024 | if (SCptr->SCp.Message == COMMAND_COMPLETE) { | ||
3025 | ESPSTAT(("and was COMMAND_COMPLETE\n")); | ||
3026 | esp_advance_phase(SCptr, in_freeing); | ||
3027 | return esp_do_freebus(esp); | ||
3028 | } else { | ||
3029 | ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n", | ||
3030 | esp->esp_id)); | ||
3031 | esp->msgin_len = esp->msgin_ctr = 1; | ||
3032 | esp_advance_phase(SCptr, in_msgindone); | ||
3033 | return esp_do_msgindone(esp); | ||
3034 | } | ||
3035 | } else { | ||
3036 | /* With luck we'll be able to let the target | ||
3037 | * know that bad parity happened; it will know | ||
3038 | * which byte caused the problem and send it | ||
3039 | * again. For the case where the status byte | ||
3040 | * receives bad parity, I do not believe most | ||
3041 | * targets recover very well. We'll see. | ||
3042 | */ | ||
3043 | ESPLOG(("esp%d: bad parity somewhere mout=%2x\n", | ||
3044 | esp->esp_id, message_out)); | ||
3045 | esp->cur_msgout[0] = message_out; | ||
3046 | esp->msgout_len = esp->msgout_ctr = 1; | ||
3047 | esp_advance_phase(SCptr, in_the_dark); | ||
3048 | return esp_do_phase_determine(esp); | ||
3049 | } | ||
3050 | } else { | ||
3051 | /* If we disconnect now, all hell breaks loose. */ | ||
3052 | ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id)); | ||
3053 | esp_advance_phase(SCptr, in_the_dark); | ||
3054 | return esp_do_phase_determine(esp); | ||
3055 | } | ||
3056 | } | ||
3057 | |||
3058 | static int esp_enter_status(struct esp *esp) | ||
3059 | { | ||
3060 | u8 thecmd = ESP_CMD_ICCSEQ; | ||
3061 | |||
3062 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3063 | if (esp->erev != fashme) { | ||
3064 | u32 tmp; | ||
3065 | |||
3066 | esp->esp_command[0] = esp->esp_command[1] = 0xff; | ||
3067 | sbus_writeb(2, esp->eregs + ESP_TCLOW); | ||
3068 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
3069 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3070 | tmp |= (DMA_ST_WRITE | DMA_ENABLE); | ||
3071 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3072 | if (esp->dma->revision == dvmaesc1) | ||
3073 | sbus_writel(0x100, esp->dregs + DMA_COUNT); | ||
3074 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3075 | thecmd |= ESP_CMD_DMA; | ||
3076 | } | ||
3077 | esp_cmd(esp, thecmd); | ||
3078 | esp_advance_phase(esp->current_SC, in_status); | ||
3079 | |||
3080 | return esp_do_status(esp); | ||
3081 | } | ||
3082 | |||
3083 | static int esp_disconnect_amidst_phases(struct esp *esp) | ||
3084 | { | ||
3085 | struct scsi_cmnd *sp = esp->current_SC; | ||
3086 | struct esp_device *esp_dev = sp->device->hostdata; | ||
3087 | |||
3088 | /* This means real problems if we see this | ||
3089 | * here. Unless we were actually trying | ||
3090 | * to force the device to abort/reset. | ||
3091 | */ | ||
3092 | ESPLOG(("esp%d: Disconnect amidst phases, ", esp->esp_id)); | ||
3093 | ESPLOG(("pphase<%s> cphase<%s>, ", | ||
3094 | phase_string(sp->SCp.phase), | ||
3095 | phase_string(sp->SCp.sent_command))); | ||
3096 | |||
3097 | if (esp->disconnected_SC != NULL || (esp->erev == fashme)) | ||
3098 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3099 | |||
3100 | switch (esp->cur_msgout[0]) { | ||
3101 | default: | ||
3102 | /* We didn't expect this to happen at all. */ | ||
3103 | ESPLOG(("device is bolixed\n")); | ||
3104 | esp_advance_phase(sp, in_tgterror); | ||
3105 | esp_done(esp, (DID_ERROR << 16)); | ||
3106 | break; | ||
3107 | |||
3108 | case BUS_DEVICE_RESET: | ||
3109 | ESPLOG(("device reset successful\n")); | ||
3110 | esp_dev->sync_max_offset = 0; | ||
3111 | esp_dev->sync_min_period = 0; | ||
3112 | esp_dev->sync = 0; | ||
3113 | esp_advance_phase(sp, in_resetdev); | ||
3114 | esp_done(esp, (DID_RESET << 16)); | ||
3115 | break; | ||
3116 | |||
3117 | case ABORT: | ||
3118 | ESPLOG(("device abort successful\n")); | ||
3119 | esp_advance_phase(sp, in_abortone); | ||
3120 | esp_done(esp, (DID_ABORT << 16)); | ||
3121 | break; | ||
3122 | |||
3123 | } | ||
3124 | return do_intr_end; | ||
3125 | } | ||
3126 | |||
3127 | static int esp_enter_msgout(struct esp *esp) | ||
3128 | { | ||
3129 | esp_advance_phase(esp->current_SC, in_msgout); | ||
3130 | return esp_do_msgout(esp); | ||
3131 | } | ||
3132 | |||
3133 | static int esp_enter_msgin(struct esp *esp) | ||
3134 | { | ||
3135 | esp_advance_phase(esp->current_SC, in_msgin); | ||
3136 | return esp_do_msgin(esp); | ||
3137 | } | ||
3138 | |||
3139 | static int esp_enter_cmd(struct esp *esp) | ||
3140 | { | ||
3141 | esp_advance_phase(esp->current_SC, in_cmdbegin); | ||
3142 | return esp_do_cmdbegin(esp); | ||
3143 | } | ||
3144 | |||
3145 | static int esp_enter_badphase(struct esp *esp) | ||
3146 | { | ||
3147 | ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id, | ||
3148 | esp->sreg & ESP_STAT_PMASK)); | ||
3149 | return do_reset_bus; | ||
3150 | } | ||
3151 | |||
3152 | typedef int (*espfunc_t)(struct esp *); | ||
3153 | |||
3154 | static espfunc_t phase_vector[] = { | ||
3155 | esp_do_data, /* ESP_DOP */ | ||
3156 | esp_do_data, /* ESP_DIP */ | ||
3157 | esp_enter_cmd, /* ESP_CMDP */ | ||
3158 | esp_enter_status, /* ESP_STATP */ | ||
3159 | esp_enter_badphase, /* ESP_STAT_PMSG */ | ||
3160 | esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */ | ||
3161 | esp_enter_msgout, /* ESP_MOP */ | ||
3162 | esp_enter_msgin, /* ESP_MIP */ | ||
3163 | }; | ||
3164 | |||
3165 | /* The target has control of the bus and we have to see where it has | ||
3166 | * taken us. | ||
3167 | */ | ||
3168 | static int esp_do_phase_determine(struct esp *esp) | ||
3169 | { | ||
3170 | if ((esp->ireg & ESP_INTR_DC) != 0) | ||
3171 | return esp_disconnect_amidst_phases(esp); | ||
3172 | return phase_vector[esp->sreg & ESP_STAT_PMASK](esp); | ||
3173 | } | ||
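The table above is indexed directly by the SCSI phase bits of the status register. A standalone sketch of the same dispatch idea follows; the handlers are stand-ins that only name the phase, with the ordering taken from the comments on phase_vector[] above.

#include <stdio.h>

/* The low three status-register bits (I/O, C/D, MSG) form the SCSI bus
 * phase code, which indexes an 8-entry handler table directly.
 */
typedef const char *(*phase_fn)(void);

static const char *data_out(void) { return "DATA OUT"; }
static const char *data_in(void)  { return "DATA IN"; }
static const char *command(void)  { return "COMMAND"; }
static const char *status(void)   { return "STATUS"; }
static const char *badphase(void) { return "reserved/bad phase"; }
static const char *msg_out(void)  { return "MESSAGE OUT"; }
static const char *msg_in(void)   { return "MESSAGE IN"; }

static phase_fn example_vector[8] = {
	data_out, data_in, command, status,
	badphase, badphase, msg_out, msg_in,
};

int main(void)
{
	unsigned char sreg = 0x07;	/* example status register value */

	puts(example_vector[sreg & 0x07]());	/* 0x07 -> MESSAGE IN */
	return 0;
}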
3174 | |||
3175 | /* First interrupt after exec'ing a cmd comes here. */ | ||
3176 | static int esp_select_complete(struct esp *esp) | ||
3177 | { | ||
3178 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3179 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3180 | int cmd_bytes_sent, fcnt; | ||
3181 | |||
3182 | if (esp->erev != fashme) | ||
3183 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
3184 | |||
3185 | if (esp->erev == fashme) | ||
3186 | fcnt = esp->hme_fifo_workaround_count; | ||
3187 | else | ||
3188 | fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
3189 | |||
3190 | cmd_bytes_sent = esp_bytes_sent(esp, fcnt); | ||
3191 | dma_invalidate(esp); | ||
3192 | |||
3193 | /* Let's check to see if a reselect happened | ||
3194 | * while we were trying to select. This must | ||
3195 | * be checked first. | ||
3196 | */ | ||
3197 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
3198 | esp_reconnect(esp, SCptr); | ||
3199 | return esp_do_reconnect(esp); | ||
3200 | } | ||
3201 | |||
3202 | /* Looks like things worked; we should see a bus service & | ||
3203 | * a function complete interrupt at this point. Note we | ||
3204 | * are doing a direct comparison because we don't want to | ||
3205 | * be fooled into thinking selection was successful if | ||
3206 | * ESP_INTR_DC is set, see below. | ||
3207 | */ | ||
3208 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
3209 | /* target speaks... */ | ||
3210 | esp->targets_present |= (1<<SCptr->device->id); | ||
3211 | |||
3212 | /* What if the target ignores the sdtr? */ | ||
3213 | if (esp->snip) | ||
3214 | esp_dev->sync = 1; | ||
3215 | |||
3216 | /* See how far, if at all, we got in getting | ||
3217 | * the information out to the target. | ||
3218 | */ | ||
3219 | switch (esp->seqreg) { | ||
3220 | default: | ||
3221 | |||
3222 | case ESP_STEP_ASEL: | ||
3223 | /* Arbitration won, target selected, but | ||
3224 | * we are in some phase which is not command | ||
3225 | * phase nor is it message out phase. | ||
3226 | * | ||
3227 | * XXX We've confused the target, obviously. | ||
3228 | * XXX So clear its state, but we also end | ||
3229 | * XXX up clearing everyone else's. That isn't | ||
3230 | * XXX so nice. I'd like to just reset this | ||
3231 | * XXX target, but if I cannot even get its | ||
3232 | * XXX attention and finish selection to talk | ||
3233 | * XXX to it, there is not much more I can do. | ||
3234 | * XXX If we have a loaded bus we're going to | ||
3235 | * XXX spend the next second or so renegotiating | ||
3236 | * XXX for synchronous transfers. | ||
3237 | */ | ||
3238 | ESPLOG(("esp%d: STEP_ASEL for tgt %d\n", | ||
3239 | esp->esp_id, SCptr->device->id)); | ||
3240 | |||
3241 | case ESP_STEP_SID: | ||
3242 | /* Arbitration won, target selected, went | ||
3243 | * to message out phase, sent one message | ||
3244 | * byte, then we stopped. ATN is asserted | ||
3245 | * on the SCSI bus and the target is still | ||
3246 | * there hanging on. This is a legal | ||
3247 | * sequence step if we gave the ESP a select | ||
3248 | * and stop command. | ||
3249 | * | ||
3250 | * XXX See above, I could set the borken flag | ||
3251 | * XXX in the device struct and retry the | ||
3252 | * XXX command. But would that help for | ||
3253 | * XXX tagged capable targets? | ||
3254 | */ | ||
3255 | |||
3256 | case ESP_STEP_NCMD: | ||
3257 | /* Arbitration won, target selected, maybe | ||
3258 | * sent the one message byte in message out | ||
3259 | * phase, but we did not go to command phase | ||
3260 | * in the end. Actually, we could have sent | ||
3261 | * only some of the message bytes if we tried | ||
3262 | * to send out the entire identify and tag | ||
3263 | * message using ESP_CMD_SA3. | ||
3264 | */ | ||
3265 | cmd_bytes_sent = 0; | ||
3266 | break; | ||
3267 | |||
3268 | case ESP_STEP_PPC: | ||
3269 | /* No, not the PowerPC pinhead. Arbitration | ||
3270 | * won, all message bytes sent if we went to | ||
3271 | * message out phase, went to command phase | ||
3272 | * but only part of the command was sent. | ||
3273 | * | ||
3274 | * XXX I've seen this, but usually in conjunction | ||
3275 | * XXX with a gross error which appears to have | ||
3276 | * XXX occurred between the time I told the | ||
3277 | * XXX ESP to arbitrate and when I got the | ||
3278 | * XXX interrupt. Could I have misloaded the | ||
3279 | * XXX command bytes into the fifo? Actually, | ||
3280 | * XXX I most likely missed a phase, and therefore | ||
3281 | * XXX went into never never land and didn't even | ||
3282 | * XXX know it. That was the old driver though. | ||
3283 | * XXX What is even more peculiar is that the ESP | ||
3284 | * XXX showed the proper function complete and | ||
3285 | * XXX bus service bits in the interrupt register. | ||
3286 | */ | ||
3287 | |||
3288 | case ESP_STEP_FINI4: | ||
3289 | case ESP_STEP_FINI5: | ||
3290 | case ESP_STEP_FINI6: | ||
3291 | case ESP_STEP_FINI7: | ||
3292 | /* Account for the identify message */ | ||
3293 | if (SCptr->SCp.phase == in_slct_norm) | ||
3294 | cmd_bytes_sent -= 1; | ||
3295 | } | ||
3296 | |||
3297 | if (esp->erev != fashme) | ||
3298 | esp_cmd(esp, ESP_CMD_NULL); | ||
3299 | |||
3300 | /* Be careful, we could really get fucked during synchronous | ||
3301 | * data transfers if we try to flush the fifo now. | ||
3302 | */ | ||
3303 | if ((esp->erev != fashme) && /* not a Happy Meal and... */ | ||
3304 | !fcnt && /* Fifo is empty and... */ | ||
3305 | /* either we are not doing synchronous transfers or... */ | ||
3306 | (!esp_dev->sync_max_offset || | ||
3307 | /* We are not going into data in phase. */ | ||
3308 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
3309 | esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */ | ||
3310 | |||
3311 | /* See how far we got if this is not a slow command. */ | ||
3312 | if (!esp->esp_slowcmd) { | ||
3313 | if (cmd_bytes_sent < 0) | ||
3314 | cmd_bytes_sent = 0; | ||
3315 | if (cmd_bytes_sent != SCptr->cmd_len) { | ||
3316 | /* Crapola, mark it as a slowcmd | ||
3317 | * so that, with some luck, we have | ||
3318 | * a chance of keeping the command | ||
3319 | * alive. | ||
3320 | * | ||
3321 | * XXX Actually, if we didn't send it all | ||
3322 | * XXX this means either we didn't set things | ||
3323 | * XXX up properly (driver bug) or the target | ||
3324 | * XXX or the ESP detected parity on one of | ||
3325 | * XXX the command bytes. This makes much | ||
3326 | * XXX more sense, and therefore this code | ||
3327 | * XXX should be changed to send out a | ||
3328 | * XXX parity error message or if the status | ||
3329 | * XXX register shows no parity error then | ||
3330 | * XXX just expect the target to bring the | ||
3331 | * XXX bus into message in phase so that it | ||
3332 | * XXX can send us the parity error message. | ||
3333 | * XXX SCSI sucks... | ||
3334 | */ | ||
3335 | esp->esp_slowcmd = 1; | ||
3336 | esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]); | ||
3337 | esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent); | ||
3338 | } | ||
3339 | } | ||
3340 | |||
3341 | /* Now figure out where we went. */ | ||
3342 | esp_advance_phase(SCptr, in_the_dark); | ||
3343 | return esp_do_phase_determine(esp); | ||
3344 | } | ||
3345 | |||
3346 | /* Did the target even make it? */ | ||
3347 | if (esp->ireg == ESP_INTR_DC) { | ||
3348 | /* wheee... nobody there or they didn't like | ||
3349 | * what we told them to do; clean up. | ||
3350 | */ | ||
3351 | |||
3352 | /* If anyone is off the bus, but working on | ||
3353 | * a command in the background for us, tell | ||
3354 | * the ESP to listen for them. | ||
3355 | */ | ||
3356 | if (esp->disconnected_SC) | ||
3357 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3358 | |||
3359 | if (((1<<SCptr->device->id) & esp->targets_present) && | ||
3360 | esp->seqreg != 0 && | ||
3361 | (esp->cur_msgout[0] == EXTENDED_MESSAGE) && | ||
3362 | (SCptr->SCp.phase == in_slct_msg || | ||
3363 | SCptr->SCp.phase == in_slct_stop)) { | ||
3364 | /* shit */ | ||
3365 | esp->snip = 0; | ||
3366 | ESPLOG(("esp%d: Failed synchronous negotiation for target %d " | ||
3367 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
3368 | esp_dev->sync_max_offset = 0; | ||
3369 | esp_dev->sync_min_period = 0; | ||
3370 | esp_dev->sync = 1; /* so we don't negotiate again */ | ||
3371 | |||
3372 | /* Run the command again, this time though we | ||
3373 | * won't try to negotiate for synchronous transfers. | ||
3374 | * | ||
3375 | * XXX I'd like to do something like send an | ||
3376 | * XXX INITIATOR_ERROR or ABORT message to the | ||
3377 | * XXX target to tell it, "Sorry I confused you, | ||
3378 | * XXX please come back and I will be nicer next | ||
3379 | * XXX time". But that requires having the target | ||
3380 | * XXX on the bus, and it has dropped BSY on us. | ||
3381 | */ | ||
3382 | esp->current_SC = NULL; | ||
3383 | esp_advance_phase(SCptr, not_issued); | ||
3384 | prepend_SC(&esp->issue_SC, SCptr); | ||
3385 | esp_exec_cmd(esp); | ||
3386 | return do_intr_end; | ||
3387 | } | ||
3388 | |||
3389 | /* Ok, this is normal; this is what we see during boot | ||
3390 | * or whenever we are scanning the bus for targets. | ||
3391 | * But first make sure that is really what is happening. | ||
3392 | */ | ||
3393 | if (((1<<SCptr->device->id) & esp->targets_present)) { | ||
3394 | ESPLOG(("esp%d: Warning, live target %d not responding to " | ||
3395 | "selection.\n", esp->esp_id, SCptr->device->id)); | ||
3396 | |||
3397 | /* This _CAN_ happen. The SCSI standard states that | ||
3398 | * the target is to _not_ respond to selection if | ||
3399 | * _it_ detects bad parity on the bus for any reason. | ||
3400 | * Therefore, we assume that if we've talked successfully | ||
3401 | * to this target before, bad parity is the problem. | ||
3402 | */ | ||
3403 | esp_done(esp, (DID_PARITY << 16)); | ||
3404 | } else { | ||
3405 | /* Else, there really isn't anyone there. */ | ||
3406 | ESPMISC(("esp: selection failure, maybe nobody there?\n")); | ||
3407 | ESPMISC(("esp: target %d lun %d\n", | ||
3408 | SCptr->device->id, SCptr->device->lun)); | ||
3409 | esp_done(esp, (DID_BAD_TARGET << 16)); | ||
3410 | } | ||
3411 | return do_intr_end; | ||
3412 | } | ||
3413 | |||
3414 | ESPLOG(("esp%d: Selection failure.\n", esp->esp_id)); | ||
3415 | printk("esp%d: Currently -- ", esp->esp_id); | ||
3416 | esp_print_ireg(esp->ireg); printk(" "); | ||
3417 | esp_print_statreg(esp->sreg); printk(" "); | ||
3418 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3419 | printk("esp%d: New -- ", esp->esp_id); | ||
3420 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3421 | esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP); | ||
3422 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
3423 | esp_print_ireg(esp->ireg); printk(" "); | ||
3424 | esp_print_statreg(esp->sreg); printk(" "); | ||
3425 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3426 | ESPLOG(("esp%d: resetting bus\n", esp->esp_id)); | ||
3427 | return do_reset_bus; /* ugh... */ | ||
3428 | } | ||
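When selection stops short of sending the whole CDB, the code above records a resume pointer and a remaining-byte count for the "slow command" path. A standalone sketch of that bookkeeping, with illustrative names:

#include <stdio.h>

/* If selection stopped before the whole CDB went out, remember where we
 * got to so the remaining bytes can be fed to the chip later (as
 * esp_do_cmdbegin() does).  Field names here are stand-ins.
 */
struct slowcmd {
	const unsigned char *scmdp;	/* next command byte to send */
	int scmdleft;			/* bytes still to send */
};

static void mark_slowcmd(struct slowcmd *sc, const unsigned char *cdb,
			 int cdb_len, int bytes_sent)
{
	if (bytes_sent < 0)
		bytes_sent = 0;
	sc->scmdp = &cdb[bytes_sent];
	sc->scmdleft = cdb_len - bytes_sent;
}

int main(void)
{
	const unsigned char cdb[6] = { 0x08, 0, 0, 0, 1, 0 };	/* READ(6) */
	struct slowcmd sc;
	int sent = 2;	/* example: only two CDB bytes made it out */

	mark_slowcmd(&sc, cdb, 6, sent);
	printf("resume at byte %d (0x%02x), %d bytes left\n",
	       sent, sc.scmdp[0], sc.scmdleft);	/* 4 bytes left */
	return 0;
}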
3429 | |||
3430 | /* Continue reading bytes for msgin phase. */ | ||
3431 | static int esp_do_msgincont(struct esp *esp) | ||
3432 | { | ||
3433 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3434 | /* in the right phase too? */ | ||
3435 | if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) { | ||
3436 | /* phew... */ | ||
3437 | esp_cmd(esp, ESP_CMD_TI); | ||
3438 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
3439 | return do_intr_end; | ||
3440 | } | ||
3441 | |||
3442 | /* We changed phase but the ESP shows bus service; | ||
3443 | * in this case it is most likely that we, the | ||
3444 | * hacker who has been up for 20 hrs straight | ||
3445 | * staring at the screen, drowning in coffee and | ||
3446 | * smelling like wretched cigarette ashes, | ||
3447 | * have miscoded something... so, try to | ||
3448 | * recover as best we can. | ||
3449 | */ | ||
3450 | ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id)); | ||
3451 | } | ||
3452 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3453 | return do_phase_determine; | ||
3454 | } | ||
3455 | |||
3456 | static int check_singlebyte_msg(struct esp *esp) | ||
3457 | { | ||
3458 | esp->prevmsgin = esp->cur_msgin[0]; | ||
3459 | if (esp->cur_msgin[0] & 0x80) { | ||
3460 | /* wheee... */ | ||
3461 | ESPLOG(("esp%d: target sends identify amidst phases\n", | ||
3462 | esp->esp_id)); | ||
3463 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3464 | return 0; | ||
3465 | } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) || | ||
3466 | (esp->cur_msgin[0] == EXTENDED_MESSAGE)) { | ||
3467 | esp->msgin_len = 2; | ||
3468 | esp_advance_phase(esp->current_SC, in_msgincont); | ||
3469 | return 0; | ||
3470 | } | ||
3471 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3472 | switch (esp->cur_msgin[0]) { | ||
3473 | default: | ||
3474 | /* We don't want to hear about it. */ | ||
3475 | ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id, | ||
3476 | esp->cur_msgin[0])); | ||
3477 | return MESSAGE_REJECT; | ||
3478 | |||
3479 | case NOP: | ||
3480 | ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id, | ||
3481 | esp->current_SC->device->id)); | ||
3482 | return 0; | ||
3483 | |||
3484 | case RESTORE_POINTERS: | ||
3485 | /* In this case we might also have to back up the | ||
3486 | * "slow command" pointer. It is rare to get such | ||
3487 | * a save/restore pointer sequence so early in the | ||
3488 | * bus transition sequences, but we cover it. | ||
3489 | */ | ||
3490 | if (esp->esp_slowcmd) { | ||
3491 | esp->esp_scmdleft = esp->current_SC->cmd_len; | ||
3492 | esp->esp_scmdp = &esp->current_SC->cmnd[0]; | ||
3493 | } | ||
3494 | esp_restore_pointers(esp, esp->current_SC); | ||
3495 | return 0; | ||
3496 | |||
3497 | case SAVE_POINTERS: | ||
3498 | esp_save_pointers(esp, esp->current_SC); | ||
3499 | return 0; | ||
3500 | |||
3501 | case COMMAND_COMPLETE: | ||
3502 | case DISCONNECT: | ||
3503 | /* Freeing the bus, let it go. */ | ||
3504 | esp->current_SC->SCp.phase = in_freeing; | ||
3505 | return 0; | ||
3506 | |||
3507 | case MESSAGE_REJECT: | ||
3508 | ESPMISC(("msg reject, ")); | ||
3509 | if (esp->prevmsgout == EXTENDED_MESSAGE) { | ||
3510 | struct esp_device *esp_dev = esp->current_SC->device->hostdata; | ||
3511 | |||
3512 | /* Doesn't look like this target can | ||
3513 | * do synchronous or WIDE transfers. | ||
3514 | */ | ||
3515 | ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n")); | ||
3516 | esp_dev->sync = 1; | ||
3517 | esp_dev->wide = 1; | ||
3518 | esp_dev->sync_min_period = 0; | ||
3519 | esp_dev->sync_max_offset = 0; | ||
3520 | return 0; | ||
3521 | } else { | ||
3522 | ESPMISC(("not sync nego, sending ABORT\n")); | ||
3523 | return ABORT; | ||
3524 | } | ||
3525 | } | ||
3526 | } | ||
3527 | |||
3528 | /* Target negotiates for synchronous transfers before we do; this | ||
3529 | * is legal, although very strange. What is even funnier is that | ||
3530 | * the SCSI-2 standard specifically recommends against targets doing | ||
3531 | * this because so many initiators cannot cope with this occurring. | ||
3532 | */ | ||
3533 | static int target_with_ants_in_pants(struct esp *esp, | ||
3534 | struct scsi_cmnd *SCptr, | ||
3535 | struct esp_device *esp_dev) | ||
3536 | { | ||
3537 | if (esp_dev->sync || SCptr->device->borken) { | ||
3538 | /* sorry, no can do */ | ||
3539 | ESPSDTR(("forcing to async, ")); | ||
3540 | build_sync_nego_msg(esp, 0, 0); | ||
3541 | esp_dev->sync = 1; | ||
3542 | esp->snip = 1; | ||
3543 | ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id)); | ||
3544 | esp_advance_phase(SCptr, in_the_dark); | ||
3545 | return EXTENDED_MESSAGE; | ||
3546 | } | ||
3547 | |||
3548 | /* Ok, we'll check them out... */ | ||
3549 | return 0; | ||
3550 | } | ||
3551 | |||
3552 | static void sync_report(struct esp *esp) | ||
3553 | { | ||
3554 | int msg3, msg4; | ||
3555 | char *type; | ||
3556 | |||
3557 | msg3 = esp->cur_msgin[3]; | ||
3558 | msg4 = esp->cur_msgin[4]; | ||
3559 | if (msg4) { | ||
3560 | int hz = 1000000000 / (msg3 * 4); | ||
3561 | int integer = hz / 1000000; | ||
3562 | int fraction = (hz - (integer * 1000000)) / 10000; | ||
3563 | if ((esp->erev == fashme) && | ||
3564 | (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) { | ||
3565 | type = "FAST-WIDE"; | ||
3566 | integer <<= 1; | ||
3567 | fraction <<= 1; | ||
3568 | } else if ((msg3 * 4) < 200) { | ||
3569 | type = "FAST"; | ||
3570 | } else { | ||
3571 | type = "synchronous"; | ||
3572 | } | ||
3573 | |||
3574 | /* Do not transform this back into one big printk | ||
3575 | * again; it triggers a bug in our sparc64-gcc272 | ||
3576 | * sibling call optimization. -DaveM | ||
3577 | */ | ||
3578 | ESPLOG((KERN_INFO "esp%d: target %d ", | ||
3579 | esp->esp_id, esp->current_SC->device->id)); | ||
3580 | ESPLOG(("[period %dns offset %d %d.%02dMHz ", | ||
3581 | (int) msg3 * 4, (int) msg4, | ||
3582 | integer, fraction)); | ||
3583 | ESPLOG(("%s SCSI%s]\n", type, | ||
3584 | (((msg3 * 4) < 200) ? "-II" : ""))); | ||
3585 | } else { | ||
3586 | ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n", | ||
3587 | esp->esp_id, esp->current_SC->device->id)); | ||
3588 | } | ||
3589 | } | ||
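The rate printed by sync_report() comes from simple integer arithmetic on the SDTR period factor (units of 4ns). A standalone worked example with msg3 = 12, i.e. a 48ns period, which comes out as 20.83 MHz:

#include <stdio.h>

/* Mirrors the integer-only rate calculation in sync_report(). */
int main(void)
{
	int msg3 = 12;				/* SDTR period factor from the target */
	int hz = 1000000000 / (msg3 * 4);	/* 20833333 */
	int integer = hz / 1000000;		/* 20 */
	int fraction = (hz - (integer * 1000000)) / 10000;	/* 83 */

	printf("period %dns -> %d.%02dMHz\n", msg3 * 4, integer, fraction);
	return 0;
}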
3590 | |||
3591 | static int check_multibyte_msg(struct esp *esp) | ||
3592 | { | ||
3593 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3594 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3595 | u8 regval = 0; | ||
3596 | int message_out = 0; | ||
3597 | |||
3598 | ESPSDTR(("chk multibyte msg: ")); | ||
3599 | if (esp->cur_msgin[2] == EXTENDED_SDTR) { | ||
3600 | int period = esp->cur_msgin[3]; | ||
3601 | int offset = esp->cur_msgin[4]; | ||
3602 | |||
3603 | ESPSDTR(("is sync nego response, ")); | ||
3604 | if (!esp->snip) { | ||
3605 | int rval; | ||
3606 | |||
3607 | /* Target negotiates first! */ | ||
3608 | ESPSDTR(("target jumps the gun, ")); | ||
3609 | message_out = EXTENDED_MESSAGE; /* we must respond */ | ||
3610 | rval = target_with_ants_in_pants(esp, SCptr, esp_dev); | ||
3611 | if (rval) | ||
3612 | return rval; | ||
3613 | } | ||
3614 | |||
3615 | ESPSDTR(("examining sdtr, ")); | ||
3616 | |||
3617 | /* Offset cannot be larger than ESP fifo size. */ | ||
3618 | if (offset > 15) { | ||
3619 | ESPSDTR(("offset too big %2x, ", offset)); | ||
3620 | offset = 15; | ||
3621 | ESPSDTR(("sending back new offset\n")); | ||
3622 | build_sync_nego_msg(esp, period, offset); | ||
3623 | return EXTENDED_MESSAGE; | ||
3624 | } | ||
3625 | |||
3626 | if (offset && period > esp->max_period) { | ||
3627 | /* Yeee, async for this slow device. */ | ||
3628 | ESPSDTR(("period too long %2x, ", period)); | ||
3629 | build_sync_nego_msg(esp, 0, 0); | ||
3630 | ESPSDTR(("hoping for msgout\n")); | ||
3631 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3632 | return EXTENDED_MESSAGE; | ||
3633 | } else if (offset && period < esp->min_period) { | ||
3634 | ESPSDTR(("period too short %2x, ", period)); | ||
3635 | period = esp->min_period; | ||
3636 | if (esp->erev > esp236) | ||
3637 | regval = 4; | ||
3638 | else | ||
3639 | regval = 5; | ||
3640 | } else if (offset) { | ||
3641 | int tmp; | ||
3642 | |||
3643 | ESPSDTR(("period is ok, ")); | ||
3644 | tmp = esp->ccycle / 1000; | ||
3645 | regval = (((period << 2) + tmp - 1) / tmp); | ||
3646 | if (regval && ((esp->erev == fas100a || | ||
3647 | esp->erev == fas236 || | ||
3648 | esp->erev == fashme))) { | ||
3649 | if (period >= 50) | ||
3650 | regval--; | ||
3651 | } | ||
3652 | } | ||
3653 | |||
3654 | if (offset) { | ||
3655 | u8 bit; | ||
3656 | |||
3657 | esp_dev->sync_min_period = (regval & 0x1f); | ||
3658 | esp_dev->sync_max_offset = (offset | esp->radelay); | ||
3659 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3660 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3661 | bit = ESP_CONFIG3_FAST; | ||
3662 | else | ||
3663 | bit = ESP_CONFIG3_FSCSI; | ||
3664 | if (period < 50) { | ||
3665 | /* On FAS366, if using fast-20 synchronous transfers | ||
3666 | * we need to make sure the REQ/ACK assert/deassert | ||
3667 | * control bits are clear. | ||
3668 | */ | ||
3669 | if (esp->erev == fashme) | ||
3670 | esp_dev->sync_max_offset &= ~esp->radelay; | ||
3671 | esp->config3[SCptr->device->id] |= bit; | ||
3672 | } else { | ||
3673 | esp->config3[SCptr->device->id] &= ~bit; | ||
3674 | } | ||
3675 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3676 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3677 | } | ||
3678 | esp->prev_soff = esp_dev->sync_max_offset; | ||
3679 | esp->prev_stp = esp_dev->sync_min_period; | ||
3680 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3681 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3682 | ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n", | ||
3683 | esp_dev->sync_max_offset, | ||
3684 | esp_dev->sync_min_period, | ||
3685 | esp->config3[SCptr->device->id])); | ||
3686 | |||
3687 | esp->snip = 0; | ||
3688 | } else if (esp_dev->sync_max_offset) { | ||
3689 | u8 bit; | ||
3690 | |||
3691 | /* back to async mode */ | ||
3692 | ESPSDTR(("unacceptable sync nego, forcing async\n")); | ||
3693 | esp_dev->sync_max_offset = 0; | ||
3694 | esp_dev->sync_min_period = 0; | ||
3695 | esp->prev_soff = 0; | ||
3696 | esp->prev_stp = 0; | ||
3697 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3698 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3699 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3700 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3701 | bit = ESP_CONFIG3_FAST; | ||
3702 | else | ||
3703 | bit = ESP_CONFIG3_FSCSI; | ||
3704 | esp->config3[SCptr->device->id] &= ~bit; | ||
3705 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3706 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3707 | } | ||
3708 | } | ||
3709 | |||
3710 | sync_report(esp); | ||
3711 | |||
3712 | ESPSDTR(("chk multibyte msg: sync is known, ")); | ||
3713 | esp_dev->sync = 1; | ||
3714 | |||
3715 | if (message_out) { | ||
3716 | ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n", | ||
3717 | esp->esp_id)); | ||
3718 | build_sync_nego_msg(esp, period, offset); | ||
3719 | esp_advance_phase(SCptr, in_the_dark); | ||
3720 | return EXTENDED_MESSAGE; | ||
3721 | } | ||
3722 | |||
3723 | ESPSDTR(("returning zero\n")); | ||
3724 | esp_advance_phase(SCptr, in_the_dark); /* ...or else! */ | ||
3725 | return 0; | ||
3726 | } else if (esp->cur_msgin[2] == EXTENDED_WDTR) { | ||
3727 | int size = 8 << esp->cur_msgin[3]; | ||
3728 | |||
3729 | esp->wnip = 0; | ||
3730 | if (esp->erev != fashme) { | ||
3731 | ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n", | ||
3732 | esp->esp_id)); | ||
3733 | message_out = MESSAGE_REJECT; | ||
3734 | } else if (size > 16) { | ||
3735 | ESPLOG(("esp%d: AIEEE wide transfer for %d size " | ||
3736 | "not supported.\n", esp->esp_id, size)); | ||
3737 | message_out = MESSAGE_REJECT; | ||
3738 | } else { | ||
3739 | /* Things look good; let's see what we got. */ | ||
3740 | if (size == 16) { | ||
3741 | /* Set config 3 register for this target. */ | ||
3742 | esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE; | ||
3743 | } else { | ||
3744 | /* Just make sure it was one byte sized. */ | ||
3745 | if (size != 8) { | ||
3746 | ESPLOG(("esp%d: Aieee, wide nego of %d size.\n", | ||
3747 | esp->esp_id, size)); | ||
3748 | message_out = MESSAGE_REJECT; | ||
3749 | goto finish; | ||
3750 | } | ||
3751 | /* Pure paranoia. */ | ||
3752 | esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE); | ||
3753 | } | ||
3754 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3755 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3756 | |||
3757 | /* Regardless, next try for sync transfers. */ | ||
3758 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
3759 | esp_dev->sync = 1; | ||
3760 | esp->snip = 1; | ||
3761 | message_out = EXTENDED_MESSAGE; | ||
3762 | } | ||
3763 | } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) { | ||
3764 | ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id)); | ||
3765 | message_out = MESSAGE_REJECT; | ||
3766 | } | ||
3767 | finish: | ||
3768 | esp_advance_phase(SCptr, in_the_dark); | ||
3769 | return message_out; | ||
3770 | } | ||
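The period-register value computed above is an integer ceiling division of the requested period by the chip clock cycle. A standalone sketch follows, under the assumption that the clock period is expressed in nanoseconds (the driver derives it from esp->ccycle); names are illustrative.

#include <stdio.h>

/* regval = ceil(requested period in ns / clock cycle in ns), where the
 * requested period is the SDTR factor times 4ns, exactly as in
 * check_multibyte_msg():  (((period << 2) + tmp - 1) / tmp).
 */
static int sync_period_regval(int sdtr_period, int clock_ns)
{
	int period_ns = sdtr_period << 2;	/* SDTR units are 4ns */

	return (period_ns + clock_ns - 1) / clock_ns;	/* ceiling */
}

int main(void)
{
	/* 25 MHz chip clock (40ns cycle), target asks for a 100ns period. */
	printf("regval = %d\n", sync_period_regval(25, 40));	/* 3 */
	return 0;
}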
3771 | |||
3772 | static int esp_do_msgindone(struct esp *esp) | ||
3773 | { | ||
3774 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3775 | int message_out = 0, it = 0, rval; | ||
3776 | |||
3777 | rval = skipahead1(esp, SCptr, in_msgin, in_msgindone); | ||
3778 | if (rval) | ||
3779 | return rval; | ||
3780 | if (SCptr->SCp.sent_command != in_status) { | ||
3781 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
3782 | if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) { | ||
3783 | message_out = MSG_PARITY_ERROR; | ||
3784 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3785 | } else if (esp->erev != fashme && | ||
3786 | (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) { | ||
3787 | /* We certainly dropped the ball somewhere. */ | ||
3788 | message_out = INITIATOR_ERROR; | ||
3789 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3790 | } else if (!esp->msgin_len) { | ||
3791 | if (esp->erev == fashme) | ||
3792 | it = esp->hme_fifo_workaround_buffer[0]; | ||
3793 | else | ||
3794 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
3795 | esp_advance_phase(SCptr, in_msgincont); | ||
3796 | } else { | ||
3797 | /* it is ok and we want it */ | ||
3798 | if (esp->erev == fashme) | ||
3799 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3800 | esp->hme_fifo_workaround_buffer[0]; | ||
3801 | else | ||
3802 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3803 | sbus_readb(esp->eregs + ESP_FDATA); | ||
3804 | esp->msgin_ctr++; | ||
3805 | } | ||
3806 | } else { | ||
3807 | esp_advance_phase(SCptr, in_the_dark); | ||
3808 | return do_work_bus; | ||
3809 | } | ||
3810 | } else { | ||
3811 | it = esp->cur_msgin[0]; | ||
3812 | } | ||
3813 | if (!message_out && esp->msgin_len) { | ||
3814 | if (esp->msgin_ctr < esp->msgin_len) { | ||
3815 | esp_advance_phase(SCptr, in_msgincont); | ||
3816 | } else if (esp->msgin_len == 1) { | ||
3817 | message_out = check_singlebyte_msg(esp); | ||
3818 | } else if (esp->msgin_len == 2) { | ||
3819 | if (esp->cur_msgin[0] == EXTENDED_MESSAGE) { | ||
3820 | if ((it + 2) >= 15) { | ||
3821 | message_out = MESSAGE_REJECT; | ||
3822 | } else { | ||
3823 | esp->msgin_len = (it + 2); | ||
3824 | esp_advance_phase(SCptr, in_msgincont); | ||
3825 | } | ||
3826 | } else { | ||
3827 | message_out = MESSAGE_REJECT; /* foo on you */ | ||
3828 | } | ||
3829 | } else { | ||
3830 | message_out = check_multibyte_msg(esp); | ||
3831 | } | ||
3832 | } | ||
3833 | if (message_out < 0) { | ||
3834 | return -message_out; | ||
3835 | } else if (message_out) { | ||
3836 | if (((message_out != 1) && | ||
3837 | ((message_out < 0x20) || (message_out & 0x80)))) | ||
3838 | esp->msgout_len = 1; | ||
3839 | esp->cur_msgout[0] = message_out; | ||
3840 | esp_cmd(esp, ESP_CMD_SATN); | ||
3841 | esp_advance_phase(SCptr, in_the_dark); | ||
3842 | esp->msgin_len = 0; | ||
3843 | } | ||
3844 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3845 | esp->sreg &= ~(ESP_STAT_INTR); | ||
3846 | if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD)) | ||
3847 | esp_cmd(esp, ESP_CMD_MOK); | ||
3848 | if ((SCptr->SCp.sent_command == in_msgindone) && | ||
3849 | (SCptr->SCp.phase == in_freeing)) | ||
3850 | return esp_do_freebus(esp); | ||
3851 | return do_intr_end; | ||
3852 | } | ||
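The (it + 2) >= 15 test above sizes an extended message from its length byte: byte 0 is 0x01, byte 1 is the remaining length, so the whole message is length + 2 bytes. A standalone sketch of that sizing, with an illustrative helper:

#include <stdio.h>

/* An extended message starts with 0x01 followed by a length byte, so
 * the complete message is length + 2 bytes.  Messages too long for the
 * chip's FIFO are rejected, mirroring the (it + 2) >= 15 check above.
 */
static int extended_msg_total_len(unsigned char len_byte)
{
	int total = len_byte + 2;	/* 0x01 + length byte + payload */

	return (total >= 15) ? -1 : total;	/* -1 means reject */
}

int main(void)
{
	/* SDTR is 01 03 01 <period> <offset>: length byte 3, 5 bytes total. */
	printf("SDTR total = %d bytes\n", extended_msg_total_len(3));
	return 0;
}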
3853 | |||
3854 | static int esp_do_cmdbegin(struct esp *esp) | ||
3855 | { | ||
3856 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3857 | |||
3858 | esp_advance_phase(SCptr, in_cmdend); | ||
3859 | if (esp->erev == fashme) { | ||
3860 | u32 tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3861 | int i; | ||
3862 | |||
3863 | for (i = 0; i < esp->esp_scmdleft; i++) | ||
3864 | esp->esp_command[i] = *esp->esp_scmdp++; | ||
3865 | esp->esp_scmdleft = 0; | ||
3866 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3867 | esp_setcount(esp->eregs, i, 1); | ||
3868 | esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI)); | ||
3869 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
3870 | tmp &= ~(DMA_ST_WRITE); | ||
3871 | sbus_writel(i, esp->dregs + DMA_COUNT); | ||
3872 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3873 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3874 | } else { | ||
3875 | u8 tmp; | ||
3876 | |||
3877 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3878 | tmp = *esp->esp_scmdp++; | ||
3879 | esp->esp_scmdleft--; | ||
3880 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
3881 | esp_cmd(esp, ESP_CMD_TI); | ||
3882 | } | ||
3883 | return do_intr_end; | ||
3884 | } | ||
3885 | |||
3886 | static int esp_do_cmddone(struct esp *esp) | ||
3887 | { | ||
3888 | if (esp->erev == fashme) | ||
3889 | dma_invalidate(esp); | ||
3890 | else | ||
3891 | esp_cmd(esp, ESP_CMD_NULL); | ||
3892 | |||
3893 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3894 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3895 | return esp_do_phase_determine(esp); | ||
3896 | } | ||
3897 | |||
3898 | ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n", | ||
3899 | esp->esp_id)); | ||
3900 | return do_reset_bus; | ||
3901 | } | ||
3902 | |||
3903 | static int esp_do_msgout(struct esp *esp) | ||
3904 | { | ||
3905 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3906 | switch (esp->msgout_len) { | ||
3907 | case 1: | ||
3908 | if (esp->erev == fashme) | ||
3909 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3910 | else | ||
3911 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3912 | |||
3913 | esp_cmd(esp, ESP_CMD_TI); | ||
3914 | break; | ||
3915 | |||
3916 | case 2: | ||
3917 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3918 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3919 | |||
3920 | if (esp->erev == fashme) { | ||
3921 | hme_fifo_push(esp, &esp->cur_msgout[0], 2); | ||
3922 | esp_cmd(esp, ESP_CMD_TI); | ||
3923 | } else { | ||
3924 | dma_setup(esp, esp->esp_command_dvma, 2, 0); | ||
3925 | esp_setcount(esp->eregs, 2, 0); | ||
3926 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3927 | } | ||
3928 | break; | ||
3929 | |||
3930 | case 4: | ||
3931 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3932 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3933 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3934 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3935 | esp->snip = 1; | ||
3936 | |||
3937 | if (esp->erev == fashme) { | ||
3938 | hme_fifo_push(esp, &esp->cur_msgout[0], 4); | ||
3939 | esp_cmd(esp, ESP_CMD_TI); | ||
3940 | } else { | ||
3941 | dma_setup(esp, esp->esp_command_dvma, 4, 0); | ||
3942 | esp_setcount(esp->eregs, 4, 0); | ||
3943 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3944 | } | ||
3945 | break; | ||
3946 | |||
3947 | case 5: | ||
3948 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3949 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3950 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3951 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3952 | esp->esp_command[4] = esp->cur_msgout[4]; | ||
3953 | esp->snip = 1; | ||
3954 | |||
3955 | if (esp->erev == fashme) { | ||
3956 | hme_fifo_push(esp, &esp->cur_msgout[0], 5); | ||
3957 | esp_cmd(esp, ESP_CMD_TI); | ||
3958 | } else { | ||
3959 | dma_setup(esp, esp->esp_command_dvma, 5, 0); | ||
3960 | esp_setcount(esp->eregs, 5, 0); | ||
3961 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3962 | } | ||
3963 | break; | ||
3964 | |||
3965 | default: | ||
3966 | /* whoops */ | ||
3967 | ESPMISC(("bogus msgout sending NOP\n")); | ||
3968 | esp->cur_msgout[0] = NOP; | ||
3969 | |||
3970 | if (esp->erev == fashme) { | ||
3971 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3972 | } else { | ||
3973 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3974 | } | ||
3975 | |||
3976 | esp->msgout_len = 1; | ||
3977 | esp_cmd(esp, ESP_CMD_TI); | ||
3978 | break; | ||
3979 | } | ||
3980 | |||
3981 | esp_advance_phase(esp->current_SC, in_msgoutdone); | ||
3982 | return do_intr_end; | ||
3983 | } | ||
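The 5-byte case above is the shape of a synchronous data transfer request (SDTR) extended message as SCSI-2 defines it: 0x01, 0x03, 0x01, period, offset. A standalone sketch that just fills such a buffer; the driver builds its copy in build_sync_nego_msg(), which is not shown here.

#include <stdio.h>

/* Fill a 5-byte SDTR extended message. */
static int build_sdtr(unsigned char *buf, unsigned char period,
		      unsigned char offset)
{
	buf[0] = 0x01;		/* EXTENDED_MESSAGE */
	buf[1] = 0x03;		/* remaining length */
	buf[2] = 0x01;		/* EXTENDED_SDTR */
	buf[3] = period;	/* in units of 4ns */
	buf[4] = offset;	/* max REQ/ACK offset, 0 = async */
	return 5;
}

int main(void)
{
	unsigned char msg[5];
	int i, len = build_sdtr(msg, 25, 15);	/* 100ns period, offset 15 */

	for (i = 0; i < len; i++)
		printf("%02x ", msg[i]);
	printf("\n");
	return 0;
}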
3984 | |||
3985 | static int esp_do_msgoutdone(struct esp *esp) | ||
3986 | { | ||
3987 | if (esp->msgout_len > 1) { | ||
3988 | /* XXX HME/FAS ATN deassert workaround required, | ||
3989 | * XXX no DMA flushing, only possible ESP_CMD_FLUSH | ||
3990 | * XXX to kill the fifo. | ||
3991 | */ | ||
3992 | if (esp->erev != fashme) { | ||
3993 | u32 tmp; | ||
3994 | |||
3995 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
3996 | udelay(1); | ||
3997 | tmp &= ~DMA_ENABLE; | ||
3998 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3999 | dma_invalidate(esp); | ||
4000 | } else { | ||
4001 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4002 | } | ||
4003 | } | ||
4004 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
4005 | if (esp->erev != fashme) | ||
4006 | esp_cmd(esp, ESP_CMD_NULL); | ||
4007 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
4008 | case ESP_MOP: | ||
4009 | /* whoops, parity error */ | ||
4010 | ESPLOG(("esp%d: still in msgout, parity error assumed\n", | ||
4011 | esp->esp_id)); | ||
4012 | if (esp->msgout_len > 1) | ||
4013 | esp_cmd(esp, ESP_CMD_SATN); | ||
4014 | esp_advance_phase(esp->current_SC, in_msgout); | ||
4015 | return do_work_bus; | ||
4016 | |||
4017 | case ESP_DIP: | ||
4018 | break; | ||
4019 | |||
4020 | default: | ||
4021 | /* Happy Meal fifo is touchy... */ | ||
4022 | if ((esp->erev != fashme) && | ||
4023 | !fcount(esp) && | ||
4024 | !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset)) | ||
4025 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4026 | break; | ||
4027 | |||
4028 | } | ||
4029 | } else { | ||
4030 | ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id)); | ||
4031 | return do_reset_bus; | ||
4032 | } | ||
4033 | |||
4034 | /* If we sent out a synchronous negotiation message, update | ||
4035 | * our state. | ||
4036 | */ | ||
4037 | if (esp->cur_msgout[2] == EXTENDED_MESSAGE && | ||
4038 | esp->cur_msgout[4] == EXTENDED_SDTR) { | ||
4039 | esp->snip = 1; /* anal retentiveness... */ | ||
4040 | } | ||
4041 | |||
4042 | esp->prevmsgout = esp->cur_msgout[0]; | ||
4043 | esp->msgout_len = 0; | ||
4044 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
4045 | return esp_do_phase_determine(esp); | ||
4046 | } | ||
4047 | |||
4048 | static int esp_bus_unexpected(struct esp *esp) | ||
4049 | { | ||
4050 | ESPLOG(("esp%d: command in weird state %2x\n", | ||
4051 | esp->esp_id, esp->current_SC->SCp.phase)); | ||
4052 | return do_reset_bus; | ||
4053 | } | ||
4054 | |||
4055 | static espfunc_t bus_vector[] = { | ||
4056 | esp_do_data_finale, | ||
4057 | esp_do_data_finale, | ||
4058 | esp_bus_unexpected, | ||
4059 | esp_do_msgin, | ||
4060 | esp_do_msgincont, | ||
4061 | esp_do_msgindone, | ||
4062 | esp_do_msgout, | ||
4063 | esp_do_msgoutdone, | ||
4064 | esp_do_cmdbegin, | ||
4065 | esp_do_cmddone, | ||
4066 | esp_do_status, | ||
4067 | esp_do_freebus, | ||
4068 | esp_do_phase_determine, | ||
4069 | esp_bus_unexpected, | ||
4070 | esp_bus_unexpected, | ||
4071 | esp_bus_unexpected, | ||
4072 | }; | ||
4073 | |||
4074 | /* This is the second tier in our dual-level SCSI state machine. */ | ||
4075 | static int esp_work_bus(struct esp *esp) | ||
4076 | { | ||
4077 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
4078 | unsigned int phase; | ||
4079 | |||
4080 | ESPBUS(("esp_work_bus: ")); | ||
4081 | if (!SCptr) { | ||
4082 | ESPBUS(("reconnect\n")); | ||
4083 | return esp_do_reconnect(esp); | ||
4084 | } | ||
4085 | phase = SCptr->SCp.phase; | ||
4086 | if ((phase & 0xf0) == in_phases_mask) | ||
4087 | return bus_vector[(phase & 0x0f)](esp); | ||
4088 | else if ((phase & 0xf0) == in_slct_mask) | ||
4089 | return esp_select_complete(esp); | ||
4090 | else | ||
4091 | return esp_bus_unexpected(esp); | ||
4092 | } | ||
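esp_work_bus() splits each internal phase value with two masks: the high nibble appears to select the group (bus phases vs. selection phases) and the low nibble indexes bus_vector. A standalone sketch of that two-level dispatch; the group constants and handler names are illustrative, not the driver's real in_* values.

#include <stdio.h>

/* One mask picks the handler table, the other picks the handler. */
#define GROUP_BUS	0x10	/* stand-in for in_phases_mask */
#define GROUP_SELECT	0x20	/* stand-in for in_slct_mask */

static const char *handle_bus_phase(unsigned int idx)
{
	/* Not the real bus_vector ordering, just an example table. */
	static const char *names[] = { "data_finale", "msgin", "status" };

	return (idx < 3) ? names[idx] : "unexpected";
}

static const char *dispatch(unsigned int phase)
{
	if ((phase & 0xf0) == GROUP_BUS)
		return handle_bus_phase(phase & 0x0f);
	if ((phase & 0xf0) == GROUP_SELECT)
		return "select_complete";
	return "unexpected";
}

int main(void)
{
	printf("%s\n", dispatch(GROUP_BUS | 0x02));	/* status */
	printf("%s\n", dispatch(GROUP_SELECT | 0x01));	/* select_complete */
	return 0;
}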
4093 | |||
4094 | static espfunc_t isvc_vector[] = { | ||
4095 | NULL, | ||
4096 | esp_do_phase_determine, | ||
4097 | esp_do_resetbus, | ||
4098 | esp_finish_reset, | ||
4099 | esp_work_bus | ||
4100 | }; | ||
4101 | |||
4102 | /* Main interrupt handler for an esp adapter. */ | ||
4103 | static void esp_handle(struct esp *esp) | ||
4104 | { | ||
4105 | struct scsi_cmnd *SCptr; | ||
4106 | int what_next = do_intr_end; | ||
4107 | |||
4108 | SCptr = esp->current_SC; | ||
4109 | |||
4110 | /* Check for errors. */ | ||
4111 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
4112 | esp->sreg &= (~ESP_STAT_INTR); | ||
4113 | if (esp->erev == fashme) { | ||
4114 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
4115 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
4116 | } | ||
4117 | |||
4118 | if (esp->sreg & (ESP_STAT_SPAM)) { | ||
4119 | /* Gross error, could be due to one of: | ||
4120 | * | ||
4121 | * - top of fifo overwritten, could be because | ||
4122 | * we tried to do a synchronous transfer with | ||
4123 | * an offset greater than ESP fifo size | ||
4124 | * | ||
4125 | * - top of command register overwritten | ||
4126 | * | ||
4127 | * - DMA setup to go in one direction, SCSI | ||
4128 | * bus points in the other, whoops | ||
4129 | * | ||
4130 | * - weird phase change during asynchronous | ||
4131 | * data phase while we are initiator | ||
4132 | */ | ||
4133 | ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg)); | ||
4134 | |||
4135 | /* If a command is live on the bus we cannot safely | ||
4136 | * reset the bus, so we'll just let the pieces fall | ||
4137 | * where they may. Here we are hoping that the | ||
4138 | * target will be able to cleanly go away soon | ||
4139 | * so we can safely reset things. | ||
4140 | */ | ||
4141 | if (!SCptr) { | ||
4142 | ESPLOG(("esp%d: No current cmd during gross error, " | ||
4143 | "resetting bus\n", esp->esp_id)); | ||
4144 | what_next = do_reset_bus; | ||
4145 | goto state_machine; | ||
4146 | } | ||
4147 | } | ||
4148 | |||
4149 | if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) { | ||
4150 | /* A DMA gate array error. Here we must | ||
4151 | * be seeing one of two things. Either the | ||
4152 | * virtual-to-physical address translation | ||
4153 | * on the SBUS could not occur, or the | ||
4154 | * translation it did get pointed to a bogus | ||
4155 | * page. Ho hum... | ||
4156 | */ | ||
4157 | ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id, | ||
4158 | sbus_readl(esp->dregs + DMA_CSR))); | ||
4159 | |||
4160 | /* DMA gate array itself must be reset to clear the | ||
4161 | * error condition. | ||
4162 | */ | ||
4163 | esp_reset_dma(esp); | ||
4164 | |||
4165 | what_next = do_reset_bus; | ||
4166 | goto state_machine; | ||
4167 | } | ||
4168 | |||
4169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); /* Unlatch intr reg */ | ||
4170 | |||
4171 | if (esp->erev == fashme) { | ||
4172 | /* This chip is really losing. */ | ||
4173 | ESPHME(("HME[")); | ||
4174 | |||
4175 | ESPHME(("sreg2=%02x,", esp->sreg2)); | ||
4176 | /* Must latch fifo before reading the interrupt | ||
4177 | * register, else garbage ends up in the FIFO | ||
4178 | * which confuses the driver utterly. | ||
4179 | */ | ||
4180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
4181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) { | ||
4182 | ESPHME(("fifo_workaround]")); | ||
4183 | hme_fifo_read(esp); | ||
4184 | } else { | ||
4185 | ESPHME(("no_fifo_workaround]")); | ||
4186 | } | ||
4187 | } | ||
4188 | |||
4189 | /* Having no current cmd is only valid at this point when there are | ||
4190 | * commands off the bus or we are trying a reset. | ||
4191 | */ | ||
4192 | if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) { | ||
4193 | /* Panic is safe, since current_SC is null. */ | ||
4194 | ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id)); | ||
4195 | panic("esp_handle: current_SC == penguin within interrupt!"); | ||
4196 | } | ||
4197 | |||
4198 | if (esp->ireg & (ESP_INTR_IC)) { | ||
4199 | /* Illegal command fed to ESP. Outside of obvious | ||
4200 | * software bugs that could cause this, there is | ||
4201 | * a condition with esp100 where we can confuse the | ||
4202 | * ESP into an erroneous illegal command interrupt | ||
4203 | * because it does not scrape the FIFO properly | ||
4204 | * for reselection. See esp100_reconnect_hwbug() | ||
4205 | * to see how we try very hard to avoid this. | ||
4206 | */ | ||
4207 | ESPLOG(("esp%d: invalid command\n", esp->esp_id)); | ||
4208 | |||
4209 | esp_dump_state(esp); | ||
4210 | |||
4211 | if (SCptr != NULL) { | ||
4212 | /* Devices with very buggy firmware can drop BSY | ||
4213 | * during a scatter list interrupt when using sync | ||
4214 | * mode transfers. We continue the transfer as | ||
4215 | * expected, the target drops the bus, the ESP | ||
4216 | * gets confused, and we get an illegal command | ||
4217 | * interrupt because the bus is in the disconnected | ||
4218 | * state now and ESP_CMD_TI is only allowed when | ||
4219 | * a nexus is alive on the bus. | ||
4220 | */ | ||
4221 | ESPLOG(("esp%d: Forcing async and disabling disconnect for " | ||
4222 | "target %d\n", esp->esp_id, SCptr->device->id)); | ||
4223 | SCptr->device->borken = 1; /* foo on you */ | ||
4224 | } | ||
4225 | |||
4226 | what_next = do_reset_bus; | ||
4227 | } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) { | ||
4228 | if (SCptr) { | ||
4229 | unsigned int phase = SCptr->SCp.phase; | ||
4230 | |||
4231 | if (phase & in_phases_mask) { | ||
4232 | what_next = esp_work_bus(esp); | ||
4233 | } else if (phase & in_slct_mask) { | ||
4234 | what_next = esp_select_complete(esp); | ||
4235 | } else { | ||
4236 | ESPLOG(("esp%d: interrupt for no good reason...\n", | ||
4237 | esp->esp_id)); | ||
4238 | what_next = do_intr_end; | ||
4239 | } | ||
4240 | } else { | ||
4241 | ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n", | ||
4242 | esp->esp_id)); | ||
4243 | what_next = do_reset_bus; | ||
4244 | } | ||
4245 | } else if (esp->ireg & ESP_INTR_SR) { | ||
4246 | ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id)); | ||
4247 | what_next = do_reset_complete; | ||
4248 | } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) { | ||
4249 | ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n", | ||
4250 | esp->esp_id)); | ||
4251 | what_next = do_reset_bus; | ||
4252 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
4253 | if (SCptr == NULL) { | ||
4254 | /* This is ok. */ | ||
4255 | what_next = esp_do_reconnect(esp); | ||
4256 | } else if (SCptr->SCp.phase & in_slct_mask) { | ||
4257 | /* Only selection code knows how to clean | ||
4258 | * up properly. | ||
4259 | */ | ||
4260 | ESPDISC(("Reselected during selection attempt\n")); | ||
4261 | what_next = esp_select_complete(esp); | ||
4262 | } else { | ||
4263 | ESPLOG(("esp%d: Reselected while bus is busy\n", | ||
4264 | esp->esp_id)); | ||
4265 | what_next = do_reset_bus; | ||
4266 | } | ||
4267 | } | ||
4268 | |||
4269 | /* This is tier one in our dual-level SCSI state machine. */ | ||
4270 | state_machine: | ||
4271 | while (what_next != do_intr_end) { | ||
4272 | if (what_next >= do_phase_determine && | ||
4273 | what_next < do_intr_end) { | ||
4274 | what_next = isvc_vector[what_next](esp); | ||
4275 | } else { | ||
4276 | /* state is completely lost ;-( */ | ||
4277 | ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n", | ||
4278 | esp->esp_id)); | ||
4279 | what_next = do_reset_bus; | ||
4280 | } | ||
4281 | } | ||
4282 | } | ||
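
The tier-one dispatch above is a plain jump table: what_next indexes isvc_vector[] until a handler returns do_intr_end, and anything out of range is treated as lost state and turned into a bus reset. A minimal standalone sketch of that pattern follows; the toy_* names and enum values are hypothetical stand-ins, since the driver's real do_* constants are defined in a part of esp.c outside this hunk.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's do_* values; the real enum is
 * defined earlier in esp.c and is not reproduced in this hunk.
 */
enum toy_state {
        TOY_NEVER = 0,
        TOY_PHASE_DETERMINE,
        TOY_RESET_BUS,
        TOY_RESET_COMPLETE,
        TOY_WORK_BUS,
        TOY_INTR_END,
};

static int toy_phase_determine(void) { printf("phase determine\n"); return TOY_WORK_BUS; }
static int toy_reset_bus(void)       { printf("reset bus\n");       return TOY_INTR_END; }
static int toy_reset_complete(void)  { printf("reset complete\n");  return TOY_INTR_END; }
static int toy_work_bus(void)        { printf("work bus\n");        return TOY_INTR_END; }

/* Index 0 is unused, mirroring the NULL slot in isvc_vector[]. */
static int (*toy_vector[])(void) = {
        NULL,
        toy_phase_determine,
        toy_reset_bus,
        toy_reset_complete,
        toy_work_bus,
};

int main(void)
{
        int what_next = TOY_PHASE_DETERMINE;

        /* Keep dispatching until a handler reports the interrupt is done. */
        while (what_next != TOY_INTR_END) {
                if (what_next >= TOY_PHASE_DETERMINE && what_next < TOY_INTR_END)
                        what_next = toy_vector[what_next]();
                else
                        what_next = TOY_RESET_BUS; /* lost state, reset */
        }
        return 0;
}
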
4283 | |||
4284 | /* Service only the ESP described by dev_id. */ | ||
4285 | static irqreturn_t esp_intr(int irq, void *dev_id) | ||
4286 | { | ||
4287 | struct esp *esp = dev_id; | ||
4288 | unsigned long flags; | ||
4289 | |||
4290 | spin_lock_irqsave(esp->ehost->host_lock, flags); | ||
4291 | if (ESP_IRQ_P(esp->dregs)) { | ||
4292 | ESP_INTSOFF(esp->dregs); | ||
4293 | |||
4294 | ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id)); | ||
4295 | esp_handle(esp); | ||
4296 | ESPIRQ((")")); | ||
4297 | |||
4298 | ESP_INTSON(esp->dregs); | ||
4299 | } | ||
4300 | spin_unlock_irqrestore(esp->ehost->host_lock, flags); | ||
4301 | |||
4302 | return IRQ_HANDLED; | ||
4303 | } | ||
4304 | |||
4305 | static int esp_slave_alloc(struct scsi_device *SDptr) | ||
4306 | { | ||
4307 | struct esp_device *esp_dev = | ||
4308 | kmalloc(sizeof(struct esp_device), GFP_ATOMIC); | ||
4309 | |||
4310 | if (!esp_dev) | ||
4311 | return -ENOMEM; | ||
4312 | memset(esp_dev, 0, sizeof(struct esp_device)); | ||
4313 | SDptr->hostdata = esp_dev; | ||
4314 | return 0; | ||
4315 | } | ||
4316 | |||
4317 | static void esp_slave_destroy(struct scsi_device *SDptr) | ||
4318 | { | ||
4319 | struct esp *esp = (struct esp *) SDptr->host->hostdata; | ||
4320 | |||
4321 | esp->targets_present &= ~(1 << SDptr->id); | ||
4322 | kfree(SDptr->hostdata); | ||
4323 | SDptr->hostdata = NULL; | ||
4324 | } | ||
4325 | |||
4326 | static struct scsi_host_template esp_template = { | ||
4327 | .module = THIS_MODULE, | ||
4328 | .name = "esp", | ||
4329 | .info = esp_info, | ||
4330 | .slave_alloc = esp_slave_alloc, | ||
4331 | .slave_destroy = esp_slave_destroy, | ||
4332 | .queuecommand = esp_queue, | ||
4333 | .eh_abort_handler = esp_abort, | ||
4334 | .eh_bus_reset_handler = esp_reset, | ||
4335 | .can_queue = 7, | ||
4336 | .this_id = 7, | ||
4337 | .sg_tablesize = SG_ALL, | ||
4338 | .cmd_per_lun = 1, | ||
4339 | .use_clustering = ENABLE_CLUSTERING, | ||
4340 | .proc_name = "esp", | ||
4341 | .proc_info = esp_proc_info, | ||
4342 | }; | ||
4343 | |||
4344 | #ifndef CONFIG_SUN4 | ||
4345 | static struct of_device_id esp_match[] = { | ||
4346 | { | ||
4347 | .name = "SUNW,esp", | ||
4348 | .data = &esp_template, | ||
4349 | }, | ||
4350 | { | ||
4351 | .name = "SUNW,fas", | ||
4352 | .data = &esp_template, | ||
4353 | }, | ||
4354 | { | ||
4355 | .name = "esp", | ||
4356 | .data = &esp_template, | ||
4357 | }, | ||
4358 | {}, | ||
4359 | }; | ||
4360 | MODULE_DEVICE_TABLE(of, esp_match); | ||
4361 | |||
4362 | static struct of_platform_driver esp_sbus_driver = { | ||
4363 | .name = "esp", | ||
4364 | .match_table = esp_match, | ||
4365 | .probe = esp_sbus_probe, | ||
4366 | .remove = __devexit_p(esp_sbus_remove), | ||
4367 | }; | ||
4368 | #endif | ||
4369 | |||
4370 | static int __init esp_init(void) | ||
4371 | { | ||
4372 | #ifdef CONFIG_SUN4 | ||
4373 | return esp_sun4_probe(&esp_template); | ||
4374 | #else | ||
4375 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
4376 | #endif | ||
4377 | } | ||
4378 | |||
4379 | static void __exit esp_exit(void) | ||
4380 | { | ||
4381 | #ifdef CONFIG_SUN4 | ||
4382 | esp_sun4_remove(); | ||
4383 | #else | ||
4384 | of_unregister_driver(&esp_sbus_driver); | ||
4385 | #endif | ||
4386 | } | ||
4387 | |||
4388 | MODULE_DESCRIPTION("ESP Sun SCSI driver"); | ||
4389 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
4390 | MODULE_LICENSE("GPL"); | ||
4391 | MODULE_VERSION(DRV_VERSION); | ||
4392 | |||
4393 | module_init(esp_init); | ||
4394 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h deleted file mode 100644 index a98cda9121fc..000000000000 --- a/drivers/scsi/esp.h +++ /dev/null | |||
@@ -1,406 +0,0 @@ | |||
1 | /* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $ | ||
2 | * esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI | ||
3 | * Processor) driver under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_ESP_H | ||
9 | #define _SPARC_ESP_H | ||
10 | |||
11 | /* For dvma controller register definitions. */ | ||
12 | #include <asm/dma.h> | ||
13 | |||
14 | /* The ESP SCSI controllers have their register sets in three | ||
15 | * "classes": | ||
16 | * | ||
17 | * 1) Registers which are both read and write. | ||
18 | * 2) Registers which are read only. | ||
19 | * 3) Registers which are write only. | ||
20 | * | ||
21 | * Yet, they all live within the same IO space. | ||
22 | */ | ||
23 | |||
24 | /* All the ESP registers are one byte each and are accessed longwords | ||
25 | * apart with a big-endian ordering to the bytes. | ||
26 | */ | ||
27 | /* Access Description Offset */ | ||
28 | #define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */ | ||
29 | #define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */ | ||
30 | #define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */ | ||
31 | #define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */ | ||
32 | #define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */ | ||
33 | #define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */ | ||
34 | #define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */ | ||
35 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */ | ||
36 | #define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */ | ||
37 | #define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */ | ||
38 | #define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */ | ||
39 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
40 | #define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */ | ||
41 | #define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */ | ||
42 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
43 | #define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */ | ||
44 | #define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */ | ||
45 | #define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */ | ||
46 | #define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */ | ||
47 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
48 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
49 | #define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */ | ||
50 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
51 | #define ESP_REG_SIZE 0x40UL | ||
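
Because each one-byte register is spaced a longword apart, register number N sits at byte offset 4 * N from the base of the register block. A small standalone check of that spacing, using only the offsets defined above (reg_offset() is an illustrative helper, not part of the driver):

#include <assert.h>

#define ESP_TCLOW    0x00UL
#define ESP_FDATA    0x08UL
#define ESP_CMD      0x0cUL
#define ESP_TCHI     0x38UL
#define ESP_FGRND    0x3cUL
#define ESP_REG_SIZE 0x40UL

/* Register number -> byte offset: one byte-wide register per longword. */
static unsigned long reg_offset(unsigned int reg_index)
{
        return reg_index * 4UL;
}

int main(void)
{
        assert(reg_offset(0)  == ESP_TCLOW);
        assert(reg_offset(2)  == ESP_FDATA);
        assert(reg_offset(3)  == ESP_CMD);
        assert(reg_offset(14) == ESP_TCHI);
        assert(reg_offset(15) == ESP_FGRND);
        assert(reg_offset(16) == ESP_REG_SIZE); /* sixteen register slots total */
        return 0;
}
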
52 | |||
53 | /* Various revisions of the ESP board. */ | ||
54 | enum esp_rev { | ||
55 | esp100 = 0x00, /* NCR53C90 - very broken */ | ||
56 | esp100a = 0x01, /* NCR53C90A */ | ||
57 | esp236 = 0x02, | ||
58 | fas236 = 0x03, | ||
59 | fas100a = 0x04, | ||
60 | fast = 0x05, | ||
61 | fashme = 0x06, | ||
62 | espunknown = 0x07 | ||
63 | }; | ||
64 | |||
65 | /* We allocate one of these for each scsi device and attach it to | ||
66 | * SDptr->hostdata for use in the driver | ||
67 | */ | ||
68 | struct esp_device { | ||
69 | unsigned char sync_min_period; | ||
70 | unsigned char sync_max_offset; | ||
71 | unsigned sync:1; | ||
72 | unsigned wide:1; | ||
73 | unsigned disconnect:1; | ||
74 | }; | ||
75 | |||
76 | struct scsi_cmnd; | ||
77 | |||
78 | /* We get one of these for each ESP probed. */ | ||
79 | struct esp { | ||
80 | void __iomem *eregs; /* ESP controller registers */ | ||
81 | void __iomem *dregs; /* DMA controller registers */ | ||
82 | struct sbus_dma *dma; /* DMA controller sw state */ | ||
83 | struct Scsi_Host *ehost; /* Backpointer to SCSI Host */ | ||
84 | struct sbus_dev *sdev; /* Pointer to SBus entry */ | ||
85 | |||
86 | /* ESP Configuration Registers */ | ||
87 | u8 config1; /* Copy of the 1st config register */ | ||
88 | u8 config2; /* Copy of the 2nd config register */ | ||
89 | u8 config3[16]; /* Copy of the 3rd config register */ | ||
90 | |||
91 | /* The current command we are sending to the ESP chip. This esp_command | ||
92 | * ptr needs to be mapped in DVMA area so we can send commands and read | ||
93 | * from the ESP fifo without burning precious CPU cycles. Programmed I/O | ||
94 | * sucks when we have the DVMA to do it for us. The ESP is stupid and will | ||
95 | * only send out 6, 10, and 12 byte SCSI commands; other lengths we need to send | ||
96 | * one byte at a time. esp_slowcmd being set says that we are doing one | ||
97 | * of the command types ESP doesn't understand, esp_scmdp keeps track of | ||
98 | * which byte we are sending, esp_scmdleft says how many bytes to go. | ||
99 | */ | ||
100 | volatile u8 *esp_command; /* Location of command (CPU view) */ | ||
101 | __u32 esp_command_dvma;/* Location of command (DVMA view) */ | ||
102 | unsigned char esp_clen; /* Length of this command */ | ||
103 | unsigned char esp_slowcmd; | ||
104 | unsigned char *esp_scmdp; | ||
105 | unsigned char esp_scmdleft; | ||
106 | |||
107 | /* The following are used to determine the cause of an IRQ. Upon every | ||
108 | * IRQ entry we synchronize these with the hardware registers. | ||
109 | */ | ||
110 | u8 ireg; /* Copy of ESP interrupt register */ | ||
111 | u8 sreg; /* Copy of ESP status register */ | ||
112 | u8 seqreg; /* Copy of ESP sequence step register */ | ||
113 | u8 sreg2; /* Copy of HME status2 register */ | ||
114 | |||
115 | /* To save register writes to the ESP, which can be expensive, we | ||
116 | * keep track of the previous value that various registers had for | ||
117 | * the last target we connected to. If they are the same for the | ||
118 | * current target, we skip the register writes as they are not needed. | ||
119 | */ | ||
120 | u8 prev_soff, prev_stp; | ||
121 | u8 prev_cfg3, __cache_pad; | ||
122 | |||
123 | /* We also keep a cache of the previous FAS/HME DMA CSR register value. */ | ||
124 | u32 prev_hme_dmacsr; | ||
125 | |||
126 | /* The HME is the biggest piece of shit I have ever seen. */ | ||
127 | u8 hme_fifo_workaround_buffer[16 * 2]; | ||
128 | u8 hme_fifo_workaround_count; | ||
129 | |||
130 | /* For each target we keep track of save/restore data | ||
131 | * pointer information. This needs to be updated majorly | ||
132 | * when we add support for tagged queueing. -DaveM | ||
133 | */ | ||
134 | struct esp_pointers { | ||
135 | char *saved_ptr; | ||
136 | struct scatterlist *saved_buffer; | ||
137 | int saved_this_residual; | ||
138 | int saved_buffers_residual; | ||
139 | } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; | ||
140 | |||
141 | /* Clock periods, frequencies, synchronization, etc. */ | ||
142 | unsigned int cfreq; /* Clock frequency in HZ */ | ||
143 | unsigned int cfact; /* Clock conversion factor */ | ||
144 | unsigned int raw_cfact; /* Raw copy from probing */ | ||
145 | unsigned int ccycle; /* One ESP clock cycle */ | ||
146 | unsigned int ctick; /* One ESP clock time */ | ||
147 | unsigned int radelay; /* FAST chip req/ack delay */ | ||
148 | unsigned int neg_defp; /* Default negotiation period */ | ||
149 | unsigned int sync_defp; /* Default sync transfer period */ | ||
150 | unsigned int max_period; /* longest our period can be */ | ||
151 | unsigned int min_period; /* shortest period we can withstand */ | ||
152 | |||
153 | struct esp *next; /* Next ESP we probed or NULL */ | ||
154 | char prom_name[64]; /* Name of ESP device from prom */ | ||
155 | int prom_node; /* Prom node where ESP found */ | ||
156 | int esp_id; /* Unique per-ESP ID number */ | ||
157 | |||
158 | /* For slow to medium speed input clock rates we shoot for 5mb/s, | ||
159 | * but for high input clock rates we try to do 10mb/s although I | ||
160 | * don't think a transfer can even run that fast with an ESP even | ||
161 | * with DMA2 scatter gather pipelining. | ||
162 | */ | ||
163 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
164 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
165 | |||
166 | unsigned int snip; /* Sync. negotiation in progress */ | ||
167 | unsigned int wnip; /* WIDE negotiation in progress */ | ||
168 | unsigned int targets_present;/* targets spoken to before */ | ||
169 | |||
170 | int current_transfer_size; /* Set at beginning of data dma */ | ||
171 | |||
172 | u8 espcmdlog[32]; /* Log of current esp cmds sent. */ | ||
173 | u8 espcmdent; /* Current entry in esp cmd log. */ | ||
174 | |||
175 | /* Misc. info about this ESP */ | ||
176 | enum esp_rev erev; /* ESP revision */ | ||
177 | int irq; /* SBus IRQ for this ESP */ | ||
178 | int scsi_id; /* Who am I as initiator? */ | ||
179 | int scsi_id_mask; /* Bitmask of 'me'. */ | ||
180 | int diff; /* Differential SCSI bus? */ | ||
181 | int bursts; /* Burst sizes our DVMA supports */ | ||
182 | |||
183 | /* Our command queues, only one cmd lives in the current_SC queue. */ | ||
184 | struct scsi_cmnd *issue_SC; /* Commands to be issued */ | ||
185 | struct scsi_cmnd *current_SC; /* Who is currently working the bus */ | ||
186 | struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */ | ||
187 | |||
188 | /* Message goo */ | ||
189 | u8 cur_msgout[16]; | ||
190 | u8 cur_msgin[16]; | ||
191 | u8 prevmsgout, prevmsgin; | ||
192 | u8 msgout_len, msgin_len; | ||
193 | u8 msgout_ctr, msgin_ctr; | ||
194 | |||
195 | /* States that we cannot keep in the per cmd structure because they | ||
196 | * cannot be associated with any specific command. | ||
197 | */ | ||
198 | u8 resetting_bus; | ||
199 | wait_queue_head_t reset_queue; | ||
200 | }; | ||
201 | |||
202 | /* Bitfield meanings for the above registers. */ | ||
203 | |||
204 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
205 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
206 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
207 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
208 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
209 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
210 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
211 | |||
212 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
213 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
214 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
215 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
216 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */ | ||
217 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
218 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
219 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
220 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
221 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */ | ||
222 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */ | ||
223 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
224 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
225 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
226 | |||
227 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
228 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
229 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
230 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
231 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
232 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
233 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
234 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
235 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
236 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
237 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
238 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
239 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
240 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
241 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
242 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
243 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
244 | |||
245 | /* ESP command register read-write */ | ||
246 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
247 | * chip. None of them can generate interrupts 'cept | ||
248 | * the "SCSI bus reset" command if you have not disabled | ||
249 | * SCSI reset interrupts in the config1 ESP register. | ||
250 | */ | ||
251 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
252 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
253 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
254 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
255 | |||
256 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
257 | * for these commands to work. | ||
258 | */ | ||
259 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
260 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
261 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
262 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
263 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
264 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
265 | |||
266 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
267 | * to a target as the initiator for these commands to work. | ||
268 | */ | ||
269 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
270 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
271 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
272 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
273 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
274 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
275 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
276 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
277 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
278 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
279 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
280 | |||
281 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
282 | * not be connected to any targets as initiator for | ||
283 | * these commands to work. | ||
284 | */ | ||
285 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
286 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
287 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
288 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
289 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
290 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
291 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
292 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
293 | |||
294 | /* This bit enables the ESP's DMA on the SBus */ | ||
295 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
296 | |||
297 | |||
298 | /* ESP status register read-only */ | ||
299 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
300 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
301 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
302 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
303 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
304 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
305 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
306 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
307 | /* This indicates the 'interrupt pending' condition on esp236; it is a reserved | ||
308 | * bit on other revs of the ESP. | ||
309 | */ | ||
310 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
311 | |||
312 | /* HME only: status 2 register */ | ||
313 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
314 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
315 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
316 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
317 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
318 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
319 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
320 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
321 | |||
322 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
323 | * with the following values to determine the current phase the ESP | ||
324 | * (at least thinks it) is in. For our purposes we also add our own | ||
325 | * software 'done' bit for our phase management engine. | ||
326 | */ | ||
327 | #define ESP_DOP (0) /* Data Out */ | ||
328 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
329 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
330 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
331 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
332 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
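
The six values above are what the masked status register is compared against. A minimal standalone sketch of that decode (esp_phase_name() is an illustrative helper, not a driver function; the constants are copied from the definitions above):

#include <stdio.h>

#define ESP_STAT_PIO   0x01
#define ESP_STAT_PCD   0x02
#define ESP_STAT_PMSG  0x04
#define ESP_STAT_PMASK 0x07

#define ESP_DOP   (0)
#define ESP_DIP   (ESP_STAT_PIO)
#define ESP_CMDP  (ESP_STAT_PCD)
#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO)
#define ESP_MOP   (ESP_STAT_PMSG|ESP_STAT_PCD)
#define ESP_MIP   (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO)

/* Decode the SCSI bus phase from a raw ESP status byte. */
static const char *esp_phase_name(unsigned char sreg)
{
        switch (sreg & ESP_STAT_PMASK) {
        case ESP_DOP:   return "DATA OUT";
        case ESP_DIP:   return "DATA IN";
        case ESP_CMDP:  return "COMMAND";
        case ESP_STATP: return "STATUS";
        case ESP_MOP:   return "MESSAGE OUT";
        case ESP_MIP:   return "MESSAGE IN";
        default:        return "reserved";
        }
}

int main(void)
{
        /* 0x87 = interrupt pending plus all three phase bits -> MESSAGE IN */
        printf("%s\n", esp_phase_name(0x87));
        return 0;
}
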
333 | |||
334 | /* ESP interrupt register read-only */ | ||
335 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
336 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
337 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
338 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
339 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
340 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
341 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
342 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
343 | |||
344 | /* Interrupt status macros */ | ||
345 | #define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR)) | ||
346 | #define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC)) | ||
347 | #define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN)) | ||
348 | #define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S)) | ||
349 | #define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \ | ||
350 | (ESP_SELECT_WITHOUT_ATN_IRQ(esp))) | ||
351 | #define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL)) | ||
352 | |||
353 | /* ESP sequence step register read-only */ | ||
354 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
355 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
356 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
357 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
358 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
359 | * bytes to be lost | ||
360 | */ | ||
361 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
362 | |||
363 | /* Ho hum, some ESPs set the step register to this as well... */ | ||
364 | #define ESP_STEP_FINI5 0x05 | ||
365 | #define ESP_STEP_FINI6 0x06 | ||
366 | #define ESP_STEP_FINI7 0x07 | ||
367 | |||
368 | /* ESP chip-test register read-write */ | ||
369 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
370 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
371 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
372 | |||
373 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
374 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
375 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
376 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
377 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
378 | |||
379 | /* ESP fifo flags register read-only */ | ||
380 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
381 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
382 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
383 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
384 | |||
385 | /* ESP clock conversion factor register write-only */ | ||
386 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
387 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
388 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
389 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
390 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
391 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
392 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
393 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
394 | |||
395 | /* HME only... */ | ||
396 | #define ESP_BUSID_RESELID 0x10 | ||
397 | #define ESP_BUSID_CTR32BIT 0x40 | ||
398 | |||
399 | #define ESP_BUS_TIMEOUT 275 /* In milli-seconds */ | ||
400 | #define ESP_TIMEO_CONST 8192 | ||
401 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
402 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
403 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
404 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
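
As a rough worked example of the two macros most of the timing setup hangs off (a standalone sketch; the 40 MHz input clock and conversion factor of 8 are illustrative assumptions, not values read from a probed board):

#include <stdio.h>

#define ESP_BUS_TIMEOUT 275   /* In milliseconds */
#define ESP_NEG_DEFP(mhz, cfact) \
        ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))

int main(void)
{
        unsigned long cfreq = 40000000UL;  /* assumed 40 MHz input clock */
        unsigned long cfact = 8;           /* assumed clock conversion factor */

        /* 1000000000 / 40000 = 25000, i.e. a 25 ns clock period in picoseconds */
        printf("ccycle   = %lu\n", ESP_MHZ_TO_CYCLE(cfreq));

        /* (275 * 40000) / (8192 * 8) = 167, the select/reselect timeout
         * value that ends up written to ESP_TIMEO
         */
        printf("neg_defp = %lu\n", ESP_NEG_DEFP(cfreq, cfact));
        return 0;
}
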
405 | |||
406 | #endif /* !(_SPARC_ESP_H) */ | ||
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c new file mode 100644 index 000000000000..3cd5bf723da4 --- /dev/null +++ b/drivers/scsi/esp_scsi.c | |||
@@ -0,0 +1,2710 @@ | |||
1 | /* esp_scsi.c: ESP SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/init.h> | ||
16 | |||
17 | #include <asm/irq.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/dma.h> | ||
20 | |||
21 | #include <scsi/scsi.h> | ||
22 | #include <scsi/scsi_host.h> | ||
23 | #include <scsi/scsi_cmnd.h> | ||
24 | #include <scsi/scsi_device.h> | ||
25 | #include <scsi/scsi_tcq.h> | ||
26 | #include <scsi/scsi_dbg.h> | ||
27 | #include <scsi/scsi_transport_spi.h> | ||
28 | |||
29 | #include "esp_scsi.h" | ||
30 | |||
31 | #define DRV_MODULE_NAME "esp" | ||
32 | #define PFX DRV_MODULE_NAME ": " | ||
33 | #define DRV_VERSION "2.000" | ||
34 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
35 | |||
36 | /* SCSI bus reset settle time in seconds. */ | ||
37 | static int esp_bus_reset_settle = 3; | ||
38 | |||
39 | static u32 esp_debug; | ||
40 | #define ESP_DEBUG_INTR 0x00000001 | ||
41 | #define ESP_DEBUG_SCSICMD 0x00000002 | ||
42 | #define ESP_DEBUG_RESET 0x00000004 | ||
43 | #define ESP_DEBUG_MSGIN 0x00000008 | ||
44 | #define ESP_DEBUG_MSGOUT 0x00000010 | ||
45 | #define ESP_DEBUG_CMDDONE 0x00000020 | ||
46 | #define ESP_DEBUG_DISCONNECT 0x00000040 | ||
47 | #define ESP_DEBUG_DATASTART 0x00000080 | ||
48 | #define ESP_DEBUG_DATADONE 0x00000100 | ||
49 | #define ESP_DEBUG_RECONNECT 0x00000200 | ||
50 | #define ESP_DEBUG_AUTOSENSE 0x00000400 | ||
51 | |||
52 | #define esp_log_intr(f, a...) \ | ||
53 | do { if (esp_debug & ESP_DEBUG_INTR) \ | ||
54 | printk(f, ## a); \ | ||
55 | } while (0) | ||
56 | |||
57 | #define esp_log_reset(f, a...) \ | ||
58 | do { if (esp_debug & ESP_DEBUG_RESET) \ | ||
59 | printk(f, ## a); \ | ||
60 | } while (0) | ||
61 | |||
62 | #define esp_log_msgin(f, a...) \ | ||
63 | do { if (esp_debug & ESP_DEBUG_MSGIN) \ | ||
64 | printk(f, ## a); \ | ||
65 | } while (0) | ||
66 | |||
67 | #define esp_log_msgout(f, a...) \ | ||
68 | do { if (esp_debug & ESP_DEBUG_MSGOUT) \ | ||
69 | printk(f, ## a); \ | ||
70 | } while (0) | ||
71 | |||
72 | #define esp_log_cmddone(f, a...) \ | ||
73 | do { if (esp_debug & ESP_DEBUG_CMDDONE) \ | ||
74 | printk(f, ## a); \ | ||
75 | } while (0) | ||
76 | |||
77 | #define esp_log_disconnect(f, a...) \ | ||
78 | do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ | ||
79 | printk(f, ## a); \ | ||
80 | } while (0) | ||
81 | |||
82 | #define esp_log_datastart(f, a...) \ | ||
83 | do { if (esp_debug & ESP_DEBUG_DATASTART) \ | ||
84 | printk(f, ## a); \ | ||
85 | } while (0) | ||
86 | |||
87 | #define esp_log_datadone(f, a...) \ | ||
88 | do { if (esp_debug & ESP_DEBUG_DATADONE) \ | ||
89 | printk(f, ## a); \ | ||
90 | } while (0) | ||
91 | |||
92 | #define esp_log_reconnect(f, a...) \ | ||
93 | do { if (esp_debug & ESP_DEBUG_RECONNECT) \ | ||
94 | printk(f, ## a); \ | ||
95 | } while (0) | ||
96 | |||
97 | #define esp_log_autosense(f, a...) \ | ||
98 | do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ | ||
99 | printk(f, ## a); \ | ||
100 | } while (0) | ||
101 | |||
102 | #define esp_read8(REG) esp->ops->esp_read8(esp, REG) | ||
103 | #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) | ||
104 | |||
105 | static void esp_log_fill_regs(struct esp *esp, | ||
106 | struct esp_event_ent *p) | ||
107 | { | ||
108 | p->sreg = esp->sreg; | ||
109 | p->seqreg = esp->seqreg; | ||
110 | p->sreg2 = esp->sreg2; | ||
111 | p->ireg = esp->ireg; | ||
112 | p->select_state = esp->select_state; | ||
113 | p->event = esp->event; | ||
114 | } | ||
115 | |||
116 | void scsi_esp_cmd(struct esp *esp, u8 val) | ||
117 | { | ||
118 | struct esp_event_ent *p; | ||
119 | int idx = esp->esp_event_cur; | ||
120 | |||
121 | p = &esp->esp_event_log[idx]; | ||
122 | p->type = ESP_EVENT_TYPE_CMD; | ||
123 | p->val = val; | ||
124 | esp_log_fill_regs(esp, p); | ||
125 | |||
126 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
127 | |||
128 | esp_write8(val, ESP_CMD); | ||
129 | } | ||
130 | EXPORT_SYMBOL(scsi_esp_cmd); | ||
131 | |||
132 | static void esp_event(struct esp *esp, u8 val) | ||
133 | { | ||
134 | struct esp_event_ent *p; | ||
135 | int idx = esp->esp_event_cur; | ||
136 | |||
137 | p = &esp->esp_event_log[idx]; | ||
138 | p->type = ESP_EVENT_TYPE_EVENT; | ||
139 | p->val = val; | ||
140 | esp_log_fill_regs(esp, p); | ||
141 | |||
142 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
143 | |||
144 | esp->event = val; | ||
145 | } | ||
146 | |||
147 | static void esp_dump_cmd_log(struct esp *esp) | ||
148 | { | ||
149 | int idx = esp->esp_event_cur; | ||
150 | int stop = idx; | ||
151 | |||
152 | printk(KERN_INFO PFX "esp%d: Dumping command log\n", | ||
153 | esp->host->unique_id); | ||
154 | do { | ||
155 | struct esp_event_ent *p = &esp->esp_event_log[idx]; | ||
156 | |||
157 | printk(KERN_INFO PFX "esp%d: ent[%d] %s ", | ||
158 | esp->host->unique_id, idx, | ||
159 | p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); | ||
160 | |||
161 | printk("val[%02x] sreg[%02x] seqreg[%02x] " | ||
162 | "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", | ||
163 | p->val, p->sreg, p->seqreg, | ||
164 | p->sreg2, p->ireg, p->select_state, p->event); | ||
165 | |||
166 | idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
167 | } while (idx != stop); | ||
168 | } | ||
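
scsi_esp_cmd(), esp_event() and esp_dump_cmd_log() all step through esp_event_log[] with (idx + 1) & (ESP_EVENT_LOG_SZ - 1), the usual power-of-two ring-buffer wrap (ESP_EVENT_LOG_SZ itself is defined in esp_scsi.h, outside this hunk). A standalone sketch of the wrap, assuming an illustrative size of 32:

#include <stdio.h>

#define TOY_LOG_SZ 32  /* must be a power of two for the mask to act as a wrap */

int main(void)
{
        int idx = TOY_LOG_SZ - 2;
        int i;

        /* Step past the end of the buffer and watch the index wrap to 0. */
        for (i = 0; i < 4; i++) {
                printf("%d ", idx);
                idx = (idx + 1) & (TOY_LOG_SZ - 1);
        }
        printf("\n");  /* prints: 30 31 0 1 */
        return 0;
}
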
169 | |||
170 | static void esp_flush_fifo(struct esp *esp) | ||
171 | { | ||
172 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
173 | if (esp->rev == ESP236) { | ||
174 | int lim = 1000; | ||
175 | |||
176 | while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { | ||
177 | if (--lim == 0) { | ||
178 | printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES " | ||
179 | "will not clear!\n", | ||
180 | esp->host->unique_id); | ||
181 | break; | ||
182 | } | ||
183 | udelay(1); | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | |||
188 | static void hme_read_fifo(struct esp *esp) | ||
189 | { | ||
190 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
191 | int idx = 0; | ||
192 | |||
193 | while (fcnt--) { | ||
194 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
195 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
196 | } | ||
197 | if (esp->sreg2 & ESP_STAT2_F1BYTE) { | ||
198 | esp_write8(0, ESP_FDATA); | ||
199 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
200 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
201 | } | ||
202 | esp->fifo_cnt = idx; | ||
203 | } | ||
204 | |||
205 | static void esp_set_all_config3(struct esp *esp, u8 val) | ||
206 | { | ||
207 | int i; | ||
208 | |||
209 | for (i = 0; i < ESP_MAX_TARGET; i++) | ||
210 | esp->target[i].esp_config3 = val; | ||
211 | } | ||
212 | |||
213 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
214 | static void esp_reset_esp(struct esp *esp) | ||
215 | { | ||
216 | u8 family_code, version; | ||
217 | |||
218 | /* Now reset the ESP chip */ | ||
219 | scsi_esp_cmd(esp, ESP_CMD_RC); | ||
220 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
221 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
222 | |||
223 | /* Reload the configuration registers */ | ||
224 | esp_write8(esp->cfact, ESP_CFACT); | ||
225 | |||
226 | esp->prev_stp = 0; | ||
227 | esp_write8(esp->prev_stp, ESP_STP); | ||
228 | |||
229 | esp->prev_soff = 0; | ||
230 | esp_write8(esp->prev_soff, ESP_SOFF); | ||
231 | |||
232 | esp_write8(esp->neg_defp, ESP_TIMEO); | ||
233 | |||
234 | /* This is the only point at which it is reliable to read | ||
235 | * the ID-code for the fast ESP chip variants. | ||
236 | */ | ||
237 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
238 | if (esp->rev == FAST) { | ||
239 | version = esp_read8(ESP_UID); | ||
240 | family_code = (version & 0xf8) >> 3; | ||
241 | if (family_code == 0x02) | ||
242 | esp->rev = FAS236; | ||
243 | else if (family_code == 0x0a) | ||
244 | esp->rev = FASHME; /* Version is usually '5'. */ | ||
245 | else | ||
246 | esp->rev = FAS100A; | ||
247 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
248 | } else { | ||
249 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
250 | } | ||
251 | esp->max_period = (esp->max_period + 3)>>2; | ||
252 | esp->min_period = (esp->min_period + 3)>>2; | ||
253 | |||
254 | esp_write8(esp->config1, ESP_CFG1); | ||
255 | switch (esp->rev) { | ||
256 | case ESP100: | ||
257 | /* nothing to do */ | ||
258 | break; | ||
259 | |||
260 | case ESP100A: | ||
261 | esp_write8(esp->config2, ESP_CFG2); | ||
262 | break; | ||
263 | |||
264 | case ESP236: | ||
265 | /* Slow 236 */ | ||
266 | esp_write8(esp->config2, ESP_CFG2); | ||
267 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
268 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
269 | break; | ||
270 | |||
271 | case FASHME: | ||
272 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
273 | /* fallthrough... */ | ||
274 | |||
275 | case FAS236: | ||
276 | /* Fast 236 or HME */ | ||
277 | esp_write8(esp->config2, ESP_CFG2); | ||
278 | if (esp->rev == FASHME) { | ||
279 | u8 cfg3 = esp->target[0].esp_config3; | ||
280 | |||
281 | cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
282 | if (esp->scsi_id >= 8) | ||
283 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
284 | esp_set_all_config3(esp, cfg3); | ||
285 | } else { | ||
286 | u32 cfg3 = esp->target[0].esp_config3; | ||
287 | |||
288 | cfg3 |= ESP_CONFIG3_FCLK; | ||
289 | esp_set_all_config3(esp, cfg3); | ||
290 | } | ||
291 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
292 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
293 | if (esp->rev == FASHME) { | ||
294 | esp->radelay = 80; | ||
295 | } else { | ||
296 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
297 | esp->radelay = 0; | ||
298 | else | ||
299 | esp->radelay = 96; | ||
300 | } | ||
301 | break; | ||
302 | |||
303 | case FAS100A: | ||
304 | /* Fast 100a */ | ||
305 | esp_write8(esp->config2, ESP_CFG2); | ||
306 | esp_set_all_config3(esp, | ||
307 | (esp->target[0].esp_config3 | | ||
308 | ESP_CONFIG3_FCLOCK)); | ||
309 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
310 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
311 | esp->radelay = 32; | ||
312 | break; | ||
313 | |||
314 | default: | ||
315 | break; | ||
316 | } | ||
317 | |||
318 | /* Eat any bitrot in the chip */ | ||
319 | esp_read8(ESP_INTRPT); | ||
320 | udelay(100); | ||
321 | } | ||
322 | |||
323 | static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
324 | { | ||
325 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
326 | struct scatterlist *sg = cmd->request_buffer; | ||
327 | int dir = cmd->sc_data_direction; | ||
328 | int total, i; | ||
329 | |||
330 | if (dir == DMA_NONE) | ||
331 | return; | ||
332 | |||
333 | BUG_ON(cmd->use_sg == 0); | ||
334 | |||
335 | spriv->u.num_sg = esp->ops->map_sg(esp, sg, | ||
336 | cmd->use_sg, dir); | ||
337 | spriv->cur_residue = sg_dma_len(sg); | ||
338 | spriv->cur_sg = sg; | ||
339 | |||
340 | total = 0; | ||
341 | for (i = 0; i < spriv->u.num_sg; i++) | ||
342 | total += sg_dma_len(&sg[i]); | ||
343 | spriv->tot_residue = total; | ||
344 | } | ||
345 | |||
346 | static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, | ||
347 | struct scsi_cmnd *cmd) | ||
348 | { | ||
349 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
350 | |||
351 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
352 | return ent->sense_dma + | ||
353 | (ent->sense_ptr - cmd->sense_buffer); | ||
354 | } | ||
355 | |||
356 | return sg_dma_address(p->cur_sg) + | ||
357 | (sg_dma_len(p->cur_sg) - | ||
358 | p->cur_residue); | ||
359 | } | ||
360 | |||
361 | static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, | ||
362 | struct scsi_cmnd *cmd) | ||
363 | { | ||
364 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
365 | |||
366 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
367 | return SCSI_SENSE_BUFFERSIZE - | ||
368 | (ent->sense_ptr - cmd->sense_buffer); | ||
369 | } | ||
370 | return p->cur_residue; | ||
371 | } | ||
372 | |||
373 | static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, | ||
374 | struct scsi_cmnd *cmd, unsigned int len) | ||
375 | { | ||
376 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
377 | |||
378 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
379 | ent->sense_ptr += len; | ||
380 | return; | ||
381 | } | ||
382 | |||
383 | p->cur_residue -= len; | ||
384 | p->tot_residue -= len; | ||
385 | if (p->cur_residue < 0 || p->tot_residue < 0) { | ||
386 | printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", | ||
387 | esp->host->unique_id); | ||
388 | printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " | ||
389 | "len[%u]\n", | ||
390 | esp->host->unique_id, | ||
391 | p->cur_residue, p->tot_residue, len); | ||
392 | p->cur_residue = 0; | ||
393 | p->tot_residue = 0; | ||
394 | } | ||
395 | if (!p->cur_residue && p->tot_residue) { | ||
396 | p->cur_sg++; | ||
397 | p->cur_residue = sg_dma_len(p->cur_sg); | ||
398 | } | ||
399 | } | ||
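
esp_advance_dma() keeps two residues per command: bytes left in the current scatterlist segment and bytes left in the whole transfer, stepping cur_sg forward once a segment drains. A toy standalone model of that bookkeeping (toy_sg and toy_advance() are illustrative; a plain array of segment lengths stands in for the mapped scatterlist):

#include <stdio.h>

struct toy_sg {
        unsigned int len[2];       /* segment lengths, in place of sg_dma_len() */
        int cur;                   /* index of the current segment */
        unsigned int cur_residue;  /* bytes left in the current segment */
        unsigned int tot_residue;  /* bytes left in the whole transfer */
};

static void toy_advance(struct toy_sg *p, unsigned int len)
{
        p->cur_residue -= len;
        p->tot_residue -= len;
        if (!p->cur_residue && p->tot_residue) {
                p->cur++;
                p->cur_residue = p->len[p->cur];
        }
}

int main(void)
{
        struct toy_sg sg = { { 4096, 8192 }, 0, 4096, 12288 };

        toy_advance(&sg, 4096);  /* drains segment 0, steps to segment 1 */
        toy_advance(&sg, 8192);  /* drains segment 1, transfer complete */
        printf("cur=%d cur_residue=%u tot_residue=%u\n",
               sg.cur, sg.cur_residue, sg.tot_residue);
        return 0;
}
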
400 | |||
401 | static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
402 | { | ||
403 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
404 | int dir = cmd->sc_data_direction; | ||
405 | |||
406 | if (dir == DMA_NONE) | ||
407 | return; | ||
408 | |||
409 | esp->ops->unmap_sg(esp, cmd->request_buffer, | ||
410 | spriv->u.num_sg, dir); | ||
411 | } | ||
412 | |||
413 | static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
414 | { | ||
415 | struct scsi_cmnd *cmd = ent->cmd; | ||
416 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
417 | |||
418 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
419 | ent->saved_sense_ptr = ent->sense_ptr; | ||
420 | return; | ||
421 | } | ||
422 | ent->saved_cur_residue = spriv->cur_residue; | ||
423 | ent->saved_cur_sg = spriv->cur_sg; | ||
424 | ent->saved_tot_residue = spriv->tot_residue; | ||
425 | } | ||
426 | |||
427 | static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
428 | { | ||
429 | struct scsi_cmnd *cmd = ent->cmd; | ||
430 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
431 | |||
432 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
433 | ent->sense_ptr = ent->saved_sense_ptr; | ||
434 | return; | ||
435 | } | ||
436 | spriv->cur_residue = ent->saved_cur_residue; | ||
437 | spriv->cur_sg = ent->saved_cur_sg; | ||
438 | spriv->tot_residue = ent->saved_tot_residue; | ||
439 | } | ||
440 | |||
441 | static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd) | ||
442 | { | ||
443 | if (cmd->cmd_len == 6 || | ||
444 | cmd->cmd_len == 10 || | ||
445 | cmd->cmd_len == 12) { | ||
446 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
447 | } else { | ||
448 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | static void esp_write_tgt_config3(struct esp *esp, int tgt) | ||
453 | { | ||
454 | if (esp->rev > ESP100A) { | ||
455 | u8 val = esp->target[tgt].esp_config3; | ||
456 | |||
457 | if (val != esp->prev_cfg3) { | ||
458 | esp->prev_cfg3 = val; | ||
459 | esp_write8(val, ESP_CFG3); | ||
460 | } | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void esp_write_tgt_sync(struct esp *esp, int tgt) | ||
465 | { | ||
466 | u8 off = esp->target[tgt].esp_offset; | ||
467 | u8 per = esp->target[tgt].esp_period; | ||
468 | |||
469 | if (off != esp->prev_soff) { | ||
470 | esp->prev_soff = off; | ||
471 | esp_write8(off, ESP_SOFF); | ||
472 | } | ||
473 | if (per != esp->prev_stp) { | ||
474 | esp->prev_stp = per; | ||
475 | esp_write8(per, ESP_STP); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) | ||
480 | { | ||
481 | if (esp->rev == FASHME) { | ||
482 | /* Arbitrary segment boundaries, 24-bit counts. */ | ||
483 | if (dma_len > (1U << 24)) | ||
484 | dma_len = (1U << 24); | ||
485 | } else { | ||
486 | u32 base, end; | ||
487 | |||
488 | /* The other ESP chip variants are limited to 16 bits of transfer | ||
489 | * count. Actually on FAS100A and FAS236 we could get | ||
490 | * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB | ||
491 | * in the ESP_CFG2 register but that causes other unwanted | ||
492 | * changes so we don't use it currently. | ||
493 | */ | ||
494 | if (dma_len > (1U << 16)) | ||
495 | dma_len = (1U << 16); | ||
496 | |||
497 | /* All of the DMA variants hooked up to these chips | ||
498 | * cannot handle crossing a 24-bit address boundary. | ||
499 | */ | ||
500 | base = dma_addr & ((1U << 24) - 1U); | ||
501 | end = base + dma_len; | ||
502 | if (end > (1U << 24)) | ||
503 | end = (1U <<24); | ||
504 | dma_len = end - base; | ||
505 | } | ||
506 | return dma_len; | ||
507 | } | ||
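
For the non-HME chips, the clamp above does two things: cap the count at 16 bits and stop a transfer at the next 16 MB (24-bit) address boundary. A standalone sketch of the same arithmetic with illustrative addresses (toy_dma_length_limit() simply restates the non-FASHME branch):

#include <stdio.h>

/* Same clamping logic as the non-FASHME branch above, pulled out standalone. */
static unsigned int toy_dma_length_limit(unsigned int dma_addr, unsigned int dma_len)
{
        unsigned int base, end;

        if (dma_len > (1U << 16))
                dma_len = (1U << 16);

        base = dma_addr & ((1U << 24) - 1U);
        end = base + dma_len;
        if (end > (1U << 24))
                end = (1U << 24);
        return end - base;
}

int main(void)
{
        /* 0x4000 bytes requested, but only 0x1000 remain before the
         * 24-bit boundary at 0x1000000, so the transfer is clamped.
         */
        printf("0x%x\n", toy_dma_length_limit(0x00fff000U, 0x4000U));  /* 0x1000 */

        /* Far from a boundary, only the 16-bit count cap applies. */
        printf("0x%x\n", toy_dma_length_limit(0x00100000U, 0x20000U)); /* 0x10000 */
        return 0;
}
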
508 | |||
509 | static int esp_need_to_nego_wide(struct esp_target_data *tp) | ||
510 | { | ||
511 | struct scsi_target *target = tp->starget; | ||
512 | |||
513 | return spi_width(target) != tp->nego_goal_width; | ||
514 | } | ||
515 | |||
516 | static int esp_need_to_nego_sync(struct esp_target_data *tp) | ||
517 | { | ||
518 | struct scsi_target *target = tp->starget; | ||
519 | |||
520 | /* When offset is zero, period is "don't care". */ | ||
521 | if (!spi_offset(target) && !tp->nego_goal_offset) | ||
522 | return 0; | ||
523 | |||
524 | if (spi_offset(target) == tp->nego_goal_offset && | ||
525 | spi_period(target) == tp->nego_goal_period) | ||
526 | return 0; | ||
527 | |||
528 | return 1; | ||
529 | } | ||
530 | |||
531 | static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, | ||
532 | struct esp_lun_data *lp) | ||
533 | { | ||
534 | if (!ent->tag[0]) { | ||
535 | /* Non-tagged, slot already taken? */ | ||
536 | if (lp->non_tagged_cmd) | ||
537 | return -EBUSY; | ||
538 | |||
539 | if (lp->hold) { | ||
540 | /* We are being held by active tagged | ||
541 | * commands. | ||
542 | */ | ||
543 | if (lp->num_tagged) | ||
544 | return -EBUSY; | ||
545 | |||
546 | /* Tagged commands completed, we can unplug | ||
547 | * the queue and run this untagged command. | ||
548 | */ | ||
549 | lp->hold = 0; | ||
550 | } else if (lp->num_tagged) { | ||
551 | /* Plug the queue until num_tagged decreases | ||
552 | * to zero in esp_free_lun_tag. | ||
553 | */ | ||
554 | lp->hold = 1; | ||
555 | return -EBUSY; | ||
556 | } | ||
557 | |||
558 | lp->non_tagged_cmd = ent; | ||
559 | return 0; | ||
560 | } else { | ||
561 | /* Tagged command, see if blocked by a | ||
562 | * non-tagged one. | ||
563 | */ | ||
564 | if (lp->non_tagged_cmd || lp->hold) | ||
565 | return -EBUSY; | ||
566 | } | ||
567 | |||
568 | BUG_ON(lp->tagged_cmds[ent->tag[1]]); | ||
569 | |||
570 | lp->tagged_cmds[ent->tag[1]] = ent; | ||
571 | lp->num_tagged++; | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static void esp_free_lun_tag(struct esp_cmd_entry *ent, | ||
577 | struct esp_lun_data *lp) | ||
578 | { | ||
579 | if (ent->tag[0]) { | ||
580 | BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); | ||
581 | lp->tagged_cmds[ent->tag[1]] = NULL; | ||
582 | lp->num_tagged--; | ||
583 | } else { | ||
584 | BUG_ON(lp->non_tagged_cmd != ent); | ||
585 | lp->non_tagged_cmd = NULL; | ||
586 | } | ||
587 | } | ||
588 | |||
589 | /* When a contingent allegiance condition is created, we force-feed a | ||
590 | * REQUEST_SENSE command to the device to fetch the sense data. I | ||
591 | * tried many other schemes, relying on the scsi error handling layer | ||
592 | * to send out the REQUEST_SENSE automatically, but this was difficult | ||
593 | * to get right especially in the presence of applications like smartd | ||
594 | * which use SG_IO to send out their own REQUEST_SENSE commands. | ||
595 | */ | ||
596 | static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) | ||
597 | { | ||
598 | struct scsi_cmnd *cmd = ent->cmd; | ||
599 | struct scsi_device *dev = cmd->device; | ||
600 | int tgt, lun; | ||
601 | u8 *p, val; | ||
602 | |||
603 | tgt = dev->id; | ||
604 | lun = dev->lun; | ||
605 | |||
606 | |||
607 | if (!ent->sense_ptr) { | ||
608 | esp_log_autosense("esp%d: Doing auto-sense for " | ||
609 | "tgt[%d] lun[%d]\n", | ||
610 | esp->host->unique_id, tgt, lun); | ||
611 | |||
612 | ent->sense_ptr = cmd->sense_buffer; | ||
613 | ent->sense_dma = esp->ops->map_single(esp, | ||
614 | ent->sense_ptr, | ||
615 | SCSI_SENSE_BUFFERSIZE, | ||
616 | DMA_FROM_DEVICE); | ||
617 | } | ||
618 | ent->saved_sense_ptr = ent->sense_ptr; | ||
619 | |||
620 | esp->active_cmd = ent; | ||
621 | |||
622 | p = esp->command_block; | ||
623 | esp->msg_out_len = 0; | ||
624 | |||
625 | *p++ = IDENTIFY(0, lun); | ||
626 | *p++ = REQUEST_SENSE; | ||
627 | *p++ = ((dev->scsi_level <= SCSI_2) ? | ||
628 | (lun << 5) : 0); | ||
629 | *p++ = 0; | ||
630 | *p++ = 0; | ||
631 | *p++ = SCSI_SENSE_BUFFERSIZE; | ||
632 | *p++ = 0; | ||
633 | |||
634 | esp->select_state = ESP_SELECT_BASIC; | ||
635 | |||
636 | val = tgt; | ||
637 | if (esp->rev == FASHME) | ||
638 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
639 | esp_write8(val, ESP_BUSID); | ||
640 | |||
641 | esp_write_tgt_sync(esp, tgt); | ||
642 | esp_write_tgt_config3(esp, tgt); | ||
643 | |||
644 | val = (p - esp->command_block); | ||
645 | |||
646 | if (esp->rev == FASHME) | ||
647 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
648 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
649 | val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA); | ||
650 | } | ||
651 | |||
652 | static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) | ||
653 | { | ||
654 | struct esp_cmd_entry *ent; | ||
655 | |||
656 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
657 | struct scsi_cmnd *cmd = ent->cmd; | ||
658 | struct scsi_device *dev = cmd->device; | ||
659 | struct esp_lun_data *lp = dev->hostdata; | ||
660 | |||
661 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
662 | ent->tag[0] = 0; | ||
663 | ent->tag[1] = 0; | ||
664 | return ent; | ||
665 | } | ||
666 | |||
667 | if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { | ||
668 | ent->tag[0] = 0; | ||
669 | ent->tag[1] = 0; | ||
670 | } | ||
671 | |||
672 | if (esp_alloc_lun_tag(ent, lp) < 0) | ||
673 | continue; | ||
674 | |||
675 | return ent; | ||
676 | } | ||
677 | |||
678 | return NULL; | ||
679 | } | ||
680 | |||
681 | static void esp_maybe_execute_command(struct esp *esp) | ||
682 | { | ||
683 | struct esp_target_data *tp; | ||
684 | struct esp_lun_data *lp; | ||
685 | struct scsi_device *dev; | ||
686 | struct scsi_cmnd *cmd; | ||
687 | struct esp_cmd_entry *ent; | ||
688 | int tgt, lun, i; | ||
689 | u32 val, start_cmd; | ||
690 | u8 *p; | ||
691 | |||
692 | if (esp->active_cmd || | ||
693 | (esp->flags & ESP_FLAG_RESETTING)) | ||
694 | return; | ||
695 | |||
696 | ent = find_and_prep_issuable_command(esp); | ||
697 | if (!ent) | ||
698 | return; | ||
699 | |||
700 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
701 | esp_autosense(esp, ent); | ||
702 | return; | ||
703 | } | ||
704 | |||
705 | cmd = ent->cmd; | ||
706 | dev = cmd->device; | ||
707 | tgt = dev->id; | ||
708 | lun = dev->lun; | ||
709 | tp = &esp->target[tgt]; | ||
710 | lp = dev->hostdata; | ||
711 | |||
712 | list_del(&ent->list); | ||
713 | list_add(&ent->list, &esp->active_cmds); | ||
714 | |||
715 | esp->active_cmd = ent; | ||
716 | |||
717 | esp_map_dma(esp, cmd); | ||
718 | esp_save_pointers(esp, ent); | ||
719 | |||
720 | esp_check_command_len(esp, cmd); | ||
721 | |||
722 | p = esp->command_block; | ||
723 | |||
724 | esp->msg_out_len = 0; | ||
725 | if (tp->flags & ESP_TGT_CHECK_NEGO) { | ||
726 | /* Need to negotiate. If the target is broken | ||
727 | * go for asynchronous transfers and non-wide. | ||
728 | */ | ||
729 | if (tp->flags & ESP_TGT_BROKEN) { | ||
730 | tp->flags &= ~ESP_TGT_DISCONNECT; | ||
731 | tp->nego_goal_period = 0; | ||
732 | tp->nego_goal_offset = 0; | ||
733 | tp->nego_goal_width = 0; | ||
734 | tp->nego_goal_tags = 0; | ||
735 | } | ||
736 | |||
737 | /* If the settings are not changing, skip this. */ | ||
738 | if (spi_width(tp->starget) == tp->nego_goal_width && | ||
739 | spi_period(tp->starget) == tp->nego_goal_period && | ||
740 | spi_offset(tp->starget) == tp->nego_goal_offset) { | ||
741 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
742 | goto build_identify; | ||
743 | } | ||
744 | |||
745 | if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { | ||
746 | esp->msg_out_len = | ||
747 | spi_populate_width_msg(&esp->msg_out[0], | ||
748 | (tp->nego_goal_width ? | ||
749 | 1 : 0)); | ||
750 | tp->flags |= ESP_TGT_NEGO_WIDE; | ||
751 | } else if (esp_need_to_nego_sync(tp)) { | ||
752 | esp->msg_out_len = | ||
753 | spi_populate_sync_msg(&esp->msg_out[0], | ||
754 | tp->nego_goal_period, | ||
755 | tp->nego_goal_offset); | ||
756 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
757 | } else { | ||
758 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
759 | } | ||
760 | |||
761 | /* Process it like a slow command. */ | ||
762 | if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC)) | ||
763 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
764 | } | ||
765 | |||
766 | build_identify: | ||
767 | /* If we don't have a lun-data struct yet, we're probing | ||
768 | * so do not disconnect. Also, do not disconnect unless | ||
769 | * we have a tag on this command. | ||
770 | */ | ||
771 | if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0]) | ||
772 | *p++ = IDENTIFY(1, lun); | ||
773 | else | ||
774 | *p++ = IDENTIFY(0, lun); | ||
775 | |||
776 | if (ent->tag[0] && esp->rev == ESP100) { | ||
777 | /* ESP100 lacks the select w/atn3 command; use select | ||
778 | * and stop instead. | ||
779 | */ | ||
780 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
781 | } | ||
782 | |||
783 | if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { | ||
784 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; | ||
785 | if (ent->tag[0]) { | ||
786 | *p++ = ent->tag[0]; | ||
787 | *p++ = ent->tag[1]; | ||
788 | |||
789 | start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; | ||
790 | } | ||
791 | |||
792 | for (i = 0; i < cmd->cmd_len; i++) | ||
793 | *p++ = cmd->cmnd[i]; | ||
794 | |||
795 | esp->select_state = ESP_SELECT_BASIC; | ||
796 | } else { | ||
797 | esp->cmd_bytes_left = cmd->cmd_len; | ||
798 | esp->cmd_bytes_ptr = &cmd->cmnd[0]; | ||
799 | |||
800 | if (ent->tag[0]) { | ||
801 | for (i = esp->msg_out_len - 1; | ||
802 | i >= 0; i--) | ||
803 | esp->msg_out[i + 2] = esp->msg_out[i]; | ||
804 | esp->msg_out[0] = ent->tag[0]; | ||
805 | esp->msg_out[1] = ent->tag[1]; | ||
806 | esp->msg_out_len += 2; | ||
807 | } | ||
808 | |||
809 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; | ||
810 | esp->select_state = ESP_SELECT_MSGOUT; | ||
811 | } | ||
812 | val = tgt; | ||
813 | if (esp->rev == FASHME) | ||
814 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
815 | esp_write8(val, ESP_BUSID); | ||
816 | |||
817 | esp_write_tgt_sync(esp, tgt); | ||
818 | esp_write_tgt_config3(esp, tgt); | ||
819 | |||
820 | val = (p - esp->command_block); | ||
821 | |||
822 | if (esp_debug & ESP_DEBUG_SCSICMD) { | ||
823 | printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); | ||
824 | for (i = 0; i < cmd->cmd_len; i++) | ||
825 | printk("%02x ", cmd->cmnd[i]); | ||
826 | printk("]\n"); | ||
827 | } | ||
828 | |||
829 | if (esp->rev == FASHME) | ||
830 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
831 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
832 | val, 16, 0, start_cmd); | ||
833 | } | ||
834 | |||
835 | static struct esp_cmd_entry *esp_get_ent(struct esp *esp) | ||
836 | { | ||
837 | struct list_head *head = &esp->esp_cmd_pool; | ||
838 | struct esp_cmd_entry *ret; | ||
839 | |||
840 | if (list_empty(head)) { | ||
841 | ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); | ||
842 | } else { | ||
843 | ret = list_entry(head->next, struct esp_cmd_entry, list); | ||
844 | list_del(&ret->list); | ||
845 | memset(ret, 0, sizeof(*ret)); | ||
846 | } | ||
847 | return ret; | ||
848 | } | ||
849 | |||
850 | static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) | ||
851 | { | ||
852 | list_add(&ent->list, &esp->esp_cmd_pool); | ||
853 | } | ||
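esp_get_ent() and esp_put_ent() above implement a small free-list pool: released entries are pushed back onto esp_cmd_pool and recycled (after a memset()) before the driver falls back to kzalloc(GFP_ATOMIC), which it uses because the allocation can happen with the host lock held. A minimal user-space sketch of the same recycle-or-allocate pattern, using a hypothetical singly linked free list in place of the kernel's struct list_head:

#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;	/* free-list link (stands in for list_head) */
	int payload;
};

static struct entry *pool;	/* head of the free list */

/* Recycle a pooled entry if one exists, otherwise allocate a fresh one. */
static struct entry *get_ent(void)
{
	struct entry *e = pool;

	if (e) {
		pool = e->next;
		memset(e, 0, sizeof(*e));	/* mirror the memset() on reuse */
	} else {
		e = calloc(1, sizeof(*e));	/* kzalloc(GFP_ATOMIC) analogue */
	}
	return e;
}

/* Return an entry to the pool instead of freeing it. */
static void put_ent(struct entry *e)
{
	e->next = pool;
	pool = e;
}

int main(void)
{
	struct entry *a = get_ent();	/* allocated fresh */
	put_ent(a);			/* recycled into the pool */
	struct entry *b = get_ent();	/* same storage, zeroed again */
	return (a == b) ? 0 : 1;
}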
854 | |||
855 | static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, | ||
856 | struct scsi_cmnd *cmd, unsigned int result) | ||
857 | { | ||
858 | struct scsi_device *dev = cmd->device; | ||
859 | int tgt = dev->id; | ||
860 | int lun = dev->lun; | ||
861 | |||
862 | esp->active_cmd = NULL; | ||
863 | esp_unmap_dma(esp, cmd); | ||
864 | esp_free_lun_tag(ent, dev->hostdata); | ||
865 | cmd->result = result; | ||
866 | |||
867 | if (ent->eh_done) { | ||
868 | complete(ent->eh_done); | ||
869 | ent->eh_done = NULL; | ||
870 | } | ||
871 | |||
872 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
873 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
874 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
875 | ent->sense_ptr = NULL; | ||
876 | |||
877 | /* Restore the message/status bytes to what we actually | ||
878 | * saw originally. Also, report that we are providing | ||
879 | * the sense data. | ||
880 | */ | ||
881 | cmd->result = ((DRIVER_SENSE << 24) | | ||
882 | (DID_OK << 16) | | ||
883 | (COMMAND_COMPLETE << 8) | | ||
884 | (SAM_STAT_CHECK_CONDITION << 0)); | ||
885 | |||
886 | ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; | ||
887 | if (esp_debug & ESP_DEBUG_AUTOSENSE) { | ||
888 | int i; | ||
889 | |||
890 | printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", | ||
891 | esp->host->unique_id, tgt, lun); | ||
892 | for (i = 0; i < 18; i++) | ||
893 | printk("%02x ", cmd->sense_buffer[i]); | ||
894 | printk("]\n"); | ||
895 | } | ||
896 | } | ||
897 | |||
898 | cmd->scsi_done(cmd); | ||
899 | |||
900 | list_del(&ent->list); | ||
901 | esp_put_ent(esp, ent); | ||
902 | |||
903 | esp_maybe_execute_command(esp); | ||
904 | } | ||
905 | |||
906 | static unsigned int compose_result(unsigned int status, unsigned int message, | ||
907 | unsigned int driver_code) | ||
908 | { | ||
909 | return (status | (message << 8) | (driver_code << 16)); | ||
910 | } | ||
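compose_result() packs the pieces of a completed command's result in the layout the SCSI midlayer expects: the SAM status byte in bits 0-7, the message byte in bits 8-15, and the host byte (DID_*) in bits 16-23 (the autosense path in esp_cmd_is_done() additionally sets DRIVER_SENSE in bits 24-31). A self-contained sketch of the same packing; the constant values are copied from the midlayer headers only so the example compiles on its own:

#include <stdio.h>

#define SAM_STAT_GOOD             0x00
#define SAM_STAT_CHECK_CONDITION  0x02
#define COMMAND_COMPLETE          0x00
#define DID_OK                    0x00

/* Same packing as compose_result(): status | message<<8 | host-byte<<16. */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return status | (message << 8) | (driver_code << 16);
}

int main(void)
{
	/* A command that completed cleanly: the packed result is simply 0. */
	printf("%#x\n", compose_result(SAM_STAT_GOOD, COMMAND_COMPLETE, DID_OK));

	/* CHECK CONDITION with a COMMAND COMPLETE message: 0x2, i.e. only
	 * the status byte is non-zero and the midlayer will want sense data. */
	printf("%#x\n", compose_result(SAM_STAT_CHECK_CONDITION,
				       COMMAND_COMPLETE, DID_OK));
	return 0;
}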
911 | |||
912 | static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | ||
913 | { | ||
914 | struct scsi_device *dev = ent->cmd->device; | ||
915 | struct esp_lun_data *lp = dev->hostdata; | ||
916 | |||
917 | scsi_track_queue_full(dev, lp->num_tagged - 1); | ||
918 | } | ||
919 | |||
920 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
921 | { | ||
922 | struct scsi_device *dev = cmd->device; | ||
923 | struct esp *esp = host_to_esp(dev->host); | ||
924 | struct esp_cmd_priv *spriv; | ||
925 | struct esp_cmd_entry *ent; | ||
926 | |||
927 | ent = esp_get_ent(esp); | ||
928 | if (!ent) | ||
929 | return SCSI_MLQUEUE_HOST_BUSY; | ||
930 | |||
931 | ent->cmd = cmd; | ||
932 | |||
933 | cmd->scsi_done = done; | ||
934 | |||
935 | spriv = ESP_CMD_PRIV(cmd); | ||
936 | spriv->u.dma_addr = ~(dma_addr_t)0x0; | ||
937 | |||
938 | list_add_tail(&ent->list, &esp->queued_cmds); | ||
939 | |||
940 | esp_maybe_execute_command(esp); | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | static int esp_check_gross_error(struct esp *esp) | ||
946 | { | ||
947 | if (esp->sreg & ESP_STAT_SPAM) { | ||
948 | /* Gross Error, could be one of: | ||
949 | * - top of fifo overwritten | ||
950 | * - top of command register overwritten | ||
951 | * - DMA programmed with wrong direction | ||
952 | * - improper phase change | ||
953 | */ | ||
954 | printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", | ||
955 | esp->host->unique_id, esp->sreg); | ||
956 | /* XXX Reset the chip. XXX */ | ||
957 | return 1; | ||
958 | } | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int esp_check_spur_intr(struct esp *esp) | ||
963 | { | ||
964 | switch (esp->rev) { | ||
965 | case ESP100: | ||
966 | case ESP100A: | ||
967 | /* The interrupt pending bit of the status register cannot | ||
968 | * be trusted on these revisions. | ||
969 | */ | ||
970 | esp->sreg &= ~ESP_STAT_INTR; | ||
971 | break; | ||
972 | |||
973 | default: | ||
974 | if (!(esp->sreg & ESP_STAT_INTR)) { | ||
975 | esp->ireg = esp_read8(ESP_INTRPT); | ||
976 | if (esp->ireg & ESP_INTR_SR) | ||
977 | return 1; | ||
978 | |||
979 | /* If the DMA is indicating interrupt pending and the | ||
980 | * ESP is not, the only possibility is a DMA error. | ||
981 | */ | ||
982 | if (!esp->ops->dma_error(esp)) { | ||
983 | printk(KERN_ERR PFX "esp%d: Spurious irq, " | ||
984 | "sreg=%x.\n", | ||
985 | esp->host->unique_id, esp->sreg); | ||
986 | return -1; | ||
987 | } | ||
988 | |||
989 | printk(KERN_ERR PFX "esp%d: DMA error\n", | ||
990 | esp->host->unique_id); | ||
991 | |||
992 | /* XXX Reset the chip. XXX */ | ||
993 | return -1; | ||
994 | } | ||
995 | break; | ||
996 | } | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static void esp_schedule_reset(struct esp *esp) | ||
1002 | { | ||
1003 | esp_log_reset("ESP: esp_schedule_reset() from %p\n", | ||
1004 | __builtin_return_address(0)); | ||
1005 | esp->flags |= ESP_FLAG_RESETTING; | ||
1006 | esp_event(esp, ESP_EVENT_RESET); | ||
1007 | } | ||
1008 | |||
1009 | /* In order to avoid having to add a special half-reconnected state | ||
1010 | * into the driver we just sit here and poll through the rest of | ||
1011 | * the reselection process to get the tag message bytes. | ||
1012 | */ | ||
1013 | static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, | ||
1014 | struct esp_lun_data *lp) | ||
1015 | { | ||
1016 | struct esp_cmd_entry *ent; | ||
1017 | int i; | ||
1018 | |||
1019 | if (!lp->num_tagged) { | ||
1020 | printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", | ||
1021 | esp->host->unique_id); | ||
1022 | return NULL; | ||
1023 | } | ||
1024 | |||
1025 | esp_log_reconnect("ESP: reconnect tag, "); | ||
1026 | |||
1027 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
1028 | if (esp->ops->irq_pending(esp)) | ||
1029 | break; | ||
1030 | } | ||
1031 | if (i == ESP_QUICKIRQ_LIMIT) { | ||
1032 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", | ||
1033 | esp->host->unique_id); | ||
1034 | return NULL; | ||
1035 | } | ||
1036 | |||
1037 | esp->sreg = esp_read8(ESP_STATUS); | ||
1038 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1039 | |||
1040 | esp_log_reconnect("IRQ(%d:%x:%x), ", | ||
1041 | i, esp->ireg, esp->sreg); | ||
1042 | |||
1043 | if (esp->ireg & ESP_INTR_DC) { | ||
1044 | printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", | ||
1045 | esp->host->unique_id); | ||
1046 | return NULL; | ||
1047 | } | ||
1048 | |||
1049 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { | ||
1050 | printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", | ||
1051 | esp->host->unique_id, esp->sreg); | ||
1052 | return NULL; | ||
1053 | } | ||
1054 | |||
1055 | /* DMA in the tag bytes... */ | ||
1056 | esp->command_block[0] = 0xff; | ||
1057 | esp->command_block[1] = 0xff; | ||
1058 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1059 | 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); | ||
1060 | |||
1061 | /* ACK the message. */ | ||
1062 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1063 | |||
1064 | for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { | ||
1065 | if (esp->ops->irq_pending(esp)) { | ||
1066 | esp->sreg = esp_read8(ESP_STATUS); | ||
1067 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1068 | if (esp->ireg & ESP_INTR_FDONE) | ||
1069 | break; | ||
1070 | } | ||
1071 | udelay(1); | ||
1072 | } | ||
1073 | if (i == ESP_RESELECT_TAG_LIMIT) { | ||
1074 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", | ||
1075 | esp->host->unique_id); | ||
1076 | return NULL; | ||
1077 | } | ||
1078 | esp->ops->dma_drain(esp); | ||
1079 | esp->ops->dma_invalidate(esp); | ||
1080 | |||
1081 | esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", | ||
1082 | i, esp->ireg, esp->sreg, | ||
1083 | esp->command_block[0], | ||
1084 | esp->command_block[1]); | ||
1085 | |||
1086 | if (esp->command_block[0] < SIMPLE_QUEUE_TAG || | ||
1087 | esp->command_block[0] > ORDERED_QUEUE_TAG) { | ||
1088 | printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " | ||
1089 | "type %02x.\n", | ||
1090 | esp->host->unique_id, esp->command_block[0]); | ||
1091 | return NULL; | ||
1092 | } | ||
1093 | |||
1094 | ent = lp->tagged_cmds[esp->command_block[1]]; | ||
1095 | if (!ent) { | ||
1096 | printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " | ||
1097 | "tag %02x.\n", | ||
1098 | esp->host->unique_id, esp->command_block[1]); | ||
1099 | return NULL; | ||
1100 | } | ||
1101 | |||
1102 | return ent; | ||
1103 | } | ||
1104 | |||
1105 | static int esp_reconnect(struct esp *esp) | ||
1106 | { | ||
1107 | struct esp_cmd_entry *ent; | ||
1108 | struct esp_target_data *tp; | ||
1109 | struct esp_lun_data *lp; | ||
1110 | struct scsi_device *dev; | ||
1111 | int target, lun; | ||
1112 | |||
1113 | BUG_ON(esp->active_cmd); | ||
1114 | if (esp->rev == FASHME) { | ||
1115 | /* FASHME puts the target and lun numbers directly | ||
1116 | * into the fifo. | ||
1117 | */ | ||
1118 | target = esp->fifo[0]; | ||
1119 | lun = esp->fifo[1] & 0x7; | ||
1120 | } else { | ||
1121 | u8 bits = esp_read8(ESP_FDATA); | ||
1122 | |||
1123 | /* Older chips put the lun directly into the fifo, but | ||
1124 | * the target is given as a sample of the arbitration | ||
1125 | * lines on the bus at reselection time. So we should | ||
1126 | * see the ID of the ESP and the one reconnecting target | ||
1127 | * set in the bitmap. | ||
1128 | */ | ||
1129 | if (!(bits & esp->scsi_id_mask)) | ||
1130 | goto do_reset; | ||
1131 | bits &= ~esp->scsi_id_mask; | ||
1132 | if (!bits || (bits & (bits - 1))) | ||
1133 | goto do_reset; | ||
1134 | |||
1135 | target = ffs(bits) - 1; | ||
1136 | lun = (esp_read8(ESP_FDATA) & 0x7); | ||
1137 | |||
1138 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1139 | if (esp->rev == ESP100) { | ||
1140 | u8 ireg = esp_read8(ESP_INTRPT); | ||
1141 | /* This chip has a bug during reselection that can | ||
1142 | * cause a spurious illegal-command interrupt, which | ||
1143 | * we simply ACK here. Another possibility is a bus | ||
1144 | * reset so we must check for that. | ||
1145 | */ | ||
1146 | if (ireg & ESP_INTR_SR) | ||
1147 | goto do_reset; | ||
1148 | } | ||
1149 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1150 | } | ||
1151 | |||
1152 | esp_write_tgt_sync(esp, target); | ||
1153 | esp_write_tgt_config3(esp, target); | ||
1154 | |||
1155 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1156 | |||
1157 | if (esp->rev == FASHME) | ||
1158 | esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, | ||
1159 | ESP_BUSID); | ||
1160 | |||
1161 | tp = &esp->target[target]; | ||
1162 | dev = __scsi_device_lookup_by_target(tp->starget, lun); | ||
1163 | if (!dev) { | ||
1164 | printk(KERN_ERR PFX "esp%d: Reconnect, no lp " | ||
1165 | "tgt[%u] lun[%u]\n", | ||
1166 | esp->host->unique_id, target, lun); | ||
1167 | goto do_reset; | ||
1168 | } | ||
1169 | lp = dev->hostdata; | ||
1170 | |||
1171 | ent = lp->non_tagged_cmd; | ||
1172 | if (!ent) { | ||
1173 | ent = esp_reconnect_with_tag(esp, lp); | ||
1174 | if (!ent) | ||
1175 | goto do_reset; | ||
1176 | } | ||
1177 | |||
1178 | esp->active_cmd = ent; | ||
1179 | |||
1180 | if (ent->flags & ESP_CMD_FLAG_ABORT) { | ||
1181 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1182 | esp->msg_out_len = 1; | ||
1183 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1184 | } | ||
1185 | |||
1186 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1187 | esp_restore_pointers(esp, ent); | ||
1188 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1189 | return 1; | ||
1190 | |||
1191 | do_reset: | ||
1192 | esp_schedule_reset(esp); | ||
1193 | return 0; | ||
1194 | } | ||
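On pre-FASHME chips, esp_reconnect() recovers the reselecting target from a sample of the bus ID lines: the ESP's own ID bit must be present, it is masked off, exactly one bit may remain, and ffs() - 1 turns that bit into a target number. A standalone sketch of that decode (the host ID, mask, and sample values below are illustrative):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Decode the reselecting target ID from a sample of the bus ID lines.
 * Returns the target number, or -1 if the sample is invalid (our own ID
 * missing, no other bit set, or more than one other bit set). */
static int decode_reselect_id(unsigned char bits, unsigned char scsi_id_mask)
{
	if (!(bits & scsi_id_mask))		/* our own ID must be asserted */
		return -1;
	bits &= ~scsi_id_mask;			/* drop our own bit */
	if (!bits || (bits & (bits - 1)))	/* exactly one bit may remain */
		return -1;
	return ffs(bits) - 1;
}

int main(void)
{
	/* Host at SCSI ID 7 (mask 0x80), target 3 reselecting (bit 0x08). */
	printf("%d\n", decode_reselect_id(0x80 | 0x08, 0x80));		/* 3 */
	/* Two target bits asserted at once: rejected. */
	printf("%d\n", decode_reselect_id(0x80 | 0x08 | 0x10, 0x80));	/* -1 */
	return 0;
}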
1195 | |||
1196 | static int esp_finish_select(struct esp *esp) | ||
1197 | { | ||
1198 | struct esp_cmd_entry *ent; | ||
1199 | struct scsi_cmnd *cmd; | ||
1200 | u8 orig_select_state; | ||
1201 | |||
1202 | orig_select_state = esp->select_state; | ||
1203 | |||
1204 | /* No longer selecting. */ | ||
1205 | esp->select_state = ESP_SELECT_NONE; | ||
1206 | |||
1207 | esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; | ||
1208 | ent = esp->active_cmd; | ||
1209 | cmd = ent->cmd; | ||
1210 | |||
1211 | if (esp->ops->dma_error(esp)) { | ||
1212 | /* If we see a DMA error during or as a result of selection, | ||
1213 | * all bets are off. | ||
1214 | */ | ||
1215 | esp_schedule_reset(esp); | ||
1216 | esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); | ||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | esp->ops->dma_invalidate(esp); | ||
1221 | |||
1222 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
1223 | struct esp_target_data *tp = &esp->target[cmd->device->id]; | ||
1224 | |||
1225 | /* Carefully back out of the selection attempt. Release | ||
1226 | * resources (such as DMA mapping & TAG) and reset state (such | ||
1227 | * as message out and command delivery variables). | ||
1228 | */ | ||
1229 | if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1230 | esp_unmap_dma(esp, cmd); | ||
1231 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1232 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); | ||
1233 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
1234 | esp->cmd_bytes_ptr = NULL; | ||
1235 | esp->cmd_bytes_left = 0; | ||
1236 | } else { | ||
1237 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1238 | SCSI_SENSE_BUFFERSIZE, | ||
1239 | DMA_FROM_DEVICE); | ||
1240 | ent->sense_ptr = NULL; | ||
1241 | } | ||
1242 | |||
1243 | /* Now that the state is unwound properly, put back onto | ||
1244 | * the issue queue. This command is no longer active. | ||
1245 | */ | ||
1246 | list_del(&ent->list); | ||
1247 | list_add(&ent->list, &esp->queued_cmds); | ||
1248 | esp->active_cmd = NULL; | ||
1249 | |||
1250 | /* Return value ignored by caller, it directly invokes | ||
1251 | * esp_reconnect(). | ||
1252 | */ | ||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | if (esp->ireg == ESP_INTR_DC) { | ||
1257 | struct scsi_device *dev = cmd->device; | ||
1258 | |||
1259 | /* Disconnect. Make sure we re-negotiate sync and | ||
1260 | * wide parameters if this target starts responding | ||
1261 | * again in the future. | ||
1262 | */ | ||
1263 | esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; | ||
1264 | |||
1265 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1266 | esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); | ||
1267 | return 1; | ||
1268 | } | ||
1269 | |||
1270 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
1271 | /* Selection successful. On pre-FAST chips we have | ||
1272 | * to do a NOP and possibly clean out the FIFO. | ||
1273 | */ | ||
1274 | if (esp->rev <= ESP236) { | ||
1275 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1276 | |||
1277 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1278 | |||
1279 | if (!fcnt && | ||
1280 | (!esp->prev_soff || | ||
1281 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
1282 | esp_flush_fifo(esp); | ||
1283 | } | ||
1284 | |||
1285 | /* If we are doing a slow command, negotiation, etc. | ||
1286 | * we'll do the right thing as we transition to the | ||
1287 | * next phase. | ||
1288 | */ | ||
1289 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1290 | return 0; | ||
1291 | } | ||
1292 | |||
1293 | printk("ESP: Unexpected selection completion ireg[%x].\n", | ||
1294 | esp->ireg); | ||
1295 | esp_schedule_reset(esp); | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, | ||
1300 | struct scsi_cmnd *cmd) | ||
1301 | { | ||
1302 | int fifo_cnt, ecount, bytes_sent, flush_fifo; | ||
1303 | |||
1304 | fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1305 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
1306 | fifo_cnt <<= 1; | ||
1307 | |||
1308 | ecount = 0; | ||
1309 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
1310 | ecount = ((unsigned int)esp_read8(ESP_TCLOW) | | ||
1311 | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); | ||
1312 | if (esp->rev == FASHME) | ||
1313 | ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; | ||
1314 | } | ||
1315 | |||
1316 | bytes_sent = esp->data_dma_len; | ||
1317 | bytes_sent -= ecount; | ||
1318 | |||
1319 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1320 | bytes_sent -= fifo_cnt; | ||
1321 | |||
1322 | flush_fifo = 0; | ||
1323 | if (!esp->prev_soff) { | ||
1324 | /* Synchronous data transfer, always flush fifo. */ | ||
1325 | flush_fifo = 1; | ||
1326 | } else { | ||
1327 | if (esp->rev == ESP100) { | ||
1328 | u32 fflags, phase; | ||
1329 | |||
1330 | /* ESP100 has a chip bug where in the synchronous data | ||
1331 | * phase it can mistake a final long REQ pulse from the | ||
1332 | * target as an extra data byte. Fun. | ||
1333 | * | ||
1334 | * To detect this case we resample the status register | ||
1335 | * and fifo flags. If we're still in a data phase and | ||
1336 | * we see spurious chunks in the fifo, we return error | ||
1337 | * to the caller which should reset and set things up | ||
1338 | * such that we only try future transfers to this | ||
1339 | * target in synchronous mode. | ||
1340 | */ | ||
1341 | esp->sreg = esp_read8(ESP_STATUS); | ||
1342 | phase = esp->sreg & ESP_STAT_PMASK; | ||
1343 | fflags = esp_read8(ESP_FFLAGS); | ||
1344 | |||
1345 | if ((phase == ESP_DOP && | ||
1346 | (fflags & ESP_FF_ONOTZERO)) || | ||
1347 | (phase == ESP_DIP && | ||
1348 | (fflags & ESP_FF_FBYTES))) | ||
1349 | return -1; | ||
1350 | } | ||
1351 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1352 | flush_fifo = 1; | ||
1353 | } | ||
1354 | |||
1355 | if (flush_fifo) | ||
1356 | esp_flush_fifo(esp); | ||
1357 | |||
1358 | return bytes_sent; | ||
1359 | } | ||
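esp_data_bytes_sent() reconstructs how much data really moved: the programmed DMA length minus whatever remains in the transfer counter, and, for data-out, minus any bytes still parked in the FIFO that never reached the bus. A worked sketch with illustrative numbers (the wide-transfer doubling and the ESP100 quirk handling above are omitted):

#include <stdio.h>

/* Bytes actually transferred, given the programmed DMA length, the residual
 * transfer count read back from TCLOW/TCMED, and the FIFO residue.
 * 'is_write' means data-in (target -> host memory), as in the driver's
 * ESP_CMD_FLAG_WRITE. */
static int data_bytes_sent(int data_dma_len, int ecount, int fifo_cnt,
			   int is_write)
{
	int bytes_sent = data_dma_len - ecount;

	if (!is_write)		/* data-out: FIFO bytes never left the chip */
		bytes_sent -= fifo_cnt;
	return bytes_sent;
}

int main(void)
{
	/* A 4096-byte data-out that stopped with 512 left in the counter and
	 * 2 bytes still sitting in the FIFO: 4096 - 512 - 2 = 3582 sent. */
	printf("%d\n", data_bytes_sent(4096, 512, 2, 0));
	return 0;
}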
1360 | |||
1361 | static void esp_setsync(struct esp *esp, struct esp_target_data *tp, | ||
1362 | u8 scsi_period, u8 scsi_offset, | ||
1363 | u8 esp_stp, u8 esp_soff) | ||
1364 | { | ||
1365 | spi_period(tp->starget) = scsi_period; | ||
1366 | spi_offset(tp->starget) = scsi_offset; | ||
1367 | spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0; | ||
1368 | |||
1369 | if (esp_soff) { | ||
1370 | esp_stp &= 0x1f; | ||
1371 | esp_soff |= esp->radelay; | ||
1372 | if (esp->rev >= FAS236) { | ||
1373 | u8 bit = ESP_CONFIG3_FSCSI; | ||
1374 | if (esp->rev >= FAS100A) | ||
1375 | bit = ESP_CONFIG3_FAST; | ||
1376 | |||
1377 | if (scsi_period < 50) { | ||
1378 | if (esp->rev == FASHME) | ||
1379 | esp_soff &= ~esp->radelay; | ||
1380 | tp->esp_config3 |= bit; | ||
1381 | } else { | ||
1382 | tp->esp_config3 &= ~bit; | ||
1383 | } | ||
1384 | esp->prev_cfg3 = tp->esp_config3; | ||
1385 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | tp->esp_period = esp->prev_stp = esp_stp; | ||
1390 | tp->esp_offset = esp->prev_soff = esp_soff; | ||
1391 | |||
1392 | esp_write8(esp_soff, ESP_SOFF); | ||
1393 | esp_write8(esp_stp, ESP_STP); | ||
1394 | |||
1395 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1396 | |||
1397 | spi_display_xfer_agreement(tp->starget); | ||
1398 | } | ||
1399 | |||
1400 | static void esp_msgin_reject(struct esp *esp) | ||
1401 | { | ||
1402 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1403 | struct scsi_cmnd *cmd = ent->cmd; | ||
1404 | struct esp_target_data *tp; | ||
1405 | int tgt; | ||
1406 | |||
1407 | tgt = cmd->device->id; | ||
1408 | tp = &esp->target[tgt]; | ||
1409 | |||
1410 | if (tp->flags & ESP_TGT_NEGO_WIDE) { | ||
1411 | tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); | ||
1412 | |||
1413 | if (!esp_need_to_nego_sync(tp)) { | ||
1414 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1415 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1416 | } else { | ||
1417 | esp->msg_out_len = | ||
1418 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1419 | tp->nego_goal_period, | ||
1420 | tp->nego_goal_offset); | ||
1421 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1422 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1423 | } | ||
1424 | return; | ||
1425 | } | ||
1426 | |||
1427 | if (tp->flags & ESP_TGT_NEGO_SYNC) { | ||
1428 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1429 | tp->esp_period = 0; | ||
1430 | tp->esp_offset = 0; | ||
1431 | esp_setsync(esp, tp, 0, 0, 0, 0); | ||
1432 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1433 | return; | ||
1434 | } | ||
1435 | |||
1436 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1437 | esp->msg_out_len = 1; | ||
1438 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1439 | } | ||
1440 | |||
1441 | static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) | ||
1442 | { | ||
1443 | u8 period = esp->msg_in[3]; | ||
1444 | u8 offset = esp->msg_in[4]; | ||
1445 | u8 stp; | ||
1446 | |||
1447 | if (!(tp->flags & ESP_TGT_NEGO_SYNC)) | ||
1448 | goto do_reject; | ||
1449 | |||
1450 | if (offset > 15) | ||
1451 | goto do_reject; | ||
1452 | |||
1453 | if (offset) { | ||
1454 | int rounded_up, one_clock; | ||
1455 | |||
1456 | if (period > esp->max_period) { | ||
1457 | period = offset = 0; | ||
1458 | goto do_sdtr; | ||
1459 | } | ||
1460 | if (period < esp->min_period) | ||
1461 | goto do_reject; | ||
1462 | |||
1463 | one_clock = esp->ccycle / 1000; | ||
1464 | rounded_up = (period << 2); | ||
1465 | rounded_up = (rounded_up + one_clock - 1) / one_clock; | ||
1466 | stp = rounded_up; | ||
1467 | if (stp && esp->rev >= FAS236) { | ||
1468 | if (stp >= 50) | ||
1469 | stp--; | ||
1470 | } | ||
1471 | } else { | ||
1472 | stp = 0; | ||
1473 | } | ||
1474 | |||
1475 | esp_setsync(esp, tp, period, offset, stp, offset); | ||
1476 | return; | ||
1477 | |||
1478 | do_reject: | ||
1479 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1480 | esp->msg_out_len = 1; | ||
1481 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1482 | return; | ||
1483 | |||
1484 | do_sdtr: | ||
1485 | tp->nego_goal_period = period; | ||
1486 | tp->nego_goal_offset = offset; | ||
1487 | esp->msg_out_len = | ||
1488 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1489 | tp->nego_goal_period, | ||
1490 | tp->nego_goal_offset); | ||
1491 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1492 | } | ||
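The STP value esp_msgin_sdtr() programs is the negotiated period rounded up to a whole number of chip clocks: the SDTR period byte is in 4 ns units, and one_clock = ccycle / 1000 appears to yield the clock period in nanoseconds (ESP_MHZ_TO_CYCLE() is defined outside this hunk, so that unit is an assumption). A hedged worked example:

#include <stdio.h>

/* Sketch of the STP calculation in esp_msgin_sdtr(), assuming ccycle carries
 * the chip clock period in picoseconds so that one_clock comes out in ns. */
static unsigned int sdtr_period_to_stp(unsigned int period_4ns,
				       unsigned int ccycle_ps, int fast_chip)
{
	unsigned int one_clock = ccycle_ps / 1000;	/* clock period, ns */
	unsigned int period_ns = period_4ns << 2;	/* SDTR unit is 4 ns */
	unsigned int stp = (period_ns + one_clock - 1) / one_clock; /* ceil */

	if (stp && fast_chip && stp >= 50)	/* FAS236+ adjustment above */
		stp--;
	return stp;
}

int main(void)
{
	/* 40 MHz chip clock (25000 ps period) negotiating a 100 ns sync
	 * period (SDTR value 25): 100 / 25 = 4 clocks per transfer. */
	printf("%u\n", sdtr_period_to_stp(25, 25000, 1));
	return 0;
}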
1493 | |||
1494 | static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) | ||
1495 | { | ||
1496 | int size = 8 << esp->msg_in[3]; | ||
1497 | u8 cfg3; | ||
1498 | |||
1499 | if (esp->rev != FASHME) | ||
1500 | goto do_reject; | ||
1501 | |||
1502 | if (size != 8 && size != 16) | ||
1503 | goto do_reject; | ||
1504 | |||
1505 | if (!(tp->flags & ESP_TGT_NEGO_WIDE)) | ||
1506 | goto do_reject; | ||
1507 | |||
1508 | cfg3 = tp->esp_config3; | ||
1509 | if (size == 16) { | ||
1510 | tp->flags |= ESP_TGT_WIDE; | ||
1511 | cfg3 |= ESP_CONFIG3_EWIDE; | ||
1512 | } else { | ||
1513 | tp->flags &= ~ESP_TGT_WIDE; | ||
1514 | cfg3 &= ~ESP_CONFIG3_EWIDE; | ||
1515 | } | ||
1516 | tp->esp_config3 = cfg3; | ||
1517 | esp->prev_cfg3 = cfg3; | ||
1518 | esp_write8(cfg3, ESP_CFG3); | ||
1519 | |||
1520 | tp->flags &= ~ESP_TGT_NEGO_WIDE; | ||
1521 | |||
1522 | spi_period(tp->starget) = 0; | ||
1523 | spi_offset(tp->starget) = 0; | ||
1524 | if (!esp_need_to_nego_sync(tp)) { | ||
1525 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1526 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1527 | } else { | ||
1528 | esp->msg_out_len = | ||
1529 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1530 | tp->nego_goal_period, | ||
1531 | tp->nego_goal_offset); | ||
1532 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1533 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1534 | } | ||
1535 | return; | ||
1536 | |||
1537 | do_reject: | ||
1538 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1539 | esp->msg_out_len = 1; | ||
1540 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1541 | } | ||
1542 | |||
1543 | static void esp_msgin_extended(struct esp *esp) | ||
1544 | { | ||
1545 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1546 | struct scsi_cmnd *cmd = ent->cmd; | ||
1547 | struct esp_target_data *tp; | ||
1548 | int tgt = cmd->device->id; | ||
1549 | |||
1550 | tp = &esp->target[tgt]; | ||
1551 | if (esp->msg_in[2] == EXTENDED_SDTR) { | ||
1552 | esp_msgin_sdtr(esp, tp); | ||
1553 | return; | ||
1554 | } | ||
1555 | if (esp->msg_in[2] == EXTENDED_WDTR) { | ||
1556 | esp_msgin_wdtr(esp, tp); | ||
1557 | return; | ||
1558 | } | ||
1559 | |||
1560 | printk("ESP: Unexpected extended msg type %x\n", | ||
1561 | esp->msg_in[2]); | ||
1562 | |||
1563 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1564 | esp->msg_out_len = 1; | ||
1565 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1566 | } | ||
1567 | |||
1568 | /* Analyze msgin bytes received from target so far. Return non-zero | ||
1569 | * if there are more bytes needed to complete the message. | ||
1570 | */ | ||
1571 | static int esp_msgin_process(struct esp *esp) | ||
1572 | { | ||
1573 | u8 msg0 = esp->msg_in[0]; | ||
1574 | int len = esp->msg_in_len; | ||
1575 | |||
1576 | if (msg0 & 0x80) { | ||
1577 | /* Identify */ | ||
1578 | printk("ESP: Unexpected msgin identify\n"); | ||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | switch (msg0) { | ||
1583 | case EXTENDED_MESSAGE: | ||
1584 | if (len == 1) | ||
1585 | return 1; | ||
1586 | if (len < esp->msg_in[1] + 2) | ||
1587 | return 1; | ||
1588 | esp_msgin_extended(esp); | ||
1589 | return 0; | ||
1590 | |||
1591 | case IGNORE_WIDE_RESIDUE: { | ||
1592 | struct esp_cmd_entry *ent; | ||
1593 | struct esp_cmd_priv *spriv; | ||
1594 | if (len == 1) | ||
1595 | return 1; | ||
1596 | |||
1597 | if (esp->msg_in[1] != 1) | ||
1598 | goto do_reject; | ||
1599 | |||
1600 | ent = esp->active_cmd; | ||
1601 | spriv = ESP_CMD_PRIV(ent->cmd); | ||
1602 | |||
1603 | if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { | ||
1604 | spriv->cur_sg--; | ||
1605 | spriv->cur_residue = 1; | ||
1606 | } else | ||
1607 | spriv->cur_residue++; | ||
1608 | spriv->tot_residue++; | ||
1609 | return 0; | ||
1610 | } | ||
1611 | case NOP: | ||
1612 | return 0; | ||
1613 | case RESTORE_POINTERS: | ||
1614 | esp_restore_pointers(esp, esp->active_cmd); | ||
1615 | return 0; | ||
1616 | case SAVE_POINTERS: | ||
1617 | esp_save_pointers(esp, esp->active_cmd); | ||
1618 | return 0; | ||
1619 | |||
1620 | case COMMAND_COMPLETE: | ||
1621 | case DISCONNECT: { | ||
1622 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1623 | |||
1624 | ent->message = msg0; | ||
1625 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1626 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1627 | return 0; | ||
1628 | } | ||
1629 | case MESSAGE_REJECT: | ||
1630 | esp_msgin_reject(esp); | ||
1631 | return 0; | ||
1632 | |||
1633 | default: | ||
1634 | do_reject: | ||
1635 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1636 | esp->msg_out_len = 1; | ||
1637 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1638 | return 0; | ||
1639 | } | ||
1640 | } | ||
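For EXTENDED_MESSAGE, esp_msgin_process() keeps asking for bytes until the whole message has arrived: byte 1 of an extended message is the count of bytes that follow it, so the complete message occupies msg[1] + 2 bytes. A tiny sketch of that completeness test:

#include <stdio.h>

/* Mirrors the EXTENDED_MESSAGE arm of esp_msgin_process(): returns non-zero
 * while more message-in bytes are still needed. */
static int extended_msg_incomplete(const unsigned char *msg, int len)
{
	if (len == 1)			/* length byte not received yet */
		return 1;
	return len < msg[1] + 2;	/* still gathering the payload */
}

int main(void)
{
	/* An SDTR message: 01 03 01 <period> <offset>, 5 bytes in total. */
	const unsigned char sdtr[] = { 0x01, 0x03, 0x01, 0x19, 0x0f };
	int len;

	for (len = 1; len <= 5; len++)
		printf("len=%d incomplete=%d\n", len,
		       extended_msg_incomplete(sdtr, len));
	return 0;
}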
1641 | |||
1642 | static int esp_process_event(struct esp *esp) | ||
1643 | { | ||
1644 | int write; | ||
1645 | |||
1646 | again: | ||
1647 | write = 0; | ||
1648 | switch (esp->event) { | ||
1649 | case ESP_EVENT_CHECK_PHASE: | ||
1650 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
1651 | case ESP_DOP: | ||
1652 | esp_event(esp, ESP_EVENT_DATA_OUT); | ||
1653 | break; | ||
1654 | case ESP_DIP: | ||
1655 | esp_event(esp, ESP_EVENT_DATA_IN); | ||
1656 | break; | ||
1657 | case ESP_STATP: | ||
1658 | esp_flush_fifo(esp); | ||
1659 | scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); | ||
1660 | esp_event(esp, ESP_EVENT_STATUS); | ||
1661 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1662 | return 1; | ||
1663 | |||
1664 | case ESP_MOP: | ||
1665 | esp_event(esp, ESP_EVENT_MSGOUT); | ||
1666 | break; | ||
1667 | |||
1668 | case ESP_MIP: | ||
1669 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1670 | break; | ||
1671 | |||
1672 | case ESP_CMDP: | ||
1673 | esp_event(esp, ESP_EVENT_CMD_START); | ||
1674 | break; | ||
1675 | |||
1676 | default: | ||
1677 | printk("ESP: Unexpected phase, sreg=%02x\n", | ||
1678 | esp->sreg); | ||
1679 | esp_schedule_reset(esp); | ||
1680 | return 0; | ||
1681 | } | ||
1682 | goto again; | ||
1683 | break; | ||
1684 | |||
1685 | case ESP_EVENT_DATA_IN: | ||
1686 | write = 1; | ||
1687 | /* fallthru */ | ||
1688 | |||
1689 | case ESP_EVENT_DATA_OUT: { | ||
1690 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1691 | struct scsi_cmnd *cmd = ent->cmd; | ||
1692 | dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); | ||
1693 | unsigned int dma_len = esp_cur_dma_len(ent, cmd); | ||
1694 | |||
1695 | if (esp->rev == ESP100) | ||
1696 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1697 | |||
1698 | if (write) | ||
1699 | ent->flags |= ESP_CMD_FLAG_WRITE; | ||
1700 | else | ||
1701 | ent->flags &= ~ESP_CMD_FLAG_WRITE; | ||
1702 | |||
1703 | dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); | ||
1704 | esp->data_dma_len = dma_len; | ||
1705 | |||
1706 | if (!dma_len) { | ||
1707 | printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", | ||
1708 | esp->host->unique_id); | ||
1709 | printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n", | ||
1710 | esp->host->unique_id, | ||
1711 | esp_cur_dma_addr(ent, cmd), | ||
1712 | esp_cur_dma_len(ent, cmd)); | ||
1713 | esp_schedule_reset(esp); | ||
1714 | return 0; | ||
1715 | } | ||
1716 | |||
1717 | esp_log_datastart("ESP: start data addr[%08x] len[%u] " | ||
1718 | "write(%d)\n", | ||
1719 | dma_addr, dma_len, write); | ||
1720 | |||
1721 | esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, | ||
1722 | write, ESP_CMD_DMA | ESP_CMD_TI); | ||
1723 | esp_event(esp, ESP_EVENT_DATA_DONE); | ||
1724 | break; | ||
1725 | } | ||
1726 | case ESP_EVENT_DATA_DONE: { | ||
1727 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1728 | struct scsi_cmnd *cmd = ent->cmd; | ||
1729 | int bytes_sent; | ||
1730 | |||
1731 | if (esp->ops->dma_error(esp)) { | ||
1732 | printk("ESP: data done, DMA error, resetting\n"); | ||
1733 | esp_schedule_reset(esp); | ||
1734 | return 0; | ||
1735 | } | ||
1736 | |||
1737 | if (ent->flags & ESP_CMD_FLAG_WRITE) { | ||
1738 | /* XXX parity errors, etc. XXX */ | ||
1739 | |||
1740 | esp->ops->dma_drain(esp); | ||
1741 | } | ||
1742 | esp->ops->dma_invalidate(esp); | ||
1743 | |||
1744 | if (esp->ireg != ESP_INTR_BSERV) { | ||
1745 | /* We should always see exactly a bus-service | ||
1746 | * interrupt at the end of a successful transfer. | ||
1747 | */ | ||
1748 | printk("ESP: data done, not BSERV, resetting\n"); | ||
1749 | esp_schedule_reset(esp); | ||
1750 | return 0; | ||
1751 | } | ||
1752 | |||
1753 | bytes_sent = esp_data_bytes_sent(esp, ent, cmd); | ||
1754 | |||
1755 | esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", | ||
1756 | ent->flags, bytes_sent); | ||
1757 | |||
1758 | if (bytes_sent < 0) { | ||
1759 | /* XXX force sync mode for this target XXX */ | ||
1760 | esp_schedule_reset(esp); | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | esp_advance_dma(esp, ent, cmd, bytes_sent); | ||
1765 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1766 | goto again; | ||
1767 | break; | ||
1768 | } | ||
1769 | |||
1770 | case ESP_EVENT_STATUS: { | ||
1771 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1772 | |||
1773 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1774 | ent->status = esp_read8(ESP_FDATA); | ||
1775 | ent->message = esp_read8(ESP_FDATA); | ||
1776 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1777 | } else if (esp->ireg == ESP_INTR_BSERV) { | ||
1778 | ent->status = esp_read8(ESP_FDATA); | ||
1779 | ent->message = 0xff; | ||
1780 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1784 | if (ent->message != COMMAND_COMPLETE) { | ||
1785 | printk("ESP: Unexpected message %x in status\n", | ||
1786 | ent->message); | ||
1787 | esp_schedule_reset(esp); | ||
1788 | return 0; | ||
1789 | } | ||
1790 | |||
1791 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1792 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1793 | break; | ||
1794 | } | ||
1795 | case ESP_EVENT_FREE_BUS: { | ||
1796 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1797 | struct scsi_cmnd *cmd = ent->cmd; | ||
1798 | |||
1799 | if (ent->message == COMMAND_COMPLETE || | ||
1800 | ent->message == DISCONNECT) | ||
1801 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1802 | |||
1803 | if (ent->message == COMMAND_COMPLETE) { | ||
1804 | esp_log_cmddone("ESP: Command done status[%x] " | ||
1805 | "message[%x]\n", | ||
1806 | ent->status, ent->message); | ||
1807 | if (ent->status == SAM_STAT_TASK_SET_FULL) | ||
1808 | esp_event_queue_full(esp, ent); | ||
1809 | |||
1810 | if (ent->status == SAM_STAT_CHECK_CONDITION && | ||
1811 | !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1812 | ent->flags |= ESP_CMD_FLAG_AUTOSENSE; | ||
1813 | esp_autosense(esp, ent); | ||
1814 | } else { | ||
1815 | esp_cmd_is_done(esp, ent, cmd, | ||
1816 | compose_result(ent->status, | ||
1817 | ent->message, | ||
1818 | DID_OK)); | ||
1819 | } | ||
1820 | } else if (ent->message == DISCONNECT) { | ||
1821 | esp_log_disconnect("ESP: Disconnecting tgt[%d] " | ||
1822 | "tag[%x:%x]\n", | ||
1823 | cmd->device->id, | ||
1824 | ent->tag[0], ent->tag[1]); | ||
1825 | |||
1826 | esp->active_cmd = NULL; | ||
1827 | esp_maybe_execute_command(esp); | ||
1828 | } else { | ||
1829 | printk("ESP: Unexpected message %x in freebus\n", | ||
1830 | ent->message); | ||
1831 | esp_schedule_reset(esp); | ||
1832 | return 0; | ||
1833 | } | ||
1834 | if (esp->active_cmd) | ||
1835 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1836 | break; | ||
1837 | } | ||
1838 | case ESP_EVENT_MSGOUT: { | ||
1839 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1840 | |||
1841 | if (esp_debug & ESP_DEBUG_MSGOUT) { | ||
1842 | int i; | ||
1843 | printk("ESP: Sending message [ "); | ||
1844 | for (i = 0; i < esp->msg_out_len; i++) | ||
1845 | printk("%02x ", esp->msg_out[i]); | ||
1846 | printk("]\n"); | ||
1847 | } | ||
1848 | |||
1849 | if (esp->rev == FASHME) { | ||
1850 | int i; | ||
1851 | |||
1852 | /* Always use the fifo. */ | ||
1853 | for (i = 0; i < esp->msg_out_len; i++) { | ||
1854 | esp_write8(esp->msg_out[i], ESP_FDATA); | ||
1855 | esp_write8(0, ESP_FDATA); | ||
1856 | } | ||
1857 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1858 | } else { | ||
1859 | if (esp->msg_out_len == 1) { | ||
1860 | esp_write8(esp->msg_out[0], ESP_FDATA); | ||
1861 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1862 | } else { | ||
1863 | /* Use DMA. */ | ||
1864 | memcpy(esp->command_block, | ||
1865 | esp->msg_out, | ||
1866 | esp->msg_out_len); | ||
1867 | |||
1868 | esp->ops->send_dma_cmd(esp, | ||
1869 | esp->command_block_dma, | ||
1870 | esp->msg_out_len, | ||
1871 | esp->msg_out_len, | ||
1872 | 0, | ||
1873 | ESP_CMD_DMA|ESP_CMD_TI); | ||
1874 | } | ||
1875 | } | ||
1876 | esp_event(esp, ESP_EVENT_MSGOUT_DONE); | ||
1877 | break; | ||
1878 | } | ||
1879 | case ESP_EVENT_MSGOUT_DONE: | ||
1880 | if (esp->rev == FASHME) { | ||
1881 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1882 | } else { | ||
1883 | if (esp->msg_out_len > 1) | ||
1884 | esp->ops->dma_invalidate(esp); | ||
1885 | } | ||
1886 | |||
1887 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
1888 | if (esp->rev != FASHME) | ||
1889 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1890 | } | ||
1891 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1892 | goto again; | ||
1893 | case ESP_EVENT_MSGIN: | ||
1894 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1895 | if (esp->rev == FASHME) { | ||
1896 | if (!(esp_read8(ESP_STATUS2) & | ||
1897 | ESP_STAT2_FEMPTY)) | ||
1898 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1899 | } else { | ||
1900 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1901 | if (esp->rev == ESP100) | ||
1902 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1903 | } | ||
1904 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1905 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1906 | return 1; | ||
1907 | } | ||
1908 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1909 | u8 val; | ||
1910 | |||
1911 | if (esp->rev == FASHME) | ||
1912 | val = esp->fifo[0]; | ||
1913 | else | ||
1914 | val = esp_read8(ESP_FDATA); | ||
1915 | esp->msg_in[esp->msg_in_len++] = val; | ||
1916 | |||
1917 | esp_log_msgin("ESP: Got msgin byte %x\n", val); | ||
1918 | |||
1919 | if (!esp_msgin_process(esp)) | ||
1920 | esp->msg_in_len = 0; | ||
1921 | |||
1922 | if (esp->rev == FASHME) | ||
1923 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1924 | |||
1925 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1926 | |||
1927 | if (esp->event != ESP_EVENT_FREE_BUS) | ||
1928 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1929 | } else { | ||
1930 | printk("ESP: MSGIN neither BSERV not FDON, resetting"); | ||
1931 | esp_schedule_reset(esp); | ||
1932 | return 0; | ||
1933 | } | ||
1934 | break; | ||
1935 | case ESP_EVENT_CMD_START: | ||
1936 | memcpy(esp->command_block, esp->cmd_bytes_ptr, | ||
1937 | esp->cmd_bytes_left); | ||
1938 | if (esp->rev == FASHME) | ||
1939 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1940 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1941 | esp->cmd_bytes_left, 16, 0, | ||
1942 | ESP_CMD_DMA | ESP_CMD_TI); | ||
1943 | esp_event(esp, ESP_EVENT_CMD_DONE); | ||
1944 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1945 | break; | ||
1946 | case ESP_EVENT_CMD_DONE: | ||
1947 | esp->ops->dma_invalidate(esp); | ||
1948 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1949 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1950 | goto again; | ||
1951 | } | ||
1952 | esp_schedule_reset(esp); | ||
1953 | return 0; | ||
1954 | break; | ||
1955 | |||
1956 | case ESP_EVENT_RESET: | ||
1957 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
1958 | break; | ||
1959 | |||
1960 | default: | ||
1961 | printk("ESP: Unexpected event %x, resetting\n", | ||
1962 | esp->event); | ||
1963 | esp_schedule_reset(esp); | ||
1964 | return 0; | ||
1965 | break; | ||
1966 | } | ||
1967 | return 1; | ||
1968 | } | ||
1969 | |||
1970 | static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) | ||
1971 | { | ||
1972 | struct scsi_cmnd *cmd = ent->cmd; | ||
1973 | |||
1974 | esp_unmap_dma(esp, cmd); | ||
1975 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1976 | cmd->result = DID_RESET << 16; | ||
1977 | |||
1978 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
1979 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1980 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
1981 | ent->sense_ptr = NULL; | ||
1982 | } | ||
1983 | |||
1984 | cmd->scsi_done(cmd); | ||
1985 | list_del(&ent->list); | ||
1986 | esp_put_ent(esp, ent); | ||
1987 | } | ||
1988 | |||
1989 | static void esp_clear_hold(struct scsi_device *dev, void *data) | ||
1990 | { | ||
1991 | struct esp_lun_data *lp = dev->hostdata; | ||
1992 | |||
1993 | BUG_ON(lp->num_tagged); | ||
1994 | lp->hold = 0; | ||
1995 | } | ||
1996 | |||
1997 | static void esp_reset_cleanup(struct esp *esp) | ||
1998 | { | ||
1999 | struct esp_cmd_entry *ent, *tmp; | ||
2000 | int i; | ||
2001 | |||
2002 | list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { | ||
2003 | struct scsi_cmnd *cmd = ent->cmd; | ||
2004 | |||
2005 | list_del(&ent->list); | ||
2006 | cmd->result = DID_RESET << 16; | ||
2007 | cmd->scsi_done(cmd); | ||
2008 | esp_put_ent(esp, ent); | ||
2009 | } | ||
2010 | |||
2011 | list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { | ||
2012 | if (ent == esp->active_cmd) | ||
2013 | esp->active_cmd = NULL; | ||
2014 | esp_reset_cleanup_one(esp, ent); | ||
2015 | } | ||
2016 | |||
2017 | BUG_ON(esp->active_cmd != NULL); | ||
2018 | |||
2019 | /* Force renegotiation of sync/wide transfers. */ | ||
2020 | for (i = 0; i < ESP_MAX_TARGET; i++) { | ||
2021 | struct esp_target_data *tp = &esp->target[i]; | ||
2022 | |||
2023 | tp->esp_period = 0; | ||
2024 | tp->esp_offset = 0; | ||
2025 | tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | | ||
2026 | ESP_CONFIG3_FSCSI | | ||
2027 | ESP_CONFIG3_FAST); | ||
2028 | tp->flags &= ~ESP_TGT_WIDE; | ||
2029 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2030 | |||
2031 | if (tp->starget) | ||
2032 | starget_for_each_device(tp->starget, NULL, | ||
2033 | esp_clear_hold); | ||
2034 | } | ||
2035 | } | ||
2036 | |||
2037 | /* Runs under host->lock */ | ||
2038 | static void __esp_interrupt(struct esp *esp) | ||
2039 | { | ||
2040 | int finish_reset, intr_done; | ||
2041 | u8 phase; | ||
2042 | |||
2043 | esp->sreg = esp_read8(ESP_STATUS); | ||
2044 | |||
2045 | if (esp->flags & ESP_FLAG_RESETTING) { | ||
2046 | finish_reset = 1; | ||
2047 | } else { | ||
2048 | if (esp_check_gross_error(esp)) | ||
2049 | return; | ||
2050 | |||
2051 | finish_reset = esp_check_spur_intr(esp); | ||
2052 | if (finish_reset < 0) | ||
2053 | return; | ||
2054 | } | ||
2055 | |||
2056 | esp->ireg = esp_read8(ESP_INTRPT); | ||
2057 | |||
2058 | if (esp->ireg & ESP_INTR_SR) | ||
2059 | finish_reset = 1; | ||
2060 | |||
2061 | if (finish_reset) { | ||
2062 | esp_reset_cleanup(esp); | ||
2063 | if (esp->eh_reset) { | ||
2064 | complete(esp->eh_reset); | ||
2065 | esp->eh_reset = NULL; | ||
2066 | } | ||
2067 | return; | ||
2068 | } | ||
2069 | |||
2070 | phase = (esp->sreg & ESP_STAT_PMASK); | ||
2071 | if (esp->rev == FASHME) { | ||
2072 | if (((phase != ESP_DIP && phase != ESP_DOP) && | ||
2073 | esp->select_state == ESP_SELECT_NONE && | ||
2074 | esp->event != ESP_EVENT_STATUS && | ||
2075 | esp->event != ESP_EVENT_DATA_DONE) || | ||
2076 | (esp->ireg & ESP_INTR_RSEL)) { | ||
2077 | esp->sreg2 = esp_read8(ESP_STATUS2); | ||
2078 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2079 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2080 | hme_read_fifo(esp); | ||
2081 | } | ||
2082 | } | ||
2083 | |||
2084 | esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " | ||
2085 | "sreg2[%02x] ireg[%02x]\n", | ||
2086 | esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); | ||
2087 | |||
2088 | intr_done = 0; | ||
2089 | |||
2090 | if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { | ||
2091 | printk("ESP: unexpected IREG %02x\n", esp->ireg); | ||
2092 | if (esp->ireg & ESP_INTR_IC) | ||
2093 | esp_dump_cmd_log(esp); | ||
2094 | |||
2095 | esp_schedule_reset(esp); | ||
2096 | } else { | ||
2097 | if (!(esp->ireg & ESP_INTR_RSEL)) { | ||
2098 | /* Some combination of FDONE, BSERV, DC. */ | ||
2099 | if (esp->select_state != ESP_SELECT_NONE) | ||
2100 | intr_done = esp_finish_select(esp); | ||
2101 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
2102 | if (esp->active_cmd) | ||
2103 | (void) esp_finish_select(esp); | ||
2104 | intr_done = esp_reconnect(esp); | ||
2105 | } | ||
2106 | } | ||
2107 | while (!intr_done) | ||
2108 | intr_done = esp_process_event(esp); | ||
2109 | } | ||
2110 | |||
2111 | irqreturn_t scsi_esp_intr(int irq, void *dev_id) | ||
2112 | { | ||
2113 | struct esp *esp = dev_id; | ||
2114 | unsigned long flags; | ||
2115 | irqreturn_t ret; | ||
2116 | |||
2117 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2118 | ret = IRQ_NONE; | ||
2119 | if (esp->ops->irq_pending(esp)) { | ||
2120 | ret = IRQ_HANDLED; | ||
2121 | for (;;) { | ||
2122 | int i; | ||
2123 | |||
2124 | __esp_interrupt(esp); | ||
2125 | if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) | ||
2126 | break; | ||
2127 | esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; | ||
2128 | |||
2129 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
2130 | if (esp->ops->irq_pending(esp)) | ||
2131 | break; | ||
2132 | } | ||
2133 | if (i == ESP_QUICKIRQ_LIMIT) | ||
2134 | break; | ||
2135 | } | ||
2136 | } | ||
2137 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2138 | |||
2139 | return ret; | ||
2140 | } | ||
2141 | EXPORT_SYMBOL(scsi_esp_intr); | ||
2142 | |||
2143 | static void __devinit esp_get_revision(struct esp *esp) | ||
2144 | { | ||
2145 | u8 val; | ||
2146 | |||
2147 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
2148 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
2149 | esp_write8(esp->config2, ESP_CFG2); | ||
2150 | |||
2151 | val = esp_read8(ESP_CFG2); | ||
2152 | val &= ~ESP_CONFIG2_MAGIC; | ||
2153 | if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
2154 | /* If what we write to cfg2 does not come back, cfg2 is not | ||
2155 | * implemented, therefore this must be a plain esp100. | ||
2156 | */ | ||
2157 | esp->rev = ESP100; | ||
2158 | } else { | ||
2159 | esp->config2 = 0; | ||
2160 | esp_set_all_config3(esp, 5); | ||
2161 | esp->prev_cfg3 = 5; | ||
2162 | esp_write8(esp->config2, ESP_CFG2); | ||
2163 | esp_write8(0, ESP_CFG3); | ||
2164 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2165 | |||
2166 | val = esp_read8(ESP_CFG3); | ||
2167 | if (val != 5) { | ||
2168 | /* The cfg2 register is implemented, however | ||
2169 | * cfg3 is not, must be esp100a. | ||
2170 | */ | ||
2171 | esp->rev = ESP100A; | ||
2172 | } else { | ||
2173 | esp_set_all_config3(esp, 0); | ||
2174 | esp->prev_cfg3 = 0; | ||
2175 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2176 | |||
2177 | /* All of cfg{1,2,3} implemented, must be one of | ||
2178 | * the fas variants, figure out which one. | ||
2179 | */ | ||
2180 | if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { | ||
2181 | esp->rev = FAST; | ||
2182 | esp->sync_defp = SYNC_DEFP_FAST; | ||
2183 | } else { | ||
2184 | esp->rev = ESP236; | ||
2185 | } | ||
2186 | esp->config2 = 0; | ||
2187 | esp_write8(esp->config2, ESP_CFG2); | ||
2188 | } | ||
2189 | } | ||
2190 | } | ||
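esp_get_revision() tells the chip generations apart purely by which configuration registers echo written values back: no working CFG2 means ESP100, CFG2 without CFG3 means ESP100A, and a working CFG3 means one of the FAS parts, split on the clock conversion factor. A condensed sketch of that decision tree (the enum and function name are illustrative, not the driver's):

#include <stdio.h>

enum chip_rev { REV_ESP100, REV_ESP100A, REV_ESP236, REV_FAST };

/* cfg2_ok / cfg3_ok: did the test patterns written to CFG2 / CFG3 read back?
 * fast_clock: corresponds to (cfact == 0 || cfact > ESP_CCF_F5) above. */
static enum chip_rev classify_esp(int cfg2_ok, int cfg3_ok, int fast_clock)
{
	if (!cfg2_ok)
		return REV_ESP100;	/* CFG2 not implemented */
	if (!cfg3_ok)
		return REV_ESP100A;	/* CFG2 present, CFG3 missing */
	return fast_clock ? REV_FAST : REV_ESP236;
}

int main(void)
{
	/* Both config registers respond and the clock factor is high: FAST. */
	printf("%d\n", classify_esp(1, 1, 1) == REV_FAST);
	return 0;
}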
2191 | |||
2192 | static void __devinit esp_init_swstate(struct esp *esp) | ||
2193 | { | ||
2194 | int i; | ||
2195 | |||
2196 | INIT_LIST_HEAD(&esp->queued_cmds); | ||
2197 | INIT_LIST_HEAD(&esp->active_cmds); | ||
2198 | INIT_LIST_HEAD(&esp->esp_cmd_pool); | ||
2199 | |||
2200 | /* Start with a clear state, domain validation (via ->slave_configure, | ||
2201 | * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged | ||
2202 | * commands. | ||
2203 | */ | ||
2204 | for (i = 0 ; i < ESP_MAX_TARGET; i++) { | ||
2205 | esp->target[i].flags = 0; | ||
2206 | esp->target[i].nego_goal_period = 0; | ||
2207 | esp->target[i].nego_goal_offset = 0; | ||
2208 | esp->target[i].nego_goal_width = 0; | ||
2209 | esp->target[i].nego_goal_tags = 0; | ||
2210 | } | ||
2211 | } | ||
2212 | |||
2213 | /* This places the ESP into a known state at boot time. */ | ||
2214 | static void __devinit esp_bootup_reset(struct esp *esp) | ||
2215 | { | ||
2216 | u8 val; | ||
2217 | |||
2218 | /* Reset the DMA */ | ||
2219 | esp->ops->reset_dma(esp); | ||
2220 | |||
2221 | /* Reset the ESP */ | ||
2222 | esp_reset_esp(esp); | ||
2223 | |||
2224 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
2225 | val = esp_read8(ESP_CFG1); | ||
2226 | val |= ESP_CONFIG1_SRRDISAB; | ||
2227 | esp_write8(val, ESP_CFG1); | ||
2228 | |||
2229 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2230 | udelay(400); | ||
2231 | |||
2232 | esp_write8(esp->config1, ESP_CFG1); | ||
2233 | |||
2234 | /* Eat any bitrot in the chip and we are done... */ | ||
2235 | esp_read8(ESP_INTRPT); | ||
2236 | } | ||
2237 | |||
2238 | static void __devinit esp_set_clock_params(struct esp *esp) | ||
2239 | { | ||
2240 | int fmhz; | ||
2241 | u8 ccf; | ||
2242 | |||
2243 | /* This is getting messy but it has to be done correctly or else | ||
2244 | * you get weird behavior all over the place. We are trying to | ||
2245 | * basically figure out three pieces of information. | ||
2246 | * | ||
2247 | * a) Clock Conversion Factor | ||
2248 | * | ||
2249 | * This is a representation of the input crystal clock frequency | ||
2250 | * going into the ESP on this machine. Any operation whose timing | ||
2251 | * is longer than 400ns depends on this value being correct. For | ||
2252 | * example, you'll get blips for arbitration/selection during high | ||
2253 | * load or with multiple targets if this is not set correctly. | ||
2254 | * | ||
2255 | * b) Selection Time-Out | ||
2256 | * | ||
2257 | * The ESP isn't very bright and will arbitrate for the bus and try | ||
2258 | * to select a target forever if you let it. This value tells the | ||
2259 | * ESP when it has taken too long to negotiate and that it should | ||
2260 | * interrupt the CPU so we can see what happened. The value is | ||
2261 | * computed as follows (from NCR/Symbios chip docs). | ||
2262 | * | ||
2263 | * (Time Out Period) * (Input Clock) | ||
2264 | * STO = ---------------------------------- | ||
2265 | * (8192) * (Clock Conversion Factor) | ||
2266 | * | ||
2267 | * We use a time out period of 250ms (ESP_BUS_TIMEOUT). | ||
2268 | * | ||
2269 | * c) Empirical constants for synchronous offset and transfer period | ||
2270 | * register values | ||
2271 | * | ||
2272 | * This entails the smallest and largest sync period we could ever | ||
2273 | * handle on this ESP. | ||
2274 | */ | ||
2275 | fmhz = esp->cfreq; | ||
2276 | |||
2277 | ccf = ((fmhz / 1000000) + 4) / 5; | ||
2278 | if (ccf == 1) | ||
2279 | ccf = 2; | ||
2280 | |||
2281 | /* If we can't find anything reasonable, just assume 20MHz. | ||
2282 | * This is the clock frequency of the older sun4c's where I've | ||
2283 | * been unable to find the clock-frequency PROM property. All | ||
2284 | * other machines provide useful values it seems. | ||
2285 | */ | ||
2286 | if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { | ||
2287 | fmhz = 20000000; | ||
2288 | ccf = 4; | ||
2289 | } | ||
2290 | |||
2291 | esp->cfact = (ccf == 8 ? 0 : ccf); | ||
2292 | esp->cfreq = fmhz; | ||
2293 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
2294 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
2295 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
2296 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
2297 | } | ||
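esp_set_clock_params() reduces the crystal frequency to a Clock Conversion Factor by rounding MHz/5 up (with 1 promoted to 2, and 8 encoded as 0 in cfact), and the comment's STO formula then fixes the selection time-out for the 250 ms ESP_BUS_TIMEOUT. A worked example at 40 MHz; the driver derives its own register values from macros (ESP_MHZ_TO_CYCLE, ESP_NEG_DEFP) not shown in this hunk, so the code below only evaluates the comment's arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long fhz = 40000000;		/* 40 MHz crystal */
	unsigned int ccf, cfact;
	unsigned long sto;

	ccf = ((fhz / 1000000) + 4) / 5;	/* (40 + 4) / 5 = 8 */
	if (ccf == 1)
		ccf = 2;
	cfact = (ccf == 8) ? 0 : ccf;		/* a CCF of 8 is encoded as 0 */

	/* Comment's formula: STO = (time-out * input clock) / (8192 * CCF),
	 * with a 250 ms time-out, i.e. fhz / 4 clock ticks. */
	sto = (fhz / 4) / (8192UL * ccf);

	printf("ccf=%u cfact=%u sto=%lu\n", ccf, cfact, sto);
	/* Prints: ccf=8 cfact=0 sto=152 */
	return 0;
}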
2298 | |||
2299 | static const char *esp_chip_names[] = { | ||
2300 | "ESP100", | ||
2301 | "ESP100A", | ||
2302 | "ESP236", | ||
2303 | "FAS236", | ||
2304 | "FAS100A", | ||
2305 | "FAST", | ||
2306 | "FASHME", | ||
2307 | }; | ||
2308 | |||
2309 | static struct scsi_transport_template *esp_transport_template; | ||
2310 | |||
2311 | int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | ||
2312 | { | ||
2313 | static int instance; | ||
2314 | int err; | ||
2315 | |||
2316 | esp->host->transportt = esp_transport_template; | ||
2317 | esp->host->max_lun = ESP_MAX_LUN; | ||
2318 | esp->host->cmd_per_lun = 2; | ||
2319 | |||
2320 | esp_set_clock_params(esp); | ||
2321 | |||
2322 | esp_get_revision(esp); | ||
2323 | |||
2324 | esp_init_swstate(esp); | ||
2325 | |||
2326 | esp_bootup_reset(esp); | ||
2327 | |||
2328 | printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", | ||
2329 | esp->host->unique_id, esp->regs, esp->dma_regs, | ||
2330 | esp->host->irq); | ||
2331 | printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", | ||
2332 | esp->host->unique_id, esp_chip_names[esp->rev], | ||
2333 | esp->cfreq / 1000000, esp->cfact, esp->scsi_id); | ||
2334 | |||
2335 | /* Let the SCSI bus reset settle. */ | ||
2336 | ssleep(esp_bus_reset_settle); | ||
2337 | |||
2338 | err = scsi_add_host(esp->host, dev); | ||
2339 | if (err) | ||
2340 | return err; | ||
2341 | |||
2342 | esp->host->unique_id = instance++; | ||
2343 | |||
2344 | scsi_scan_host(esp->host); | ||
2345 | |||
2346 | return 0; | ||
2347 | } | ||
2348 | EXPORT_SYMBOL(scsi_esp_register); | ||
2349 | |||
2350 | void __devexit scsi_esp_unregister(struct esp *esp) | ||
2351 | { | ||
2352 | scsi_remove_host(esp->host); | ||
2353 | } | ||
2354 | EXPORT_SYMBOL(scsi_esp_unregister); | ||
2355 | |||
2356 | static int esp_slave_alloc(struct scsi_device *dev) | ||
2357 | { | ||
2358 | struct esp *esp = host_to_esp(dev->host); | ||
2359 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2360 | struct esp_lun_data *lp; | ||
2361 | |||
2362 | lp = kzalloc(sizeof(*lp), GFP_KERNEL); | ||
2363 | if (!lp) | ||
2364 | return -ENOMEM; | ||
2365 | dev->hostdata = lp; | ||
2366 | |||
2367 | tp->starget = dev->sdev_target; | ||
2368 | |||
2369 | spi_min_period(tp->starget) = esp->min_period; | ||
2370 | spi_max_offset(tp->starget) = 15; | ||
2371 | |||
2372 | if (esp->flags & ESP_FLAG_WIDE_CAPABLE) | ||
2373 | spi_max_width(tp->starget) = 1; | ||
2374 | else | ||
2375 | spi_max_width(tp->starget) = 0; | ||
2376 | |||
2377 | return 0; | ||
2378 | } | ||
2379 | |||
2380 | static int esp_slave_configure(struct scsi_device *dev) | ||
2381 | { | ||
2382 | struct esp *esp = host_to_esp(dev->host); | ||
2383 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2384 | int goal_tags, queue_depth; | ||
2385 | |||
2386 | goal_tags = 0; | ||
2387 | |||
2388 | if (dev->tagged_supported) { | ||
2389 | /* XXX make this configurable somehow XXX */ | ||
2390 | goal_tags = ESP_DEFAULT_TAGS; | ||
2391 | |||
2392 | if (goal_tags > ESP_MAX_TAG) | ||
2393 | goal_tags = ESP_MAX_TAG; | ||
2394 | } | ||
2395 | |||
2396 | queue_depth = goal_tags; | ||
2397 | if (queue_depth < dev->host->cmd_per_lun) | ||
2398 | queue_depth = dev->host->cmd_per_lun; | ||
2399 | |||
2400 | if (goal_tags) { | ||
2401 | scsi_set_tag_type(dev, MSG_ORDERED_TAG); | ||
2402 | scsi_activate_tcq(dev, queue_depth); | ||
2403 | } else { | ||
2404 | scsi_deactivate_tcq(dev, queue_depth); | ||
2405 | } | ||
2406 | tp->flags |= ESP_TGT_DISCONNECT; | ||
2407 | |||
2408 | if (!spi_initial_dv(dev->sdev_target)) | ||
2409 | spi_dv_device(dev); | ||
2410 | |||
2411 | return 0; | ||
2412 | } | ||
2413 | |||
2414 | static void esp_slave_destroy(struct scsi_device *dev) | ||
2415 | { | ||
2416 | struct esp_lun_data *lp = dev->hostdata; | ||
2417 | |||
2418 | kfree(lp); | ||
2419 | dev->hostdata = NULL; | ||
2420 | } | ||
2421 | |||
2422 | static int esp_eh_abort_handler(struct scsi_cmnd *cmd) | ||
2423 | { | ||
2424 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2425 | struct esp_cmd_entry *ent, *tmp; | ||
2426 | struct completion eh_done; | ||
2427 | unsigned long flags; | ||
2428 | |||
2429 | /* XXX This helps a lot with debugging but might be a bit | ||
2430 | * XXX much for the final driver. | ||
2431 | */ | ||
2432 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2433 | printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", | ||
2434 | esp->host->unique_id, cmd, cmd->cmnd[0]); | ||
2435 | ent = esp->active_cmd; | ||
2436 | if (ent) | ||
2437 | printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", | ||
2438 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2439 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
2440 | printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", | ||
2441 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2442 | } | ||
2443 | list_for_each_entry(ent, &esp->active_cmds, list) { | ||
2444 | printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", | ||
2445 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2446 | } | ||
2447 | esp_dump_cmd_log(esp); | ||
2448 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2449 | |||
2450 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2451 | |||
2452 | ent = NULL; | ||
2453 | list_for_each_entry(tmp, &esp->queued_cmds, list) { | ||
2454 | if (tmp->cmd == cmd) { | ||
2455 | ent = tmp; | ||
2456 | break; | ||
2457 | } | ||
2458 | } | ||
2459 | |||
2460 | if (ent) { | ||
2461 | /* Easiest case, we didn't even issue the command | ||
2462 | * yet so it is trivial to abort. | ||
2463 | */ | ||
2464 | list_del(&ent->list); | ||
2465 | |||
2466 | cmd->result = DID_ABORT << 16; | ||
2467 | cmd->scsi_done(cmd); | ||
2468 | |||
2469 | esp_put_ent(esp, ent); | ||
2470 | |||
2471 | goto out_success; | ||
2472 | } | ||
2473 | |||
2474 | init_completion(&eh_done); | ||
2475 | |||
2476 | ent = esp->active_cmd; | ||
2477 | if (ent && ent->cmd == cmd) { | ||
2478 | /* Command is the currently active command on | ||
2479 | * the bus. If we already have an output message | ||
2480 | * pending, no dice. | ||
2481 | */ | ||
2482 | if (esp->msg_out_len) | ||
2483 | goto out_failure; | ||
2484 | |||
2485 | /* Send out an abort, encouraging the target to | ||
2486 | * go to MSGOUT phase by asserting ATN. | ||
2487 | */ | ||
2488 | esp->msg_out[0] = ABORT_TASK_SET; | ||
2489 | esp->msg_out_len = 1; | ||
2490 | ent->eh_done = &eh_done; | ||
2491 | |||
2492 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
2493 | } else { | ||
2494 | /* The command is disconnected. This is not easy to | ||
2495 | * abort. For now we fail and let the scsi error | ||
2496 | * handling layer go try a scsi bus reset or host | ||
2497 | * reset. | ||
2498 | * | ||
2499 | * What we could do is put together a scsi command | ||
2500 | * solely for the purpose of sending an abort message | ||
2501 | * to the target. Coming up with all the code to | ||
2502 | * cook up scsi commands, special-case them everywhere, | ||
2503 | * etc. is of questionable gain, and it would be better | ||
2504 | * if the generic scsi error handling layer could do at | ||
2505 | * least some of that for us. | ||
2506 | * | ||
2507 | * Anyway, this is an area for potential future improvement | ||
2508 | * in this driver. | ||
2509 | */ | ||
2510 | goto out_failure; | ||
2511 | } | ||
2512 | |||
2513 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2514 | |||
2515 | if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { | ||
2516 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2517 | ent->eh_done = NULL; | ||
2518 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2519 | |||
2520 | return FAILED; | ||
2521 | } | ||
2522 | |||
2523 | return SUCCESS; | ||
2524 | |||
2525 | out_success: | ||
2526 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2527 | return SUCCESS; | ||
2528 | |||
2529 | out_failure: | ||
2530 | /* XXX This might be a good location to set ESP_TGT_BROKEN | ||
2531 | * XXX since we know which target/lun in particular is | ||
2532 | * XXX causing trouble. | ||
2533 | */ | ||
2534 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2535 | return FAILED; | ||
2536 | } | ||
2537 | |||
2538 | static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) | ||
2539 | { | ||
2540 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2541 | struct completion eh_reset; | ||
2542 | unsigned long flags; | ||
2543 | |||
2544 | init_completion(&eh_reset); | ||
2545 | |||
2546 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2547 | |||
2548 | esp->eh_reset = &eh_reset; | ||
2549 | |||
2550 | /* XXX This is too simple... We should add lots of | ||
2551 | * XXX checks here so that if we find that the chip is | ||
2552 | * XXX very wedged we return failure immediately so | ||
2553 | * XXX that we can perform a full chip reset. | ||
2554 | */ | ||
2555 | esp->flags |= ESP_FLAG_RESETTING; | ||
2556 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2557 | |||
2558 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2559 | |||
2560 | ssleep(esp_bus_reset_settle); | ||
2561 | |||
2562 | if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { | ||
2563 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2564 | esp->eh_reset = NULL; | ||
2565 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2566 | |||
2567 | return FAILED; | ||
2568 | } | ||
2569 | |||
2570 | return SUCCESS; | ||
2571 | } | ||
2572 | |||
2573 | /* All bets are off, reset the entire device. */ | ||
2574 | static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) | ||
2575 | { | ||
2576 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2577 | unsigned long flags; | ||
2578 | |||
2579 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2580 | esp_bootup_reset(esp); | ||
2581 | esp_reset_cleanup(esp); | ||
2582 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2583 | |||
2584 | ssleep(esp_bus_reset_settle); | ||
2585 | |||
2586 | return SUCCESS; | ||
2587 | } | ||
2588 | |||
2589 | static const char *esp_info(struct Scsi_Host *host) | ||
2590 | { | ||
2591 | return "esp"; | ||
2592 | } | ||
2593 | |||
2594 | struct scsi_host_template scsi_esp_template = { | ||
2595 | .module = THIS_MODULE, | ||
2596 | .name = "esp", | ||
2597 | .info = esp_info, | ||
2598 | .queuecommand = esp_queuecommand, | ||
2599 | .slave_alloc = esp_slave_alloc, | ||
2600 | .slave_configure = esp_slave_configure, | ||
2601 | .slave_destroy = esp_slave_destroy, | ||
2602 | .eh_abort_handler = esp_eh_abort_handler, | ||
2603 | .eh_bus_reset_handler = esp_eh_bus_reset_handler, | ||
2604 | .eh_host_reset_handler = esp_eh_host_reset_handler, | ||
2605 | .can_queue = 7, | ||
2606 | .this_id = 7, | ||
2607 | .sg_tablesize = SG_ALL, | ||
2608 | .use_clustering = ENABLE_CLUSTERING, | ||
2609 | .max_sectors = 0xffff, | ||
2610 | .skip_settle_delay = 1, | ||
2611 | }; | ||
2612 | EXPORT_SYMBOL(scsi_esp_template); | ||
2613 | |||
2614 | static void esp_get_signalling(struct Scsi_Host *host) | ||
2615 | { | ||
2616 | struct esp *esp = host_to_esp(host); | ||
2617 | enum spi_signal_type type; | ||
2618 | |||
2619 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
2620 | type = SPI_SIGNAL_HVD; | ||
2621 | else | ||
2622 | type = SPI_SIGNAL_SE; | ||
2623 | |||
2624 | spi_signalling(host) = type; | ||
2625 | } | ||
2626 | |||
2627 | static void esp_set_offset(struct scsi_target *target, int offset) | ||
2628 | { | ||
2629 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2630 | struct esp *esp = host_to_esp(host); | ||
2631 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2632 | |||
2633 | tp->nego_goal_offset = offset; | ||
2634 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2635 | } | ||
2636 | |||
2637 | static void esp_set_period(struct scsi_target *target, int period) | ||
2638 | { | ||
2639 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2640 | struct esp *esp = host_to_esp(host); | ||
2641 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2642 | |||
2643 | tp->nego_goal_period = period; | ||
2644 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2645 | } | ||
2646 | |||
2647 | static void esp_set_width(struct scsi_target *target, int width) | ||
2648 | { | ||
2649 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2650 | struct esp *esp = host_to_esp(host); | ||
2651 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2652 | |||
2653 | tp->nego_goal_width = (width ? 1 : 0); | ||
2654 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2655 | } | ||
2656 | |||
2657 | static struct spi_function_template esp_transport_ops = { | ||
2658 | .set_offset = esp_set_offset, | ||
2659 | .show_offset = 1, | ||
2660 | .set_period = esp_set_period, | ||
2661 | .show_period = 1, | ||
2662 | .set_width = esp_set_width, | ||
2663 | .show_width = 1, | ||
2664 | .get_signalling = esp_get_signalling, | ||
2665 | }; | ||
2666 | |||
2667 | static int __init esp_init(void) | ||
2668 | { | ||
2669 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < | ||
2670 | sizeof(struct esp_cmd_priv)); | ||
2671 | |||
2672 | esp_transport_template = spi_attach_transport(&esp_transport_ops); | ||
2673 | if (!esp_transport_template) | ||
2674 | return -ENODEV; | ||
2675 | |||
2676 | return 0; | ||
2677 | } | ||
2678 | |||
2679 | static void __exit esp_exit(void) | ||
2680 | { | ||
2681 | spi_release_transport(esp_transport_template); | ||
2682 | } | ||
2683 | |||
2684 | MODULE_DESCRIPTION("ESP SCSI driver core"); | ||
2685 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
2686 | MODULE_LICENSE("GPL"); | ||
2687 | MODULE_VERSION(DRV_VERSION); | ||
2688 | |||
2689 | module_param(esp_bus_reset_settle, int, 0); | ||
2690 | MODULE_PARM_DESC(esp_bus_reset_settle, | ||
2691 | "ESP scsi bus reset delay in seconds"); | ||
2692 | |||
2693 | module_param(esp_debug, int, 0); | ||
2694 | MODULE_PARM_DESC(esp_debug, | ||
2695 | "ESP bitmapped debugging message enable value:\n" | ||
2696 | " 0x00000001 Log interrupt events\n" | ||
2697 | " 0x00000002 Log scsi commands\n" | ||
2698 | " 0x00000004 Log resets\n" | ||
2699 | " 0x00000008 Log message in events\n" | ||
2700 | " 0x00000010 Log message out events\n" | ||
2701 | " 0x00000020 Log command completion\n" | ||
2702 | " 0x00000040 Log disconnects\n" | ||
2703 | " 0x00000080 Log data start\n" | ||
2704 | " 0x00000100 Log data done\n" | ||
2705 | " 0x00000200 Log reconnects\n" | ||
2706 | " 0x00000400 Log auto-sense data\n" | ||
2707 | ); | ||
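Since esp_debug is a plain bitmask, the categories can be combined: for example, loading the core module with esp_debug=0x0003 should enable both interrupt-event and SCSI-command logging, and 0x7ff would turn on every category listed above.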
2708 | |||
2709 | module_init(esp_init); | ||
2710 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h new file mode 100644 index 000000000000..8d4a6690401f --- /dev/null +++ b/drivers/scsi/esp_scsi.h | |||
@@ -0,0 +1,560 @@ | |||
1 | /* esp_scsi.h: Defines and structures for the ESP driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _ESP_SCSI_H | ||
7 | #define _ESP_SCSI_H | ||
8 | |||
9 | /* Access Description Offset */ | ||
10 | #define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */ | ||
11 | #define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */ | ||
12 | #define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */ | ||
13 | #define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */ | ||
14 | #define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */ | ||
15 | #define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */ | ||
16 | #define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */ | ||
17 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */ | ||
18 | #define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */ | ||
19 | #define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */ | ||
20 | #define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */ | ||
21 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
22 | #define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */ | ||
23 | #define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */ | ||
24 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
25 | #define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ | ||
26 | #define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ | ||
27 | #define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ | ||
28 | #define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ | ||
29 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
30 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
31 | #define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */ | ||
32 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
33 | |||
34 | #define SBUS_ESP_REG_SIZE 0x40UL | ||
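Note that the Offset column reflects the SBus layout, where each 8-bit ESP register occupies its own 4-byte slot; the SBus front-end further below converts a register index to a byte offset by multiplying by 4 (see sbus_esp_write8()/sbus_esp_read8() in sun_esp.c), and SBUS_ESP_REG_SIZE covers the resulting 16 x 4-byte window.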
35 | |||
36 | /* Bitfield meanings for the above registers. */ | ||
37 | |||
38 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
39 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
40 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
41 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
42 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
43 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
44 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
45 | |||
46 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
47 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
48 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
49 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
50 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */ | ||
51 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
52 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
53 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
54 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
55 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */ | ||
56 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */ | ||
57 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
58 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
59 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
60 | |||
61 | /* ESP config register 3 read-write, found only on esp236+fas236+fas100a+hme chips */ | ||
62 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
63 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
64 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
65 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
66 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
67 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
68 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
69 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
70 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
71 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
72 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
73 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
74 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
75 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
76 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
77 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
78 | |||
79 | /* ESP command register read-write */ | ||
80 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
81 | * chip. None of them can generate interrupts 'cept | ||
82 | * the "SCSI bus reset" command if you have not disabled | ||
83 | * SCSI reset interrupts in the config1 ESP register. | ||
84 | */ | ||
85 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
86 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
87 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
88 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
89 | |||
90 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
91 | * for these commands to work. | ||
92 | */ | ||
93 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
94 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
95 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
96 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
97 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
98 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
99 | |||
100 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
101 | * to a target as the initiator for these commands to work. | ||
102 | */ | ||
103 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
104 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
105 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
106 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
107 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
108 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
109 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
110 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
111 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
112 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
113 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
114 | |||
115 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
116 | * not be connected to any targets as initiator for | ||
117 | * these commands to work. | ||
118 | */ | ||
119 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
120 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
121 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
122 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
123 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
124 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
125 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
126 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
127 | |||
128 | /* This bit enables the ESP's DMA on the SBus */ | ||
129 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
130 | |||
131 | /* ESP status register read-only */ | ||
132 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
133 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
134 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
135 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
136 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
137 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
138 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
139 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
140 | /* This indicates the 'interrupt pending' condition on esp236; it is a reserved | ||
141 | * bit on other revs of the ESP. | ||
142 | */ | ||
143 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
144 | |||
145 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
146 | * with the following values to determine the current phase the ESP | ||
147 | * (at least thinks it) is in. For our purposes we also add our own | ||
148 | * software 'done' bit for our phase management engine. | ||
149 | */ | ||
150 | #define ESP_DOP (0) /* Data Out */ | ||
151 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
152 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
153 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
154 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
155 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
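As a small illustrative sketch (not part of the patch): given a status value already latched into esp->sreg on interrupt entry, the phase comparison described above reduces to a single mask operation.

	/* Illustration only: decode the bus phase from a latched status
	 * register value using the masks defined above.  The result is
	 * compared against ESP_DOP ... ESP_MIP.
	 */
	static inline u8 esp_bus_phase(u8 sreg)
	{
		return sreg & ESP_STAT_PMASK;
	}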
156 | |||
157 | /* HME only: status 2 register */ | ||
158 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
159 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
160 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
161 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
162 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
163 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
164 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
165 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
166 | |||
167 | /* ESP interrupt register read-only */ | ||
168 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
169 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
170 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
171 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
172 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
173 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
174 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
175 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
176 | |||
177 | /* ESP sequence step register read-only */ | ||
178 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
179 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
180 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
181 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
182 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
183 | * bytes to be lost | ||
184 | */ | ||
185 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
186 | |||
187 | /* Ho hum, some ESPs set the step register to this as well... */ | ||
188 | #define ESP_STEP_FINI5 0x05 | ||
189 | #define ESP_STEP_FINI6 0x06 | ||
190 | #define ESP_STEP_FINI7 0x07 | ||
191 | |||
192 | /* ESP chip-test register read-write */ | ||
193 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
194 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
195 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
196 | |||
197 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
198 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
199 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
200 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
201 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
202 | |||
203 | /* ESP fifo flags register read-only */ | ||
204 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
205 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
206 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
207 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
208 | |||
209 | /* ESP clock conversion factor register write-only */ | ||
210 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
211 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
212 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
213 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
214 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
215 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
216 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
217 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
218 | |||
219 | /* HME only... */ | ||
220 | #define ESP_BUSID_RESELID 0x10 | ||
221 | #define ESP_BUSID_CTR32BIT 0x40 | ||
222 | |||
223 | #define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ | ||
224 | #define ESP_TIMEO_CONST 8192 | ||
225 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
226 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
227 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
228 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
229 | |||
230 | /* For slow to medium speed input clock rates we shoot for 5mb/s, but for high | ||
231 | * input clock rates we try to do 10mb/s although I don't think a transfer can | ||
232 | * even run that fast with an ESP even with DMA2 scatter gather pipelining. | ||
233 | */ | ||
234 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
235 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
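As a worked example of the macros above (assuming the core passes the clock rate in Hz, which is how the SBus front-end below reads the OpenFirmware clock-frequency property, and assuming a clock-conversion factor of 8 for a 40 MHz part): ESP_NEG_DEFP(40000000, 8) evaluates to (250 * 40000) / (8192 * 8), i.e. roughly 152 for the selection timeout register, while ESP_MHZ_TO_CYCLE(40000000) yields 25000, a 25 ns clock period expressed in picoseconds.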
236 | |||
237 | struct esp_cmd_priv { | ||
238 | union { | ||
239 | dma_addr_t dma_addr; | ||
240 | int num_sg; | ||
241 | } u; | ||
242 | |||
243 | unsigned int cur_residue; | ||
244 | struct scatterlist *cur_sg; | ||
245 | unsigned int tot_residue; | ||
246 | }; | ||
247 | #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) | ||
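Because struct esp_cmd_priv is overlaid on the scsi_cmnd's scsi_pointer area (the BUILD_BUG_ON in esp_init() guards the size), per-command state is reached through this macro; a purely illustrative access, assuming a scsi_cmnd the driver already owns, looks like:

	/* Illustration only: read the per-command DMA bookkeeping. */
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	unsigned int bytes_left = spriv->tot_residue;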
248 | |||
249 | enum esp_rev { | ||
250 | ESP100 = 0x00, /* NCR53C90 - very broken */ | ||
251 | ESP100A = 0x01, /* NCR53C90A */ | ||
252 | ESP236 = 0x02, | ||
253 | FAS236 = 0x03, | ||
254 | FAS100A = 0x04, | ||
255 | FAST = 0x05, | ||
256 | FASHME = 0x06, | ||
257 | }; | ||
258 | |||
259 | struct esp_cmd_entry { | ||
260 | struct list_head list; | ||
261 | |||
262 | struct scsi_cmnd *cmd; | ||
263 | |||
264 | unsigned int saved_cur_residue; | ||
265 | struct scatterlist *saved_cur_sg; | ||
266 | unsigned int saved_tot_residue; | ||
267 | |||
268 | u8 flags; | ||
269 | #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ | ||
270 | #define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ | ||
271 | #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ | ||
272 | |||
273 | u8 tag[2]; | ||
274 | |||
275 | u8 status; | ||
276 | u8 message; | ||
277 | |||
278 | unsigned char *sense_ptr; | ||
279 | unsigned char *saved_sense_ptr; | ||
280 | dma_addr_t sense_dma; | ||
281 | |||
282 | struct completion *eh_done; | ||
283 | }; | ||
284 | |||
285 | /* XXX make this configurable somehow XXX */ | ||
286 | #define ESP_DEFAULT_TAGS 16 | ||
287 | |||
288 | #define ESP_MAX_TARGET 16 | ||
289 | #define ESP_MAX_LUN 8 | ||
290 | #define ESP_MAX_TAG 256 | ||
291 | |||
292 | struct esp_lun_data { | ||
293 | struct esp_cmd_entry *non_tagged_cmd; | ||
294 | int num_tagged; | ||
295 | int hold; | ||
296 | struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG]; | ||
297 | }; | ||
298 | |||
299 | struct esp_target_data { | ||
300 | /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which | ||
301 | * match the currently negotiated settings for this target. The SCSI | ||
302 | * protocol values are maintained in spi_{offset,period,wide}(starget). | ||
303 | */ | ||
304 | u8 esp_period; | ||
305 | u8 esp_offset; | ||
306 | u8 esp_config3; | ||
307 | |||
308 | u8 flags; | ||
309 | #define ESP_TGT_WIDE 0x01 | ||
310 | #define ESP_TGT_DISCONNECT 0x02 | ||
311 | #define ESP_TGT_NEGO_WIDE 0x04 | ||
312 | #define ESP_TGT_NEGO_SYNC 0x08 | ||
313 | #define ESP_TGT_CHECK_NEGO 0x40 | ||
314 | #define ESP_TGT_BROKEN 0x80 | ||
315 | |||
316 | /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this | ||
317 | * device we will try to negotiate the following parameters. | ||
318 | */ | ||
319 | u8 nego_goal_period; | ||
320 | u8 nego_goal_offset; | ||
321 | u8 nego_goal_width; | ||
322 | u8 nego_goal_tags; | ||
323 | |||
324 | struct scsi_target *starget; | ||
325 | }; | ||
326 | |||
327 | struct esp_event_ent { | ||
328 | u8 type; | ||
329 | #define ESP_EVENT_TYPE_EVENT 0x01 | ||
330 | #define ESP_EVENT_TYPE_CMD 0x02 | ||
331 | u8 val; | ||
332 | |||
333 | u8 sreg; | ||
334 | u8 seqreg; | ||
335 | u8 sreg2; | ||
336 | u8 ireg; | ||
337 | u8 select_state; | ||
338 | u8 event; | ||
339 | u8 __pad; | ||
340 | }; | ||
341 | |||
342 | struct esp; | ||
343 | struct esp_driver_ops { | ||
344 | /* Read and write the ESP 8-bit registers. On some | ||
345 | * applications of the ESP chip the registers are at 4-byte | ||
346 | * instead of 1-byte intervals. | ||
347 | */ | ||
348 | void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); | ||
349 | u8 (*esp_read8)(struct esp *esp, unsigned long reg); | ||
350 | |||
351 | /* Map and unmap DMA memory. Eventually the driver will be | ||
352 | * converted to the generic DMA API as soon as SBUS is able to | ||
353 | * cope with that. At such time we can remove this. | ||
354 | */ | ||
355 | dma_addr_t (*map_single)(struct esp *esp, void *buf, | ||
356 | size_t sz, int dir); | ||
357 | int (*map_sg)(struct esp *esp, struct scatterlist *sg, | ||
358 | int num_sg, int dir); | ||
359 | void (*unmap_single)(struct esp *esp, dma_addr_t addr, | ||
360 | size_t sz, int dir); | ||
361 | void (*unmap_sg)(struct esp *esp, struct scatterlist *sg, | ||
362 | int num_sg, int dir); | ||
363 | |||
364 | /* Return non-zero if there is an IRQ pending. Usually this | ||
365 | * status bit lives in the DMA controller sitting in front of | ||
366 | * the ESP. This has to be accurate or else the ESP interrupt | ||
367 | * handler will not run. | ||
368 | */ | ||
369 | int (*irq_pending)(struct esp *esp); | ||
370 | |||
371 | /* Reset the DMA engine entirely. On return, ESP interrupts | ||
372 | * should be enabled. Often the interrupt enabling is | ||
373 | * controlled in the DMA engine. | ||
374 | */ | ||
375 | void (*reset_dma)(struct esp *esp); | ||
376 | |||
377 | /* Drain any pending DMA in the DMA engine after a transfer. | ||
378 | * This is for writes to memory. | ||
379 | */ | ||
380 | void (*dma_drain)(struct esp *esp); | ||
381 | |||
382 | /* Invalidate the DMA engine after a DMA transfer. */ | ||
383 | void (*dma_invalidate)(struct esp *esp); | ||
384 | |||
385 | /* Setup an ESP command that will use a DMA transfer. | ||
386 | * The 'esp_count' specifies what transfer length should be | ||
387 | * programmed into the ESP transfer counter registers, whereas | ||
388 | * the 'dma_count' is the length that should be programmed into | ||
389 | * the DMA controller. Usually they are the same. If 'write' | ||
390 | * is non-zero, this transfer is a write into memory. 'cmd' | ||
391 | * holds the ESP command that should be issued by calling | ||
392 | * scsi_esp_cmd() at the appropriate time while programming | ||
393 | * the DMA hardware. | ||
394 | */ | ||
395 | void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count, | ||
396 | u32 dma_count, int write, u8 cmd); | ||
397 | |||
398 | /* Return non-zero if the DMA engine is reporting an error | ||
399 | * currently. | ||
400 | */ | ||
401 | int (*dma_error)(struct esp *esp); | ||
402 | }; | ||
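As a sketch of the first two hooks (purely hypothetical, not taken from any in-tree front-end, and assuming the usual readb/writeb MMIO accessors): a bus whose ESP registers are byte-spaced could wire them up as follows, in contrast to the SBus front-end below, which multiplies the register index by 4.

	/* Hypothetical accessors for byte-spaced ESP registers. */
	static void example_esp_write8(struct esp *esp, u8 val, unsigned long reg)
	{
		writeb(val, esp->regs + reg);
	}

	static u8 example_esp_read8(struct esp *esp, unsigned long reg)
	{
		return readb(esp->regs + reg);
	}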
403 | |||
404 | #define ESP_MAX_MSG_SZ 8 | ||
405 | #define ESP_EVENT_LOG_SZ 32 | ||
406 | |||
407 | #define ESP_QUICKIRQ_LIMIT 100 | ||
408 | #define ESP_RESELECT_TAG_LIMIT 2500 | ||
409 | |||
410 | struct esp { | ||
411 | void __iomem *regs; | ||
412 | void __iomem *dma_regs; | ||
413 | |||
414 | const struct esp_driver_ops *ops; | ||
415 | |||
416 | struct Scsi_Host *host; | ||
417 | void *dev; | ||
418 | |||
419 | struct esp_cmd_entry *active_cmd; | ||
420 | |||
421 | struct list_head queued_cmds; | ||
422 | struct list_head active_cmds; | ||
423 | |||
424 | u8 *command_block; | ||
425 | dma_addr_t command_block_dma; | ||
426 | |||
427 | unsigned int data_dma_len; | ||
428 | |||
429 | /* The following are used to determine the cause of an IRQ. Upon every | ||
430 | * IRQ entry we synchronize these with the hardware registers. | ||
431 | */ | ||
432 | u8 sreg; | ||
433 | u8 seqreg; | ||
434 | u8 sreg2; | ||
435 | u8 ireg; | ||
436 | |||
437 | u32 prev_hme_dmacsr; | ||
438 | u8 prev_soff; | ||
439 | u8 prev_stp; | ||
440 | u8 prev_cfg3; | ||
441 | u8 __pad; | ||
442 | |||
443 | struct list_head esp_cmd_pool; | ||
444 | |||
445 | struct esp_target_data target[ESP_MAX_TARGET]; | ||
446 | |||
447 | int fifo_cnt; | ||
448 | u8 fifo[16]; | ||
449 | |||
450 | struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ]; | ||
451 | int esp_event_cur; | ||
452 | |||
453 | u8 msg_out[ESP_MAX_MSG_SZ]; | ||
454 | int msg_out_len; | ||
455 | |||
456 | u8 msg_in[ESP_MAX_MSG_SZ]; | ||
457 | int msg_in_len; | ||
458 | |||
459 | u8 bursts; | ||
460 | u8 config1; | ||
461 | u8 config2; | ||
462 | |||
463 | u8 scsi_id; | ||
464 | u32 scsi_id_mask; | ||
465 | |||
466 | enum esp_rev rev; | ||
467 | |||
468 | u32 flags; | ||
469 | #define ESP_FLAG_DIFFERENTIAL 0x00000001 | ||
470 | #define ESP_FLAG_RESETTING 0x00000002 | ||
471 | #define ESP_FLAG_DOING_SLOWCMD 0x00000004 | ||
472 | #define ESP_FLAG_WIDE_CAPABLE 0x00000008 | ||
473 | #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 | ||
474 | |||
475 | u8 select_state; | ||
476 | #define ESP_SELECT_NONE 0x00 /* Not selecting */ | ||
477 | #define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */ | ||
478 | #define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */ | ||
479 | |||
480 | /* When we are not selecting, we are expecting an event. */ | ||
481 | u8 event; | ||
482 | #define ESP_EVENT_NONE 0x00 | ||
483 | #define ESP_EVENT_CMD_START 0x01 | ||
484 | #define ESP_EVENT_CMD_DONE 0x02 | ||
485 | #define ESP_EVENT_DATA_IN 0x03 | ||
486 | #define ESP_EVENT_DATA_OUT 0x04 | ||
487 | #define ESP_EVENT_DATA_DONE 0x05 | ||
488 | #define ESP_EVENT_MSGIN 0x06 | ||
489 | #define ESP_EVENT_MSGIN_MORE 0x07 | ||
490 | #define ESP_EVENT_MSGIN_DONE 0x08 | ||
491 | #define ESP_EVENT_MSGOUT 0x09 | ||
492 | #define ESP_EVENT_MSGOUT_DONE 0x0a | ||
493 | #define ESP_EVENT_STATUS 0x0b | ||
494 | #define ESP_EVENT_FREE_BUS 0x0c | ||
495 | #define ESP_EVENT_CHECK_PHASE 0x0d | ||
496 | #define ESP_EVENT_RESET 0x10 | ||
497 | |||
498 | /* Probed in esp_get_clock_params() */ | ||
499 | u32 cfact; | ||
500 | u32 cfreq; | ||
501 | u32 ccycle; | ||
502 | u32 ctick; | ||
503 | u32 neg_defp; | ||
504 | u32 sync_defp; | ||
505 | |||
506 | /* Computed in esp_reset_esp() */ | ||
507 | u32 max_period; | ||
508 | u32 min_period; | ||
509 | u32 radelay; | ||
510 | |||
511 | /* Slow command state. */ | ||
512 | u8 *cmd_bytes_ptr; | ||
513 | int cmd_bytes_left; | ||
514 | |||
515 | struct completion *eh_reset; | ||
516 | |||
517 | struct sbus_dma *dma; | ||
518 | }; | ||
519 | |||
520 | #define host_to_esp(host) ((struct esp *)(host)->hostdata) | ||
521 | |||
522 | /* A front-end driver for the ESP chip should do the following in | ||
523 | * its device probe routine (a minimal sketch follows this header): | ||
524 | * 1) Allocate the host and private area using scsi_host_alloc() | ||
525 | * with size 'sizeof(struct esp)'. The first argument to | ||
526 | * scsi_host_alloc() should be &scsi_esp_template. | ||
527 | * 2) Set host->max_id as appropriate. | ||
528 | * 3) Set esp->host to the scsi_host itself, and esp->dev | ||
529 | * to the device object pointer. | ||
530 | * 4) Hook up esp->ops to the front-end implementation. | ||
531 | * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE | ||
532 | * in esp->flags. | ||
533 | * 6) Map the DMA and ESP chip registers. | ||
534 | * 7) DMA map the ESP command block, store the DMA address | ||
535 | * in esp->command_block_dma. | ||
536 | * 8) Register the scsi_esp_intr() interrupt handler. | ||
537 | * 9) Probe for and provide the following chip properties: | ||
538 | * esp->scsi_id (assign to esp->host->this_id too) | ||
539 | * esp->scsi_id_mask | ||
540 | * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL | ||
541 | * esp->cfreq | ||
542 | * DMA burst bit mask in esp->bursts, if necessary | ||
543 | * 10) Perform any actions necessary before the ESP device can | ||
544 | * be programmed for the first time. On some configs, for | ||
545 | * example, the DMA engine has to be reset before ESP can | ||
546 | * be programmed. | ||
547 | * 11) If necessary, call dev_set_drvdata() as needed. | ||
548 | * 12) Call scsi_esp_register() with prepared 'esp' structure | ||
549 | * and a device pointer if possible. | ||
550 | * 13) Check scsi_esp_register() return value, release all resources | ||
551 | * if an error was returned. | ||
552 | */ | ||
553 | extern struct scsi_host_template scsi_esp_template; | ||
554 | extern int scsi_esp_register(struct esp *, struct device *); | ||
555 | |||
556 | extern void scsi_esp_unregister(struct esp *); | ||
557 | extern irqreturn_t scsi_esp_intr(int, void *); | ||
558 | extern void scsi_esp_cmd(struct esp *, u8); | ||
559 | |||
560 | #endif /* !(_ESP_SCSI_H) */ | ||
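To tie the checklist in the header comment together, here is a condensed, hypothetical probe sketch; struct example_bus_device and example_esp_ops are placeholders, the bus-specific steps are only stubbed out, and sun_esp.c below is the real SBus implementation.

	/* Hypothetical front-end probe, following the numbered steps in
	 * the header comment above.  Bus-specific work is elided.
	 */
	static int example_esp_probe(struct example_bus_device *bdev)
	{
		struct Scsi_Host *host;
		struct esp *esp;
		int err;

		host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
		if (!host)
			return -ENOMEM;

		host->max_id = 8;			/* step 2 */
		esp = host_to_esp(host);
		esp->host = host;			/* step 3 */
		esp->dev = bdev;
		esp->ops = &example_esp_ops;		/* step 4, defined elsewhere */

		/* steps 6-7: map registers and the command block (bus specific) */
		/* step 9: fill in esp->scsi_id, esp->scsi_id_mask, esp->cfreq */

		host->irq = bdev->irq;
		err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
				  "example_esp", esp);	/* step 8 */
		if (err)
			goto fail_put;

		err = scsi_esp_register(esp, &bdev->dev);	/* steps 12-13 */
		if (err)
			goto fail_irq;

		return 0;

	fail_irq:
		free_irq(host->irq, esp);
	fail_put:
		scsi_host_put(host);
		return err;
	}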
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9f10689905a8..c4195ea869e9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi | |||
1403 | struct scsi_host_template *tpnt = match->data; | 1403 | struct scsi_host_template *tpnt = match->data; |
1404 | struct Scsi_Host *host; | 1404 | struct Scsi_Host *host; |
1405 | struct qlogicpti *qpti; | 1405 | struct qlogicpti *qpti; |
1406 | char *fcode; | 1406 | const char *fcode; |
1407 | 1407 | ||
1408 | /* Sometimes Antares cards come up not completely | 1408 | /* Sometimes Antares cards come up not completely |
1409 | * setup, and we get a report of a zero IRQ. | 1409 | * setup, and we get a report of a zero IRQ. |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c new file mode 100644 index 000000000000..8c766bcd1095 --- /dev/null +++ b/drivers/scsi/sun_esp.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* sun_esp.c: ESP front-end for Sparc SBUS systems. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/irq.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/dma.h> | ||
14 | |||
15 | #include <asm/sbus.h> | ||
16 | |||
17 | #include <scsi/scsi_host.h> | ||
18 | |||
19 | #include "esp_scsi.h" | ||
20 | |||
21 | #define DRV_MODULE_NAME "sun_esp" | ||
22 | #define PFX DRV_MODULE_NAME ": " | ||
23 | #define DRV_VERSION "1.000" | ||
24 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
25 | |||
26 | #define dma_read32(REG) \ | ||
27 | sbus_readl(esp->dma_regs + (REG)) | ||
28 | #define dma_write32(VAL, REG) \ | ||
29 | sbus_writel((VAL), esp->dma_regs + (REG)) | ||
30 | |||
31 | static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
32 | { | ||
33 | struct sbus_dev *sdev = esp->dev; | ||
34 | struct sbus_dma *dma; | ||
35 | |||
36 | if (dma_sdev != NULL) { | ||
37 | for_each_dvma(dma) { | ||
38 | if (dma->sdev == dma_sdev) | ||
39 | break; | ||
40 | } | ||
41 | } else { | ||
42 | for_each_dvma(dma) { | ||
43 | if (dma->sdev == NULL) | ||
44 | break; | ||
45 | |||
46 | /* If bus + slot are the same and it has the | ||
47 | * correct OBP name, it's ours. | ||
48 | */ | ||
49 | if (sdev->bus == dma->sdev->bus && | ||
50 | sdev->slot == dma->sdev->slot && | ||
51 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
52 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
53 | break; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | if (dma == NULL) { | ||
58 | printk(KERN_ERR PFX "[%s] Cannot find dma.\n", | ||
59 | sdev->ofdev.node->full_name); | ||
60 | return -ENODEV; | ||
61 | } | ||
62 | esp->dma = dma; | ||
63 | esp->dma_regs = dma->regs; | ||
64 | |||
65 | return 0; | ||
66 | |||
67 | } | ||
68 | |||
69 | static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) | ||
70 | { | ||
71 | struct sbus_dev *sdev = esp->dev; | ||
72 | struct resource *res; | ||
73 | |||
74 | /* On HME, two reg sets exist, first is DVMA, | ||
75 | * second is ESP registers. | ||
76 | */ | ||
77 | if (hme) | ||
78 | res = &sdev->resource[1]; | ||
79 | else | ||
80 | res = &sdev->resource[0]; | ||
81 | |||
82 | esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); | ||
83 | if (!esp->regs) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static int __devinit esp_sbus_map_command_block(struct esp *esp) | ||
90 | { | ||
91 | struct sbus_dev *sdev = esp->dev; | ||
92 | |||
93 | esp->command_block = sbus_alloc_consistent(sdev, 16, | ||
94 | &esp->command_block_dma); | ||
95 | if (!esp->command_block) | ||
96 | return -ENOMEM; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int __devinit esp_sbus_register_irq(struct esp *esp) | ||
101 | { | ||
102 | struct Scsi_Host *host = esp->host; | ||
103 | struct sbus_dev *sdev = esp->dev; | ||
104 | |||
105 | host->irq = sdev->irqs[0]; | ||
106 | return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); | ||
107 | } | ||
108 | |||
109 | static void __devinit esp_get_scsi_id(struct esp *esp) | ||
110 | { | ||
111 | struct sbus_dev *sdev = esp->dev; | ||
112 | struct device_node *dp = sdev->ofdev.node; | ||
113 | |||
114 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); | ||
115 | if (esp->scsi_id != 0xff) | ||
116 | goto done; | ||
117 | |||
118 | esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff); | ||
119 | if (esp->scsi_id != 0xff) | ||
120 | goto done; | ||
121 | |||
122 | if (!sdev->bus) { | ||
123 | /* SUN4 */ | ||
124 | esp->scsi_id = 7; | ||
125 | goto done; | ||
126 | } | ||
127 | |||
128 | esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node, | ||
129 | "scsi-initiator-id", 7); | ||
130 | |||
131 | done: | ||
132 | esp->host->this_id = esp->scsi_id; | ||
133 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
134 | } | ||
135 | |||
136 | static void __devinit esp_get_differential(struct esp *esp) | ||
137 | { | ||
138 | struct sbus_dev *sdev = esp->dev; | ||
139 | struct device_node *dp = sdev->ofdev.node; | ||
140 | |||
141 | if (of_find_property(dp, "differential", NULL)) | ||
142 | esp->flags |= ESP_FLAG_DIFFERENTIAL; | ||
143 | else | ||
144 | esp->flags &= ~ESP_FLAG_DIFFERENTIAL; | ||
145 | } | ||
146 | |||
147 | static void __devinit esp_get_clock_params(struct esp *esp) | ||
148 | { | ||
149 | struct sbus_dev *sdev = esp->dev; | ||
150 | struct device_node *dp = sdev->ofdev.node; | ||
151 | struct device_node *bus_dp; | ||
152 | int fmhz; | ||
153 | |||
154 | bus_dp = NULL; | ||
155 | if (sdev != NULL && sdev->bus != NULL) | ||
156 | bus_dp = sdev->bus->ofdev.node; | ||
157 | |||
158 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); | ||
159 | if (fmhz == 0) | ||
160 | fmhz = (!bus_dp) ? 0 : | ||
161 | of_getintprop_default(bus_dp, "clock-frequency", 0); | ||
162 | |||
163 | esp->cfreq = fmhz; | ||
164 | } | ||
165 | |||
166 | static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
167 | { | ||
168 | struct sbus_dev *sdev = esp->dev; | ||
169 | struct device_node *dp = sdev->ofdev.node; | ||
170 | u8 bursts; | ||
171 | |||
172 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); | ||
173 | if (dma) { | ||
174 | struct device_node *dma_dp = dma->ofdev.node; | ||
175 | u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); | ||
176 | if (val != 0xff) | ||
177 | bursts &= val; | ||
178 | } | ||
179 | |||
180 | if (sdev->bus) { | ||
181 | u8 val = of_getintprop_default(sdev->bus->ofdev.node, | ||
182 | "burst-sizes", 0xff); | ||
183 | if (val != 0xff) | ||
184 | bursts &= val; | ||
185 | } | ||
186 | |||
187 | if (bursts == 0xff || | ||
188 | (bursts & DMA_BURST16) == 0 || | ||
189 | (bursts & DMA_BURST32) == 0) | ||
190 | bursts = (DMA_BURST32 - 1); | ||
191 | |||
192 | esp->bursts = bursts; | ||
193 | } | ||
194 | |||
195 | static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) | ||
196 | { | ||
197 | esp_get_scsi_id(esp); | ||
198 | esp_get_differential(esp); | ||
199 | esp_get_clock_params(esp); | ||
200 | esp_get_bursts(esp, espdma); | ||
201 | } | ||
202 | |||
203 | static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg) | ||
204 | { | ||
205 | sbus_writeb(val, esp->regs + (reg * 4UL)); | ||
206 | } | ||
207 | |||
208 | static u8 sbus_esp_read8(struct esp *esp, unsigned long reg) | ||
209 | { | ||
210 | return sbus_readb(esp->regs + (reg * 4UL)); | ||
211 | } | ||
212 | |||
213 | static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, | ||
214 | size_t sz, int dir) | ||
215 | { | ||
216 | return sbus_map_single(esp->dev, buf, sz, dir); | ||
217 | } | ||
218 | |||
219 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | ||
220 | int num_sg, int dir) | ||
221 | { | ||
222 | return sbus_map_sg(esp->dev, sg, num_sg, dir); | ||
223 | } | ||
224 | |||
225 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | ||
226 | size_t sz, int dir) | ||
227 | { | ||
228 | sbus_unmap_single(esp->dev, addr, sz, dir); | ||
229 | } | ||
230 | |||
231 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | ||
232 | int num_sg, int dir) | ||
233 | { | ||
234 | sbus_unmap_sg(esp->dev, sg, num_sg, dir); | ||
235 | } | ||
236 | |||
237 | static int sbus_esp_irq_pending(struct esp *esp) | ||
238 | { | ||
239 | if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) | ||
240 | return 1; | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void sbus_esp_reset_dma(struct esp *esp) | ||
245 | { | ||
246 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
247 | int can_do_sbus64, lim; | ||
248 | u32 val; | ||
249 | |||
250 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
251 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
252 | can_do_burst64 = 0; | ||
253 | can_do_sbus64 = 0; | ||
254 | if (sbus_can_dma_64bit(esp->dev)) | ||
255 | can_do_sbus64 = 1; | ||
256 | if (sbus_can_burst64(esp->sdev)) | ||
257 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
258 | |||
259 | /* Put the DVMA into a known state. */ | ||
260 | if (esp->dma->revision != dvmahme) { | ||
261 | val = dma_read32(DMA_CSR); | ||
262 | dma_write32(val | DMA_RST_SCSI, DMA_CSR); | ||
263 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
264 | } | ||
265 | switch (esp->dma->revision) { | ||
266 | case dvmahme: | ||
267 | dma_write32(DMA_RESET_FAS366, DMA_CSR); | ||
268 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
269 | |||
270 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS | | ||
271 | DMA_SCSI_DISAB | DMA_INT_ENAB); | ||
272 | |||
273 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE | | ||
274 | DMA_BRST_SZ); | ||
275 | |||
276 | if (can_do_burst64) | ||
277 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
278 | else if (can_do_burst32) | ||
279 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
280 | |||
281 | if (can_do_sbus64) { | ||
282 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
283 | sbus_set_sbus64(esp->dev, esp->bursts); | ||
284 | } | ||
285 | |||
286 | lim = 1000; | ||
287 | while (dma_read32(DMA_CSR) & DMA_PEND_READ) { | ||
288 | if (--lim == 0) { | ||
289 | printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ " | ||
290 | "will not clear!\n", | ||
291 | esp->host->unique_id); | ||
292 | break; | ||
293 | } | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | dma_write32(0, DMA_CSR); | ||
298 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
299 | |||
300 | dma_write32(0, DMA_ADDR); | ||
301 | break; | ||
302 | |||
303 | case dvmarev2: | ||
304 | if (esp->rev != ESP100) { | ||
305 | val = dma_read32(DMA_CSR); | ||
306 | dma_write32(val | DMA_3CLKS, DMA_CSR); | ||
307 | } | ||
308 | break; | ||
309 | |||
310 | case dvmarev3: | ||
311 | val = dma_read32(DMA_CSR); | ||
312 | val &= ~DMA_3CLKS; | ||
313 | val |= DMA_2CLKS; | ||
314 | if (can_do_burst32) { | ||
315 | val &= ~DMA_BRST_SZ; | ||
316 | val |= DMA_BRST32; | ||
317 | } | ||
318 | dma_write32(val, DMA_CSR); | ||
319 | break; | ||
320 | |||
321 | case dvmaesc1: | ||
322 | val = dma_read32(DMA_CSR); | ||
323 | val |= DMA_ADD_ENABLE; | ||
324 | val &= ~DMA_BCNT_ENAB; | ||
325 | if (!can_do_burst32 && can_do_burst16) { | ||
326 | val |= DMA_ESC_BURST; | ||
327 | } else { | ||
328 | val &= ~(DMA_ESC_BURST); | ||
329 | } | ||
330 | dma_write32(val, DMA_CSR); | ||
331 | break; | ||
332 | |||
333 | default: | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | /* Enable interrupts. */ | ||
338 | val = dma_read32(DMA_CSR); | ||
339 | dma_write32(val | DMA_INT_ENAB, DMA_CSR); | ||
340 | } | ||
341 | |||
342 | static void sbus_esp_dma_drain(struct esp *esp) | ||
343 | { | ||
344 | u32 csr; | ||
345 | int lim; | ||
346 | |||
347 | if (esp->dma->revision == dvmahme) | ||
348 | return; | ||
349 | |||
350 | csr = dma_read32(DMA_CSR); | ||
351 | if (!(csr & DMA_FIFO_ISDRAIN)) | ||
352 | return; | ||
353 | |||
354 | if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) | ||
355 | dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); | ||
356 | |||
357 | lim = 1000; | ||
358 | while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { | ||
359 | if (--lim == 0) { | ||
360 | printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", | ||
361 | esp->host->unique_id); | ||
362 | break; | ||
363 | } | ||
364 | udelay(1); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | static void sbus_esp_dma_invalidate(struct esp *esp) | ||
369 | { | ||
370 | if (esp->dma->revision == dvmahme) { | ||
371 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
372 | |||
373 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
374 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
375 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
376 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
377 | |||
378 | dma_write32(0, DMA_CSR); | ||
379 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
380 | |||
381 | /* This is necessary to avoid having the SCSI channel | ||
382 | * engine lock up on us. | ||
383 | */ | ||
384 | dma_write32(0, DMA_ADDR); | ||
385 | } else { | ||
386 | u32 val; | ||
387 | int lim; | ||
388 | |||
389 | lim = 1000; | ||
390 | while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { | ||
391 | if (--lim == 0) { | ||
392 | printk(KERN_ALERT PFX "esp%d: DMA will not " | ||
393 | "invalidate!\n", esp->host->unique_id); | ||
394 | break; | ||
395 | } | ||
396 | udelay(1); | ||
397 | } | ||
398 | |||
399 | val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
400 | val |= DMA_FIFO_INV; | ||
401 | dma_write32(val, DMA_CSR); | ||
402 | val &= ~DMA_FIFO_INV; | ||
403 | dma_write32(val, DMA_CSR); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, | ||
408 | u32 dma_count, int write, u8 cmd) | ||
409 | { | ||
410 | u32 csr; | ||
411 | |||
412 | BUG_ON(!(cmd & ESP_CMD_DMA)); | ||
413 | |||
414 | sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); | ||
415 | sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); | ||
416 | if (esp->rev == FASHME) { | ||
417 | sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO); | ||
418 | sbus_esp_write8(esp, 0, FAS_RHI); | ||
419 | |||
420 | scsi_esp_cmd(esp, cmd); | ||
421 | |||
422 | csr = esp->prev_hme_dmacsr; | ||
423 | csr |= DMA_SCSI_DISAB | DMA_ENABLE; | ||
424 | if (write) | ||
425 | csr |= DMA_ST_WRITE; | ||
426 | else | ||
427 | csr &= ~DMA_ST_WRITE; | ||
428 | esp->prev_hme_dmacsr = csr; | ||
429 | |||
430 | dma_write32(dma_count, DMA_COUNT); | ||
431 | dma_write32(addr, DMA_ADDR); | ||
432 | dma_write32(csr, DMA_CSR); | ||
433 | } else { | ||
434 | csr = dma_read32(DMA_CSR); | ||
435 | csr |= DMA_ENABLE; | ||
436 | if (write) | ||
437 | csr |= DMA_ST_WRITE; | ||
438 | else | ||
439 | csr &= ~DMA_ST_WRITE; | ||
440 | dma_write32(csr, DMA_CSR); | ||
441 | if (esp->dma->revision == dvmaesc1) { | ||
442 | u32 end = PAGE_ALIGN(addr + dma_count + 16U); | ||
443 | dma_write32(end - addr, DMA_COUNT); | ||
444 | } | ||
445 | dma_write32(addr, DMA_ADDR); | ||
446 | |||
447 | scsi_esp_cmd(esp, cmd); | ||
448 | } | ||
449 | |||
450 | } | ||
451 | |||
452 | static int sbus_esp_dma_error(struct esp *esp) | ||
453 | { | ||
454 | u32 csr = dma_read32(DMA_CSR); | ||
455 | |||
456 | if (csr & DMA_HNDL_ERROR) | ||
457 | return 1; | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static const struct esp_driver_ops sbus_esp_ops = { | ||
463 | .esp_write8 = sbus_esp_write8, | ||
464 | .esp_read8 = sbus_esp_read8, | ||
465 | .map_single = sbus_esp_map_single, | ||
466 | .map_sg = sbus_esp_map_sg, | ||
467 | .unmap_single = sbus_esp_unmap_single, | ||
468 | .unmap_sg = sbus_esp_unmap_sg, | ||
469 | .irq_pending = sbus_esp_irq_pending, | ||
470 | .reset_dma = sbus_esp_reset_dma, | ||
471 | .dma_drain = sbus_esp_dma_drain, | ||
472 | .dma_invalidate = sbus_esp_dma_invalidate, | ||
473 | .send_dma_cmd = sbus_esp_send_dma_cmd, | ||
474 | .dma_error = sbus_esp_dma_error, | ||
475 | }; | ||
476 | |||
477 | static int __devinit esp_sbus_probe_one(struct device *dev, | ||
478 | struct sbus_dev *esp_dev, | ||
479 | struct sbus_dev *espdma, | ||
480 | struct sbus_bus *sbus, | ||
481 | int hme) | ||
482 | { | ||
483 | struct scsi_host_template *tpnt = &scsi_esp_template; | ||
484 | struct Scsi_Host *host; | ||
485 | struct esp *esp; | ||
486 | int err; | ||
487 | |||
488 | host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
489 | |||
490 | err = -ENOMEM; | ||
491 | if (!host) | ||
492 | goto fail; | ||
493 | |||
494 | host->max_id = (hme ? 16 : 8); | ||
495 | esp = host_to_esp(host); | ||
496 | |||
497 | esp->host = host; | ||
498 | esp->dev = esp_dev; | ||
499 | esp->ops = &sbus_esp_ops; | ||
500 | |||
501 | if (hme) | ||
502 | esp->flags |= ESP_FLAG_WIDE_CAPABLE; | ||
503 | |||
504 | err = esp_sbus_find_dma(esp, espdma); | ||
505 | if (err < 0) | ||
506 | goto fail_unlink; | ||
507 | |||
508 | err = esp_sbus_map_regs(esp, hme); | ||
509 | if (err < 0) | ||
510 | goto fail_unlink; | ||
511 | |||
512 | err = esp_sbus_map_command_block(esp); | ||
513 | if (err < 0) | ||
514 | goto fail_unmap_regs; | ||
515 | |||
516 | err = esp_sbus_register_irq(esp); | ||
517 | if (err < 0) | ||
518 | goto fail_unmap_command_block; | ||
519 | |||
520 | esp_sbus_get_props(esp, espdma); | ||
521 | |||
522 | /* Before we try to touch the ESP chip, ESC1 dma can | ||
523 | * come up with the reset bit set, so make sure that | ||
524 | * is clear first. | ||
525 | */ | ||
526 | if (esp->dma->revision == dvmaesc1) { | ||
527 | u32 val = dma_read32(DMA_CSR); | ||
528 | |||
529 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
530 | } | ||
531 | |||
532 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
533 | |||
534 | err = scsi_esp_register(esp, dev); | ||
535 | if (err) | ||
536 | goto fail_free_irq; | ||
537 | |||
538 | return 0; | ||
539 | |||
540 | fail_free_irq: | ||
541 | free_irq(host->irq, esp); | ||
542 | fail_unmap_command_block: | ||
543 | sbus_free_consistent(esp->dev, 16, | ||
544 | esp->command_block, | ||
545 | esp->command_block_dma); | ||
546 | fail_unmap_regs: | ||
547 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
548 | fail_unlink: | ||
549 | scsi_host_put(host); | ||
550 | fail: | ||
551 | return err; | ||
552 | } | ||
553 | |||
554 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
555 | { | ||
556 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
557 | struct device_node *dp = dev->node; | ||
558 | struct sbus_dev *dma_sdev = NULL; | ||
559 | int hme = 0; | ||
560 | |||
561 | if (dp->parent && | ||
562 | (!strcmp(dp->parent->name, "espdma") || | ||
563 | !strcmp(dp->parent->name, "dma"))) | ||
564 | dma_sdev = sdev->parent; | ||
565 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
566 | dma_sdev = sdev; | ||
567 | hme = 1; | ||
568 | } | ||
569 | |||
570 | return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, | ||
571 | sdev->bus, hme); | ||
572 | } | ||
573 | |||
574 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
575 | { | ||
576 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
577 | unsigned int irq = esp->host->irq; | ||
578 | u32 val; | ||
579 | |||
580 | scsi_esp_unregister(esp); | ||
581 | |||
582 | /* Disable interrupts. */ | ||
583 | val = dma_read32(DMA_CSR); | ||
584 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); | ||
585 | |||
586 | free_irq(irq, esp); | ||
587 | sbus_free_consistent(esp->dev, 16, | ||
588 | esp->command_block, | ||
589 | esp->command_block_dma); | ||
590 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
591 | |||
592 | scsi_host_put(esp->host); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static struct of_device_id esp_match[] = { | ||
598 | { | ||
599 | .name = "SUNW,esp", | ||
600 | }, | ||
601 | { | ||
602 | .name = "SUNW,fas", | ||
603 | }, | ||
604 | { | ||
605 | .name = "esp", | ||
606 | }, | ||
607 | {}, | ||
608 | }; | ||
609 | MODULE_DEVICE_TABLE(of, esp_match); | ||
610 | |||
611 | static struct of_platform_driver esp_sbus_driver = { | ||
612 | .name = "esp", | ||
613 | .match_table = esp_match, | ||
614 | .probe = esp_sbus_probe, | ||
615 | .remove = __devexit_p(esp_sbus_remove), | ||
616 | }; | ||
617 | |||
618 | static int __init sunesp_init(void) | ||
619 | { | ||
620 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
621 | } | ||
622 | |||
623 | static void __exit sunesp_exit(void) | ||
624 | { | ||
625 | of_unregister_driver(&esp_sbus_driver); | ||
626 | } | ||
627 | |||
628 | MODULE_DESCRIPTION("Sun ESP SCSI driver"); | ||
629 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
630 | MODULE_LICENSE("GPL"); | ||
631 | MODULE_VERSION(DRV_VERSION); | ||
632 | |||
633 | module_init(sunesp_init); | ||
634 | module_exit(sunesp_exit); | ||
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 96a852aa1903..bfd44177a215 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1387,8 +1387,8 @@ static enum su_type __devinit su_get_type(struct device_node *dp) | |||
1387 | struct device_node *ap = of_find_node_by_path("/aliases"); | 1387 | struct device_node *ap = of_find_node_by_path("/aliases"); |
1388 | 1388 | ||
1389 | if (ap) { | 1389 | if (ap) { |
1390 | char *keyb = of_get_property(ap, "keyboard", NULL); | 1390 | const char *keyb = of_get_property(ap, "keyboard", NULL); |
1391 | char *ms = of_get_property(ap, "mouse", NULL); | 1391 | const char *ms = of_get_property(ap, "mouse", NULL); |
1392 | 1392 | ||
1393 | if (keyb) { | 1393 | if (keyb) { |
1394 | if (dp == of_find_node_by_path(keyb)) | 1394 | if (dp == of_find_node_by_path(keyb)) |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index d7627fc4f11e..8514f2a6f060 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -2899,7 +2899,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
2899 | struct fb_info *info, unsigned long addr) | 2899 | struct fb_info *info, unsigned long addr) |
2900 | { | 2900 | { |
2901 | struct atyfb_par *par = info->par; | 2901 | struct atyfb_par *par = info->par; |
2902 | struct pcidev_cookie *pcp; | 2902 | struct device_node *dp; |
2903 | char prop[128]; | 2903 | char prop[128]; |
2904 | int node, len, i, j, ret; | 2904 | int node, len, i, j, ret; |
2905 | u32 mem, chip_id; | 2905 | u32 mem, chip_id; |
@@ -3037,8 +3037,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
3037 | node = 0; | 3037 | node = 0; |
3038 | } | 3038 | } |
3039 | 3039 | ||
3040 | pcp = pdev->sysdata; | 3040 | dp = pci_device_to_OF_node(pdev); |
3041 | if (node == pcp->prom_node->node) { | 3041 | if (node == dp->node) { |
3042 | struct fb_var_screeninfo *var = &default_var; | 3042 | struct fb_var_screeninfo *var = &default_var; |
3043 | unsigned int N, P, Q, M, T, R; | 3043 | unsigned int N, P, Q, M, T, R; |
3044 | u32 v_total, h_total; | 3044 | u32 v_total, h_total; |
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 1bf6f42eb400..a4b3fd185de7 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -410,7 +410,7 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo) | |||
410 | } | 410 | } |
411 | #endif | 411 | #endif |
412 | 412 | ||
413 | #ifdef CONFIG_PPC_OF | 413 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
414 | /* | 414 | /* |
415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device | 415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device |
416 | * tree. Hopefully, ATI OF driver is kind enough to fill these | 416 | * tree. Hopefully, ATI OF driver is kind enough to fill these |
@@ -440,7 +440,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo) | |||
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | #endif /* CONFIG_PPC_OF */ | 443 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * Read PLL infos from chip registers | 446 | * Read PLL infos from chip registers |
@@ -645,7 +645,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; | 645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; |
646 | 646 | ||
647 | 647 | ||
648 | #ifdef CONFIG_PPC_OF | 648 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
649 | /* | 649 | /* |
650 | * Retrieve PLL infos from Open Firmware first | 650 | * Retrieve PLL infos from Open Firmware first |
651 | */ | 651 | */ |
@@ -653,7 +653,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); | 653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); |
654 | goto found; | 654 | goto found; |
655 | } | 655 | } |
656 | #endif /* CONFIG_PPC_OF */ | 656 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
657 | 657 | ||
658 | /* | 658 | /* |
659 | * Check out if we have an X86 which gave us some PLL informations | 659 | * Check out if we have an X86 which gave us some PLL informations |
@@ -2231,7 +2231,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2231 | rinfo->family == CHIP_FAMILY_RS200) | 2231 | rinfo->family == CHIP_FAMILY_RS200) |
2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; | 2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; |
2233 | 2233 | ||
2234 | #ifdef CONFIG_PPC_OF | 2234 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
2235 | /* On PPC, we obtain the OF device-node pointer to the firmware | 2235 | /* On PPC, we obtain the OF device-node pointer to the firmware |
2236 | * data for this chip | 2236 | * data for this chip |
2237 | */ | 2237 | */ |
@@ -2240,6 +2240,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", | 2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", |
2241 | pci_name(rinfo->pdev)); | 2241 | pci_name(rinfo->pdev)); |
2242 | 2242 | ||
2243 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ | ||
2244 | #ifdef CONFIG_PPC_OF | ||
2243 | /* On PPC, the firmware sets up a memory mapping that tends | 2245 | /* On PPC, the firmware sets up a memory mapping that tends |
2244 | * to cause lockups when enabling the engine. We reconfigure | 2246 | * to cause lockups when enabling the engine. We reconfigure |
2245 | * the card internal memory mappings properly | 2247 | * the card internal memory mappings properly |
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c index 38c7dbf8c151..737b5c09dbdb 100644 --- a/drivers/video/aty/radeon_monitor.c +++ b/drivers/video/aty/radeon_monitor.c | |||
@@ -52,7 +52,7 @@ static char *radeon_get_mon_name(int type) | |||
52 | } | 52 | } |
53 | 53 | ||
54 | 54 | ||
55 | #ifdef CONFIG_PPC_OF | 55 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
56 | /* | 56 | /* |
57 | * Try to find monitor informations & EDID data out of the Open Firmware | 57 | * Try to find monitor informations & EDID data out of the Open Firmware |
58 | * device-tree. This also contains some "hacks" to work around a few machine | 58 | * device-tree. This also contains some "hacks" to work around a few machine |
@@ -156,7 +156,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_ | |||
156 | } | 156 | } |
157 | return MT_NONE; | 157 | return MT_NONE; |
158 | } | 158 | } |
159 | #endif /* CONFIG_PPC_OF */ | 159 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
160 | 160 | ||
161 | 161 | ||
162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) | 162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) |
@@ -495,11 +495,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
495 | * Old single head cards | 495 | * Old single head cards |
496 | */ | 496 | */ |
497 | if (!rinfo->has_CRTC2) { | 497 | if (!rinfo->has_CRTC2) { |
498 | #ifdef CONFIG_PPC_OF | 498 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
499 | if (rinfo->mon1_type == MT_NONE) | 499 | if (rinfo->mon1_type == MT_NONE) |
500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
501 | &rinfo->mon1_EDID); | 501 | &rinfo->mon1_EDID); |
502 | #endif /* CONFIG_PPC_OF */ | 502 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
503 | #ifdef CONFIG_FB_RADEON_I2C | 503 | #ifdef CONFIG_FB_RADEON_I2C |
504 | if (rinfo->mon1_type == MT_NONE) | 504 | if (rinfo->mon1_type == MT_NONE) |
505 | rinfo->mon1_type = | 505 | rinfo->mon1_type = |
@@ -544,11 +544,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
544 | /* | 544 | /* |
545 | * Probe primary head (DVI or laptop internal panel) | 545 | * Probe primary head (DVI or laptop internal panel) |
546 | */ | 546 | */ |
547 | #ifdef CONFIG_PPC_OF | 547 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
548 | if (rinfo->mon1_type == MT_NONE) | 548 | if (rinfo->mon1_type == MT_NONE) |
549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
550 | &rinfo->mon1_EDID); | 550 | &rinfo->mon1_EDID); |
551 | #endif /* CONFIG_PPC_OF */ | 551 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
552 | #ifdef CONFIG_FB_RADEON_I2C | 552 | #ifdef CONFIG_FB_RADEON_I2C |
553 | if (rinfo->mon1_type == MT_NONE) | 553 | if (rinfo->mon1_type == MT_NONE) |
554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, | 554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, |
@@ -572,11 +572,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
572 | /* | 572 | /* |
573 | * Probe secondary head (mostly VGA, can be DVI) | 573 | * Probe secondary head (mostly VGA, can be DVI) |
574 | */ | 574 | */ |
575 | #ifdef CONFIG_PPC_OF | 575 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
576 | if (rinfo->mon2_type == MT_NONE) | 576 | if (rinfo->mon2_type == MT_NONE) |
577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, | 577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, |
578 | &rinfo->mon2_EDID); | 578 | &rinfo->mon2_EDID); |
579 | #endif /* CONFIG_PPC_OF */ | 579 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
580 | #ifdef CONFIG_FB_RADEON_I2C | 580 | #ifdef CONFIG_FB_RADEON_I2C |
581 | if (rinfo->mon2_type == MT_NONE) | 581 | if (rinfo->mon2_type == MT_NONE) |
582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, | 582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, |
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h index d5ff224a6258..319000360285 100644 --- a/drivers/video/aty/radeonfb.h +++ b/drivers/video/aty/radeonfb.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PPC_OF | 19 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
20 | #include <asm/prom.h> | 20 | #include <asm/prom.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
@@ -292,7 +292,7 @@ struct radeonfb_info { | |||
292 | unsigned long fb_local_base; | 292 | unsigned long fb_local_base; |
293 | 293 | ||
294 | struct pci_dev *pdev; | 294 | struct pci_dev *pdev; |
295 | #ifdef CONFIG_PPC_OF | 295 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
296 | struct device_node *of_node; | 296 | struct device_node *of_node; |
297 | #endif | 297 | #endif |
298 | 298 | ||
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index 767c850f8eb7..f042428a84f4 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c | |||
@@ -266,7 +266,7 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes, | |||
266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, | 266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, |
267 | struct device_node *dp) | 267 | struct device_node *dp) |
268 | { | 268 | { |
269 | char *params; | 269 | const char *params; |
270 | char *p; | 270 | char *p; |
271 | int ww, hh; | 271 | int ww, hh; |
272 | 272 | ||
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c index 90592fb59156..eb1a4812ad1d 100644 --- a/drivers/video/igafb.c +++ b/drivers/video/igafb.c | |||
@@ -44,8 +44,8 @@ | |||
44 | 44 | ||
45 | #include <asm/io.h> | 45 | #include <asm/io.h> |
46 | 46 | ||
47 | #ifdef __sparc__ | 47 | #ifdef CONFIG_SPARC |
48 | #include <asm/pbm.h> | 48 | #include <asm/prom.h> |
49 | #include <asm/pcic.h> | 49 | #include <asm/pcic.h> |
50 | #endif | 50 | #endif |
51 | 51 | ||
@@ -96,7 +96,7 @@ struct fb_var_screeninfo default_var = { | |||
96 | .vmode = FB_VMODE_NONINTERLACED | 96 | .vmode = FB_VMODE_NONINTERLACED |
97 | }; | 97 | }; |
98 | 98 | ||
99 | #ifdef __sparc__ | 99 | #ifdef CONFIG_SPARC |
100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { | 100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { |
101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ | 101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ |
102 | .xres = 1024, | 102 | .xres = 1024, |
@@ -188,7 +188,7 @@ static inline void iga_outb(struct iga_par *par, unsigned char val, | |||
188 | pci_outb(par, val, reg+1); | 188 | pci_outb(par, val, reg+1); |
189 | } | 189 | } |
190 | 190 | ||
191 | #endif /* __sparc__ */ | 191 | #endif /* CONFIG_SPARC */ |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Very important functionality for the JavaEngine1 computer: | 194 | * Very important functionality for the JavaEngine1 computer: |
@@ -217,7 +217,7 @@ static void iga_blank_border(struct iga_par *par) | |||
217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); | 217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); |
218 | } | 218 | } |
219 | 219 | ||
220 | #ifdef __sparc__ | 220 | #ifdef CONFIG_SPARC |
221 | static int igafb_mmap(struct fb_info *info, | 221 | static int igafb_mmap(struct fb_info *info, |
222 | struct vm_area_struct *vma) | 222 | struct vm_area_struct *vma) |
223 | { | 223 | { |
@@ -271,7 +271,7 @@ static int igafb_mmap(struct fb_info *info, | |||
271 | vma->vm_flags |= VM_IO; | 271 | vma->vm_flags |= VM_IO; |
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | #endif /* __sparc__ */ | 274 | #endif /* CONFIG_SPARC */ |
275 | 275 | ||
276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, | 276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, |
277 | unsigned blue, unsigned transp, | 277 | unsigned blue, unsigned transp, |
@@ -323,7 +323,7 @@ static struct fb_ops igafb_ops = { | |||
323 | .fb_fillrect = cfb_fillrect, | 323 | .fb_fillrect = cfb_fillrect, |
324 | .fb_copyarea = cfb_copyarea, | 324 | .fb_copyarea = cfb_copyarea, |
325 | .fb_imageblit = cfb_imageblit, | 325 | .fb_imageblit = cfb_imageblit, |
326 | #ifdef __sparc__ | 326 | #ifdef CONFIG_SPARC |
327 | .fb_mmap = igafb_mmap, | 327 | .fb_mmap = igafb_mmap, |
328 | #endif | 328 | #endif |
329 | }; | 329 | }; |
@@ -424,7 +424,7 @@ int __init igafb_init(void) | |||
424 | 424 | ||
425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; | 425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; |
426 | 426 | ||
427 | #ifdef __sparc__ | 427 | #ifdef CONFIG_SPARC |
428 | /* | 428 | /* |
429 | * The following is sparc specific and this is why: | 429 | * The following is sparc specific and this is why: |
430 | * | 430 | * |
@@ -477,8 +477,8 @@ int __init igafb_init(void) | |||
477 | * Set default vmode and cmode from PROM properties. | 477 | * Set default vmode and cmode from PROM properties. |
478 | */ | 478 | */ |
479 | { | 479 | { |
480 | struct pcidev_cookie *cookie = pdev->sysdata; | 480 | struct device_node *dp = pci_device_to_OF_node(pdev); |
481 | int node = cookie->prom_node; | 481 | int node = dp->node; |
482 | int width = prom_getintdefault(node, "width", 1024); | 482 | int width = prom_getintdefault(node, "width", 1024); |
483 | int height = prom_getintdefault(node, "height", 768); | 483 | int height = prom_getintdefault(node, "height", 768); |
484 | int depth = prom_getintdefault(node, "depth", 8); | 484 | int depth = prom_getintdefault(node, "depth", 8); |
@@ -534,7 +534,7 @@ int __init igafb_init(void) | |||
534 | kfree(info); | 534 | kfree(info); |
535 | } | 535 | } |
536 | 536 | ||
537 | #ifdef __sparc__ | 537 | #ifdef CONFIG_SPARC |
538 | /* | 538 | /* |
539 | * Add /dev/fb mmap values. | 539 | * Add /dev/fb mmap values. |
540 | */ | 540 | */ |
@@ -552,7 +552,7 @@ int __init igafb_init(void) | |||
552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ | 552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ |
553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; | 553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; |
554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; | 554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; |
555 | #endif /* __sparc__ */ | 555 | #endif /* CONFIG_SPARC */ |
556 | 556 | ||
557 | return 0; | 557 | return 0; |
558 | } | 558 | } |
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h index 9e44fea669bf..b02b8a282940 100644 --- a/include/asm-alpha/string.h +++ b/include/asm-alpha/string.h | |||
@@ -61,8 +61,6 @@ extern void * __memsetw(void *dest, unsigned short, size_t count); | |||
61 | ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ | 61 | ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ |
62 | : __memsetw((s),(c),(n))) | 62 | : __memsetw((s),(c),(n))) |
63 | 63 | ||
64 | extern int strcasecmp(const char *, const char *); | ||
65 | |||
66 | #endif /* __KERNEL__ */ | 64 | #endif /* __KERNEL__ */ |
67 | 65 | ||
68 | #endif /* __ALPHA_STRING_H__ */ | 66 | #endif /* __ALPHA_STRING_H__ */ |
diff --git a/include/asm-powerpc/string.h b/include/asm-powerpc/string.h index faa407f33c6b..aa40f92c298d 100644 --- a/include/asm-powerpc/string.h +++ b/include/asm-powerpc/string.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #define __HAVE_ARCH_MEMCMP | 14 | #define __HAVE_ARCH_MEMCMP |
15 | #define __HAVE_ARCH_MEMCHR | 15 | #define __HAVE_ARCH_MEMCHR |
16 | 16 | ||
17 | extern int strcasecmp(const char *, const char *); | ||
18 | extern int strncasecmp(const char *, const char *, __kernel_size_t); | ||
19 | extern char * strcpy(char *,const char *); | 17 | extern char * strcpy(char *,const char *); |
20 | extern char * strncpy(char *,const char *, __kernel_size_t); | 18 | extern char * strncpy(char *,const char *, __kernel_size_t); |
21 | extern __kernel_size_t strlen(const char *); | 19 | extern __kernel_size_t strlen(const char *); |
diff --git a/include/asm-sh/string.h b/include/asm-sh/string.h index 95bc7db006b0..55f8db6bc1d7 100644 --- a/include/asm-sh/string.h +++ b/include/asm-sh/string.h | |||
@@ -126,9 +126,6 @@ extern void *memchr(const void *__s, int __c, size_t __n); | |||
126 | #define __HAVE_ARCH_STRLEN | 126 | #define __HAVE_ARCH_STRLEN |
127 | extern size_t strlen(const char *); | 127 | extern size_t strlen(const char *); |
128 | 128 | ||
129 | /* arch/sh/lib/strcasecmp.c */ | ||
130 | extern int strcasecmp(const char *, const char *); | ||
131 | |||
132 | #endif /* __KERNEL__ */ | 129 | #endif /* __KERNEL__ */ |
133 | 130 | ||
134 | #endif /* __ASM_SH_STRING_H */ | 131 | #endif /* __ASM_SH_STRING_H */ |
diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h index 274868d8598d..9ea105ebe2ff 100644 --- a/include/asm-sparc/prom.h +++ b/include/asm-sparc/prom.h | |||
@@ -35,8 +35,8 @@ struct property { | |||
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct device_node { | 37 | struct device_node { |
38 | char *name; | 38 | const char *name; |
39 | char *type; | 39 | const char *type; |
40 | phandle node; | 40 | phandle node; |
41 | char *path_component_name; | 41 | char *path_component_name; |
42 | char *full_name; | 42 | char *full_name; |
@@ -85,12 +85,14 @@ extern struct device_node *of_find_node_by_phandle(phandle handle); | |||
85 | extern struct device_node *of_get_parent(const struct device_node *node); | 85 | extern struct device_node *of_get_parent(const struct device_node *node); |
86 | extern struct device_node *of_get_next_child(const struct device_node *node, | 86 | extern struct device_node *of_get_next_child(const struct device_node *node, |
87 | struct device_node *prev); | 87 | struct device_node *prev); |
88 | extern struct property *of_find_property(struct device_node *np, | 88 | extern struct property *of_find_property(const struct device_node *np, |
89 | const char *name, | 89 | const char *name, |
90 | int *lenp); | 90 | int *lenp); |
91 | extern int of_device_is_compatible(struct device_node *device, const char *); | 91 | extern int of_device_is_compatible(const struct device_node *device, |
92 | extern void *of_get_property(struct device_node *node, const char *name, | 92 | const char *); |
93 | int *lenp); | 93 | extern const void *of_get_property(const struct device_node *node, |
94 | const char *name, | ||
95 | int *lenp); | ||
94 | #define get_property(node,name,lenp) of_get_property(node,name,lenp) | 96 | #define get_property(node,name,lenp) of_get_property(node,name,lenp) |
95 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); | 97 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); |
96 | extern int of_getintprop_default(struct device_node *np, | 98 | extern int of_getintprop_default(struct device_node *np, |
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index f2cc9411b4c7..e89922d6718c 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h | |||
@@ -17,8 +17,8 @@ | |||
17 | typedef struct { | 17 | typedef struct { |
18 | /* Dcache line 1 */ | 18 | /* Dcache line 1 */ |
19 | unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ | 19 | unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ |
20 | unsigned int multiplier; | 20 | unsigned int __pad0_1; |
21 | unsigned int counter; | 21 | unsigned int __pad0_2; |
22 | unsigned int __pad1; | 22 | unsigned int __pad1; |
23 | unsigned long clock_tick; /* %tick's per second */ | 23 | unsigned long clock_tick; /* %tick's per second */ |
24 | unsigned long udelay_val; | 24 | unsigned long udelay_val; |
diff --git a/include/asm-sparc64/device.h b/include/asm-sparc64/device.h index d8f9872b0e2d..d5a4559b9555 100644 --- a/include/asm-sparc64/device.h +++ b/include/asm-sparc64/device.h | |||
@@ -3,5 +3,21 @@ | |||
3 | * | 3 | * |
4 | * This file is released under the GPLv2 | 4 | * This file is released under the GPLv2 |
5 | */ | 5 | */ |
6 | #include <asm-generic/device.h> | 6 | #ifndef _ASM_SPARC64_DEVICE_H |
7 | #define _ASM_SPARC64_DEVICE_H | ||
7 | 8 | ||
9 | struct device_node; | ||
10 | struct of_device; | ||
11 | |||
12 | struct dev_archdata { | ||
13 | void *iommu; | ||
14 | void *stc; | ||
15 | void *host_controller; | ||
16 | |||
17 | struct device_node *prom_node; | ||
18 | struct of_device *op; | ||
19 | |||
20 | unsigned int msi_num; | ||
21 | }; | ||
22 | |||
23 | #endif /* _ASM_SPARC64_DEVICE_H */ | ||
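
[Editor's note, not part of the patch] A sketch of assumed usage, not taken from the series, showing how code might reach the new per-device firmware state once the sparc64 probe path fills in dev->archdata:

    #include <linux/device.h>
    #include <asm/device.h>

    /* Fetch the OF node recorded for a device.  struct device embeds
     * struct dev_archdata as dev->archdata; the bus probe code is
     * expected to populate prom_node, op, iommu and stc at discovery
     * time (an assumption here, stated by the header, not this hunk).
     */
    static struct device_node *example_dev_prom_node(struct device *dev)
    {
            return dev->archdata.prom_node;
    }
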
diff --git a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h index a4afe9d5703a..9c1c6db2a790 100644 --- a/include/asm-sparc64/ebus.h +++ b/include/asm-sparc64/ebus.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #ifndef __SPARC64_EBUS_H | 8 | #ifndef __SPARC64_EBUS_H |
9 | #define __SPARC64_EBUS_H | 9 | #define __SPARC64_EBUS_H |
10 | 10 | ||
11 | #include <asm/pbm.h> | ||
12 | #include <asm/oplib.h> | 11 | #include <asm/oplib.h> |
13 | #include <asm/prom.h> | 12 | #include <asm/prom.h> |
14 | #include <asm/of_device.h> | 13 | #include <asm/of_device.h> |
@@ -41,7 +40,6 @@ struct linux_ebus { | |||
41 | struct of_device ofdev; | 40 | struct of_device ofdev; |
42 | struct linux_ebus *next; | 41 | struct linux_ebus *next; |
43 | struct linux_ebus_device *devices; | 42 | struct linux_ebus_device *devices; |
44 | struct pci_pbm_info *parent; | ||
45 | struct pci_dev *self; | 43 | struct pci_dev *self; |
46 | int index; | 44 | int index; |
47 | int is_rio; | 45 | int is_rio; |
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index 331013a0053e..4aa0925e1b1b 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h | |||
@@ -549,7 +549,7 @@ static int __init ebus_fdthree_p(struct linux_ebus_device *edev) | |||
549 | if (!strcmp(edev->prom_node->name, "fdthree")) | 549 | if (!strcmp(edev->prom_node->name, "fdthree")) |
550 | return 1; | 550 | return 1; |
551 | if (!strcmp(edev->prom_node->name, "floppy")) { | 551 | if (!strcmp(edev->prom_node->name, "floppy")) { |
552 | char *compat; | 552 | const char *compat; |
553 | 553 | ||
554 | compat = of_get_property(edev->prom_node, | 554 | compat = of_get_property(edev->prom_node, |
555 | "compatible", NULL); | 555 | "compatible", NULL); |
@@ -661,7 +661,7 @@ static unsigned long __init sun_floppy_init(void) | |||
661 | struct linux_ebus_device *edev = NULL; | 661 | struct linux_ebus_device *edev = NULL; |
662 | unsigned long config = 0; | 662 | unsigned long config = 0; |
663 | void __iomem *auxio_reg; | 663 | void __iomem *auxio_reg; |
664 | char *state_prop; | 664 | const char *state_prop; |
665 | 665 | ||
666 | for_each_ebus(ebus) { | 666 | for_each_ebus(ebus) { |
667 | for_each_ebusdev(edev, ebus) { | 667 | for_each_ebusdev(edev, ebus) { |
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index 30b912d8e8bc..ad595b679842 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h | |||
@@ -24,14 +24,6 @@ extern unsigned long kern_base, kern_size; | |||
24 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 24 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
25 | #define BIO_VMERGE_BOUNDARY 8192 | 25 | #define BIO_VMERGE_BOUNDARY 8192 |
26 | 26 | ||
27 | /* Different PCI controllers we support have their PCI MEM space | ||
28 | * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area, | ||
29 | * so need to chop off the top 33 or 32 bits. | ||
30 | */ | ||
31 | extern unsigned long pci_memspace_mask; | ||
32 | |||
33 | #define bus_dvma_to_mem(__vaddr) ((__vaddr) & pci_memspace_mask) | ||
34 | |||
35 | static __inline__ u8 _inb(unsigned long addr) | 27 | static __inline__ u8 _inb(unsigned long addr) |
36 | { | 28 | { |
37 | u8 ret; | 29 | u8 ret; |
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h index 0de7a3da79cd..e199594a1e9b 100644 --- a/include/asm-sparc64/iommu.h +++ b/include/asm-sparc64/iommu.h | |||
@@ -7,15 +7,50 @@ | |||
7 | #define _SPARC64_IOMMU_H | 7 | #define _SPARC64_IOMMU_H |
8 | 8 | ||
9 | /* The format of an iopte in the page tables. */ | 9 | /* The format of an iopte in the page tables. */ |
10 | #define IOPTE_VALID 0x8000000000000000UL /* IOPTE is valid */ | 10 | #define IOPTE_VALID 0x8000000000000000UL |
11 | #define IOPTE_64K 0x2000000000000000UL /* IOPTE is for 64k page */ | 11 | #define IOPTE_64K 0x2000000000000000UL |
12 | #define IOPTE_STBUF 0x1000000000000000UL /* DVMA can use streaming buffer */ | 12 | #define IOPTE_STBUF 0x1000000000000000UL |
13 | #define IOPTE_INTRA 0x0800000000000000UL /* SBUS slot-->slot direct transfer*/ | 13 | #define IOPTE_INTRA 0x0800000000000000UL |
14 | #define IOPTE_CONTEXT 0x07ff800000000000UL /* Context number */ | 14 | #define IOPTE_CONTEXT 0x07ff800000000000UL |
15 | #define IOPTE_PAGE 0x00007fffffffe000UL /* Physical page number (PA[42:13])*/ | 15 | #define IOPTE_PAGE 0x00007fffffffe000UL |
16 | #define IOPTE_CACHE 0x0000000000000010UL /* Cached (in UPA E-cache) */ | 16 | #define IOPTE_CACHE 0x0000000000000010UL |
17 | #define IOPTE_WRITE 0x0000000000000002UL /* Writeable */ | 17 | #define IOPTE_WRITE 0x0000000000000002UL |
18 | 18 | ||
19 | #define IOMMU_NUM_CTXS 4096 | 19 | #define IOMMU_NUM_CTXS 4096 |
20 | 20 | ||
21 | struct iommu_arena { | ||
22 | unsigned long *map; | ||
23 | unsigned int hint; | ||
24 | unsigned int limit; | ||
25 | }; | ||
26 | |||
27 | struct iommu { | ||
28 | spinlock_t lock; | ||
29 | struct iommu_arena arena; | ||
30 | iopte_t *page_table; | ||
31 | u32 page_table_map_base; | ||
32 | unsigned long iommu_control; | ||
33 | unsigned long iommu_tsbbase; | ||
34 | unsigned long iommu_flush; | ||
35 | unsigned long iommu_ctxflush; | ||
36 | unsigned long write_complete_reg; | ||
37 | unsigned long dummy_page; | ||
38 | unsigned long dummy_page_pa; | ||
39 | unsigned long ctx_lowest_free; | ||
40 | DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS); | ||
41 | u32 dma_addr_mask; | ||
42 | }; | ||
43 | |||
44 | struct strbuf { | ||
45 | int strbuf_enabled; | ||
46 | unsigned long strbuf_control; | ||
47 | unsigned long strbuf_pflush; | ||
48 | unsigned long strbuf_fsync; | ||
49 | unsigned long strbuf_ctxflush; | ||
50 | unsigned long strbuf_ctxmatch_base; | ||
51 | unsigned long strbuf_flushflag_pa; | ||
52 | volatile unsigned long *strbuf_flushflag; | ||
53 | volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)]; | ||
54 | }; | ||
55 | |||
21 | #endif /* !(_SPARC_IOMMU_H) */ | 56 | #endif /* !(_SPARC_IOMMU_H) */ |
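
[Editor's note, not part of the patch] To make the new generic iommu_arena fields concrete, here is an illustrative first-fit allocator over the arena bitmap. It is a sketch only (the real allocators live in the sparc64 IOMMU code and the caller would hold iommu->lock), but it shows what map, hint and limit are for:

    #include <linux/bitops.h>

    /* Allocate 'npages' contiguous IOPTE slots from an iommu_arena.
     * 'map' is a bitmap of in-use slots, 'limit' its size in bits and
     * 'hint' the rotating start point for the next search.
     */
    static long example_arena_alloc(struct iommu_arena *arena, unsigned long npages)
    {
            unsigned long n, i, start = arena->hint;
            unsigned long limit = arena->limit;
            int pass = 0;

    again:
            n = find_next_zero_bit(arena->map, limit, start);
            if (n == limit || n + npages > limit) {
                    if (pass++ == 0) {
                            start = 0;      /* wrap around once */
                            goto again;
                    }
                    return -1;              /* arena exhausted */
            }

            /* Verify the whole run is free; otherwise resume the search
             * just past the first busy bit we hit.
             */
            for (i = n; i < n + npages; i++) {
                    if (test_bit(i, arena->map)) {
                            start = i + 1;
                            goto again;
                    }
            }

            for (i = n; i < n + npages; i++)
                    __set_bit(i, arena->map);

            arena->hint = n + npages;
            return n;
    }
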
diff --git a/include/asm-sparc64/isa.h b/include/asm-sparc64/isa.h index d9728b9031fc..ecd9290f78d4 100644 --- a/include/asm-sparc64/isa.h +++ b/include/asm-sparc64/isa.h | |||
@@ -7,7 +7,6 @@ | |||
7 | #ifndef __SPARC64_ISA_H | 7 | #ifndef __SPARC64_ISA_H |
8 | #define __SPARC64_ISA_H | 8 | #define __SPARC64_ISA_H |
9 | 9 | ||
10 | #include <asm/pbm.h> | ||
11 | #include <asm/oplib.h> | 10 | #include <asm/oplib.h> |
12 | #include <asm/prom.h> | 11 | #include <asm/prom.h> |
13 | #include <asm/of_device.h> | 12 | #include <asm/of_device.h> |
@@ -29,7 +28,6 @@ struct sparc_isa_bridge { | |||
29 | struct of_device ofdev; | 28 | struct of_device ofdev; |
30 | struct sparc_isa_bridge *next; | 29 | struct sparc_isa_bridge *next; |
31 | struct sparc_isa_device *devices; | 30 | struct sparc_isa_device *devices; |
32 | struct pci_pbm_info *parent; | ||
33 | struct pci_dev *self; | 31 | struct pci_dev *self; |
34 | int index; | 32 | int index; |
35 | struct device_node *prom_node; | 33 | struct device_node *prom_node; |
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h index 284dfd01a33d..6340a5253a34 100644 --- a/include/asm-sparc64/parport.h +++ b/include/asm-sparc64/parport.h | |||
@@ -103,7 +103,7 @@ static int ebus_ecpp_p(struct linux_ebus_device *edev) | |||
103 | if (!strcmp(edev->prom_node->name, "ecpp")) | 103 | if (!strcmp(edev->prom_node->name, "ecpp")) |
104 | return 1; | 104 | return 1; |
105 | if (!strcmp(edev->prom_node->name, "parallel")) { | 105 | if (!strcmp(edev->prom_node->name, "parallel")) { |
106 | char *compat; | 106 | const char *compat; |
107 | 107 | ||
108 | compat = of_get_property(edev->prom_node, | 108 | compat = of_get_property(edev->prom_node, |
109 | "compatible", NULL); | 109 | "compatible", NULL); |
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 7a246d8a1828..c008cecca149 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: pbm.h,v 1.27 2001/08/12 13:18:23 davem Exp $ | 1 | /* pbm.h: UltraSparc PCI controller software state. |
2 | * pbm.h: UltraSparc PCI controller software state. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef __SPARC64_PBM_H | 6 | #ifndef __SPARC64_PBM_H |
@@ -30,90 +29,7 @@ | |||
30 | * PCI bus. | 29 | * PCI bus. |
31 | */ | 30 | */ |
32 | 31 | ||
33 | struct pci_controller_info; | 32 | extern void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); |
34 | |||
35 | /* This contains the software state necessary to drive a PCI | ||
36 | * controller's IOMMU. | ||
37 | */ | ||
38 | struct pci_iommu_arena { | ||
39 | unsigned long *map; | ||
40 | unsigned int hint; | ||
41 | unsigned int limit; | ||
42 | }; | ||
43 | |||
44 | struct pci_iommu { | ||
45 | /* This protects the controller's IOMMU and all | ||
46 | * streaming buffers underneath. | ||
47 | */ | ||
48 | spinlock_t lock; | ||
49 | |||
50 | struct pci_iommu_arena arena; | ||
51 | |||
52 | /* IOMMU page table, a linear array of ioptes. */ | ||
53 | iopte_t *page_table; /* The page table itself. */ | ||
54 | |||
55 | /* Base PCI memory space address where IOMMU mappings | ||
56 | * begin. | ||
57 | */ | ||
58 | u32 page_table_map_base; | ||
59 | |||
60 | /* IOMMU Controller Registers */ | ||
61 | unsigned long iommu_control; /* IOMMU control register */ | ||
62 | unsigned long iommu_tsbbase; /* IOMMU page table base register */ | ||
63 | unsigned long iommu_flush; /* IOMMU page flush register */ | ||
64 | unsigned long iommu_ctxflush; /* IOMMU context flush register */ | ||
65 | |||
66 | /* This is a register in the PCI controller, which if | ||
67 | * read will have no side-effects but will guarantee | ||
68 | * completion of all previous writes into IOMMU/STC. | ||
69 | */ | ||
70 | unsigned long write_complete_reg; | ||
71 | |||
72 | /* In order to deal with some buggy third-party PCI bridges that | ||
73 | * do wrong prefetching, we never mark valid mappings as invalid. | ||
74 | * Instead we point them at this dummy page. | ||
75 | */ | ||
76 | unsigned long dummy_page; | ||
77 | unsigned long dummy_page_pa; | ||
78 | |||
79 | /* CTX allocation. */ | ||
80 | unsigned long ctx_lowest_free; | ||
81 | unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; | ||
82 | |||
83 | /* Here a PCI controller driver describes the areas of | ||
84 | * PCI memory space where DMA to/from physical memory | ||
85 | * are addressed. Drivers interrogate the PCI layer | ||
86 | * if their device has addressing limitations. They | ||
87 | * do so via pci_dma_supported, and pass in a mask of | ||
88 | * DMA address bits their device can actually drive. | ||
89 | * | ||
90 | * The test for being usable is: | ||
91 | * (device_mask & dma_addr_mask) == dma_addr_mask | ||
92 | */ | ||
93 | u32 dma_addr_mask; | ||
94 | }; | ||
95 | |||
96 | extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); | ||
97 | |||
98 | /* This describes a PCI bus module's streaming buffer. */ | ||
99 | struct pci_strbuf { | ||
100 | int strbuf_enabled; /* Present and using it? */ | ||
101 | |||
102 | /* Streaming Buffer Control Registers */ | ||
103 | unsigned long strbuf_control; /* STC control register */ | ||
104 | unsigned long strbuf_pflush; /* STC page flush register */ | ||
105 | unsigned long strbuf_fsync; /* STC flush synchronization reg */ | ||
106 | unsigned long strbuf_ctxflush; /* STC context flush register */ | ||
107 | unsigned long strbuf_ctxmatch_base; /* STC context flush match reg */ | ||
108 | unsigned long strbuf_flushflag_pa; /* Physical address of flush flag */ | ||
109 | volatile unsigned long *strbuf_flushflag; /* The flush flag itself */ | ||
110 | |||
111 | /* And this is the actual flush flag area. | ||
112 | * We allocate extra because the chips require | ||
113 | * a 64-byte aligned area. | ||
114 | */ | ||
115 | volatile unsigned long __flushflag_buf[(64 + (64 - 1)) / sizeof(long)]; | ||
116 | }; | ||
117 | 33 | ||
118 | #define PCI_STC_FLUSHFLAG_INIT(STC) \ | 34 | #define PCI_STC_FLUSHFLAG_INIT(STC) \ |
119 | (*((STC)->strbuf_flushflag) = 0UL) | 35 | (*((STC)->strbuf_flushflag) = 0UL) |
@@ -126,6 +42,8 @@ struct pci_strbuf { | |||
126 | #define PROM_PCIRNG_MAX 64 | 42 | #define PROM_PCIRNG_MAX 64 |
127 | #define PROM_PCIIMAP_MAX 64 | 43 | #define PROM_PCIIMAP_MAX 64 |
128 | 44 | ||
45 | struct pci_controller_info; | ||
46 | |||
129 | struct pci_pbm_info { | 47 | struct pci_pbm_info { |
130 | /* PCI controller we sit under. */ | 48 | /* PCI controller we sit under. */ |
131 | struct pci_controller_info *parent; | 49 | struct pci_controller_info *parent; |
@@ -160,11 +78,6 @@ struct pci_pbm_info { | |||
160 | 78 | ||
161 | /* OBP specific information. */ | 79 | /* OBP specific information. */ |
162 | struct device_node *prom_node; | 80 | struct device_node *prom_node; |
163 | struct linux_prom_pci_ranges *pbm_ranges; | ||
164 | int num_pbm_ranges; | ||
165 | struct linux_prom_pci_intmap *pbm_intmap; | ||
166 | int num_pbm_intmap; | ||
167 | struct linux_prom_pci_intmask *pbm_intmask; | ||
168 | u64 ino_bitmap; | 81 | u64 ino_bitmap; |
169 | 82 | ||
170 | /* PBM I/O and Memory space resources. */ | 83 | /* PBM I/O and Memory space resources. */ |
@@ -197,13 +110,10 @@ struct pci_pbm_info { | |||
197 | #endif /* !(CONFIG_PCI_MSI) */ | 110 | #endif /* !(CONFIG_PCI_MSI) */ |
198 | 111 | ||
199 | /* This PBM's streaming buffer. */ | 112 | /* This PBM's streaming buffer. */ |
200 | struct pci_strbuf stc; | 113 | struct strbuf stc; |
201 | 114 | ||
202 | /* IOMMU state, potentially shared by both PBM segments. */ | 115 | /* IOMMU state, potentially shared by both PBM segments. */ |
203 | struct pci_iommu *iommu; | 116 | struct iommu *iommu; |
204 | |||
205 | /* PCI slot mapping. */ | ||
206 | unsigned int pci_first_slot; | ||
207 | 117 | ||
208 | /* Now things for the actual PCI bus probes. */ | 118 | /* Now things for the actual PCI bus probes. */ |
209 | unsigned int pci_first_busno; | 119 | unsigned int pci_first_busno; |
@@ -220,17 +130,12 @@ struct pci_controller_info { | |||
220 | */ | 130 | */ |
221 | int index; | 131 | int index; |
222 | 132 | ||
223 | /* Do the PBMs both exist in the same PCI domain? */ | ||
224 | int pbms_same_domain; | ||
225 | |||
226 | /* The PCI bus modules controlled by us. */ | 133 | /* The PCI bus modules controlled by us. */ |
227 | struct pci_pbm_info pbm_A; | 134 | struct pci_pbm_info pbm_A; |
228 | struct pci_pbm_info pbm_B; | 135 | struct pci_pbm_info pbm_B; |
229 | 136 | ||
230 | /* Operations which are controller specific. */ | 137 | /* Operations which are controller specific. */ |
231 | void (*scan_bus)(struct pci_controller_info *); | 138 | void (*scan_bus)(struct pci_controller_info *); |
232 | void (*base_address_update)(struct pci_dev *, int); | ||
233 | void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *); | ||
234 | 139 | ||
235 | #ifdef CONFIG_PCI_MSI | 140 | #ifdef CONFIG_PCI_MSI |
236 | int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, | 141 | int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, |
@@ -244,27 +149,4 @@ struct pci_controller_info { | |||
244 | unsigned int pci_last_busno; | 149 | unsigned int pci_last_busno; |
245 | }; | 150 | }; |
246 | 151 | ||
247 | /* PCI devices which are not bridges have this placed in their pci_dev | ||
248 | * sysdata member. This makes OBP aware PCI device drivers easier to | ||
249 | * code. | ||
250 | */ | ||
251 | struct pcidev_cookie { | ||
252 | struct pci_pbm_info *pbm; | ||
253 | struct device_node *prom_node; | ||
254 | struct of_device *op; | ||
255 | struct linux_prom_pci_registers prom_regs[PROMREG_MAX]; | ||
256 | int num_prom_regs; | ||
257 | struct linux_prom_pci_registers prom_assignments[PROMREG_MAX]; | ||
258 | int num_prom_assignments; | ||
259 | #ifdef CONFIG_PCI_MSI | ||
260 | unsigned int msi_num; | ||
261 | #endif | ||
262 | }; | ||
263 | |||
264 | /* Currently these are the same across all PCI controllers | ||
265 | * we support. Someday they may not be... | ||
266 | */ | ||
267 | #define PCI_IRQ_IGN 0x000007c0 /* Interrupt Group Number */ | ||
268 | #define PCI_IRQ_INO 0x0000003f /* Interrupt Number */ | ||
269 | |||
270 | #endif /* !(__SPARC64_PBM_H) */ | 152 | #endif /* !(__SPARC64_PBM_H) */ |
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index b14a725b430d..47cea16e1bad 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h | |||
@@ -54,7 +54,7 @@ struct pci_iommu_ops { | |||
54 | void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); | 54 | void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); |
55 | }; | 55 | }; |
56 | 56 | ||
57 | extern struct pci_iommu_ops *pci_iommu_ops; | 57 | extern const struct pci_iommu_ops *pci_iommu_ops; |
58 | 58 | ||
59 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | 59 | /* Allocate and map kernel buffer using consistent mode DMA for a device. |
60 | * hwdev should be valid struct pci_dev pointer for PCI devices. | 60 | * hwdev should be valid struct pci_dev pointer for PCI devices. |
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index b12be7a869f6..46705ef47d27 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h | |||
@@ -737,20 +737,6 @@ extern unsigned long pte_file(pte_t); | |||
737 | extern pte_t pgoff_to_pte(unsigned long); | 737 | extern pte_t pgoff_to_pte(unsigned long); |
738 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) | 738 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) |
739 | 739 | ||
740 | extern unsigned long prom_virt_to_phys(unsigned long, int *); | ||
741 | |||
742 | extern unsigned long sun4u_get_pte(unsigned long); | ||
743 | |||
744 | static inline unsigned long __get_phys(unsigned long addr) | ||
745 | { | ||
746 | return sun4u_get_pte(addr); | ||
747 | } | ||
748 | |||
749 | static inline int __get_iospace(unsigned long addr) | ||
750 | { | ||
751 | return ((sun4u_get_pte(addr) & 0xf0000000) >> 28); | ||
752 | } | ||
753 | |||
754 | extern unsigned long *sparc64_valid_addr_bitmap; | 740 | extern unsigned long *sparc64_valid_addr_bitmap; |
755 | 741 | ||
756 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | 742 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ |
@@ -791,6 +777,8 @@ extern void pgtable_cache_init(void); | |||
791 | extern void sun4v_register_fault_status(void); | 777 | extern void sun4v_register_fault_status(void); |
792 | extern void sun4v_ktsb_register(void); | 778 | extern void sun4v_ktsb_register(void); |
793 | 779 | ||
780 | extern unsigned long cmdline_memory_size; | ||
781 | |||
794 | #endif /* !(__ASSEMBLY__) */ | 782 | #endif /* !(__ASSEMBLY__) */ |
795 | 783 | ||
796 | #endif /* !(_SPARC64_PGTABLE_H) */ | 784 | #endif /* !(_SPARC64_PGTABLE_H) */ |
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index 0eca2d98627f..ddad5f99ac7f 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h | |||
@@ -36,8 +36,8 @@ struct property { | |||
36 | 36 | ||
37 | struct of_irq_controller; | 37 | struct of_irq_controller; |
38 | struct device_node { | 38 | struct device_node { |
39 | char *name; | 39 | const char *name; |
40 | char *type; | 40 | const char *type; |
41 | phandle node; | 41 | phandle node; |
42 | char *path_component_name; | 42 | char *path_component_name; |
43 | char *full_name; | 43 | char *full_name; |
@@ -93,11 +93,13 @@ extern struct device_node *of_find_node_by_phandle(phandle handle); | |||
93 | extern struct device_node *of_get_parent(const struct device_node *node); | 93 | extern struct device_node *of_get_parent(const struct device_node *node); |
94 | extern struct device_node *of_get_next_child(const struct device_node *node, | 94 | extern struct device_node *of_get_next_child(const struct device_node *node, |
95 | struct device_node *prev); | 95 | struct device_node *prev); |
96 | extern struct property *of_find_property(struct device_node *np, | 96 | extern struct property *of_find_property(const struct device_node *np, |
97 | const char *name, | 97 | const char *name, |
98 | int *lenp); | 98 | int *lenp); |
99 | extern int of_device_is_compatible(struct device_node *device, const char *); | 99 | extern int of_device_is_compatible(const struct device_node *device, |
100 | extern void *of_get_property(struct device_node *node, const char *name, | 100 | const char *); |
101 | extern const void *of_get_property(const struct device_node *node, | ||
102 | const char *name, | ||
101 | int *lenp); | 103 | int *lenp); |
102 | #define get_property(node,name,lenp) of_get_property(node,name,lenp) | 104 | #define get_property(node,name,lenp) of_get_property(node,name,lenp) |
103 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); | 105 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); |
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 388249b751c3..cca54804b722 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h | |||
@@ -42,15 +42,15 @@ extern int hard_smp_processor_id(void); | |||
42 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 42 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
43 | 43 | ||
44 | extern void smp_setup_cpu_possible_map(void); | 44 | extern void smp_setup_cpu_possible_map(void); |
45 | extern unsigned char boot_cpu_id; | ||
45 | 46 | ||
46 | #endif /* !(__ASSEMBLY__) */ | 47 | #endif /* !(__ASSEMBLY__) */ |
47 | 48 | ||
48 | #else | 49 | #else |
49 | 50 | ||
50 | #define smp_setup_cpu_possible_map() do { } while (0) | 51 | #define smp_setup_cpu_possible_map() do { } while (0) |
52 | #define boot_cpu_id (0) | ||
51 | 53 | ||
52 | #endif /* !(CONFIG_SMP) */ | 54 | #endif /* !(CONFIG_SMP) */ |
53 | 55 | ||
54 | #define NO_PROC_ID 0xFF | ||
55 | |||
56 | #endif /* !(_SPARC64_SMP_H) */ | 56 | #endif /* !(_SPARC64_SMP_H) */ |
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h index ed5c9d8541e2..77bcd2bfa53c 100644 --- a/include/asm-sparc64/sparsemem.h +++ b/include/asm-sparc64/sparsemem.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #define SECTION_SIZE_BITS 26 | 6 | #define SECTION_SIZE_BITS 31 |
7 | #define MAX_PHYSADDR_BITS 42 | 7 | #define MAX_PHYSADDR_BITS 42 |
8 | #define MAX_PHYSMEM_BITS 42 | 8 | #define MAX_PHYSMEM_BITS 42 |
9 | 9 | ||
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h index d435594df786..ccbd69448866 100644 --- a/include/asm-sparc64/timer.h +++ b/include/asm-sparc64/timer.h | |||
@@ -11,22 +11,19 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | struct sparc64_tick_ops { | 13 | struct sparc64_tick_ops { |
14 | void (*init_tick)(unsigned long); | ||
15 | unsigned long (*get_tick)(void); | 14 | unsigned long (*get_tick)(void); |
16 | unsigned long (*get_compare)(void); | 15 | int (*add_compare)(unsigned long); |
17 | unsigned long (*add_tick)(unsigned long, unsigned long); | ||
18 | unsigned long (*add_compare)(unsigned long); | ||
19 | unsigned long softint_mask; | 16 | unsigned long softint_mask; |
17 | void (*disable_irq)(void); | ||
18 | |||
19 | void (*init_tick)(void); | ||
20 | unsigned long (*add_tick)(unsigned long); | ||
21 | |||
22 | char *name; | ||
20 | }; | 23 | }; |
21 | 24 | ||
22 | extern struct sparc64_tick_ops *tick_ops; | 25 | extern struct sparc64_tick_ops *tick_ops; |
23 | 26 | ||
24 | #ifdef CONFIG_SMP | ||
25 | extern unsigned long timer_tick_offset; | ||
26 | struct pt_regs; | ||
27 | extern void timer_tick_interrupt(struct pt_regs *); | ||
28 | #endif | ||
29 | |||
30 | extern unsigned long sparc64_get_clock_tick(unsigned int cpu); | 27 | extern unsigned long sparc64_get_clock_tick(unsigned int cpu); |
31 | 28 | ||
32 | #endif /* _SPARC64_TIMER_H */ | 29 | #endif /* _SPARC64_TIMER_H */ |
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index c2a16e188499..bbb9c8f13d61 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h | |||
@@ -157,23 +157,6 @@ | |||
157 | ba,a,pt %xcc, rtrap_irq; \ | 157 | ba,a,pt %xcc, rtrap_irq; \ |
158 | .previous; | 158 | .previous; |
159 | 159 | ||
160 | #define TICK_SMP_IRQ \ | ||
161 | rdpr %pil, %g2; \ | ||
162 | wrpr %g0, 15, %pil; \ | ||
163 | sethi %hi(1f-4), %g7; \ | ||
164 | ba,pt %xcc, etrap_irq; \ | ||
165 | or %g7, %lo(1f-4), %g7; \ | ||
166 | nop; \ | ||
167 | nop; \ | ||
168 | nop; \ | ||
169 | .subsection 2; \ | ||
170 | 1: call trace_hardirqs_off; \ | ||
171 | nop; \ | ||
172 | call smp_percpu_timer_interrupt; \ | ||
173 | add %sp, PTREGS_OFF, %o0; \ | ||
174 | ba,a,pt %xcc, rtrap_irq; \ | ||
175 | .previous; | ||
176 | |||
177 | #else | 160 | #else |
178 | 161 | ||
179 | #define TRAP_IRQ(routine, level) \ | 162 | #define TRAP_IRQ(routine, level) \ |
@@ -186,16 +169,6 @@ | |||
186 | add %sp, PTREGS_OFF, %o1; \ | 169 | add %sp, PTREGS_OFF, %o1; \ |
187 | ba,a,pt %xcc, rtrap_irq; | 170 | ba,a,pt %xcc, rtrap_irq; |
188 | 171 | ||
189 | #define TICK_SMP_IRQ \ | ||
190 | rdpr %pil, %g2; \ | ||
191 | wrpr %g0, 15, %pil; \ | ||
192 | sethi %hi(109f), %g7; \ | ||
193 | ba,pt %xcc, etrap_irq; \ | ||
194 | 109: or %g7, %lo(109b), %g7; \ | ||
195 | call smp_percpu_timer_interrupt; \ | ||
196 | add %sp, PTREGS_OFF, %o0; \ | ||
197 | ba,a,pt %xcc, rtrap_irq; | ||
198 | |||
199 | #endif | 172 | #endif |
200 | 173 | ||
201 | #define TRAP_IVEC TRAP_NOSAVE(do_ivec) | 174 | #define TRAP_IVEC TRAP_NOSAVE(do_ivec) |
diff --git a/include/linux/string.h b/include/linux/string.h index 4f69ef9e6eb5..7f2eb6a477f9 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -47,6 +47,12 @@ extern int strncmp(const char *,const char *,__kernel_size_t); | |||
47 | #ifndef __HAVE_ARCH_STRNICMP | 47 | #ifndef __HAVE_ARCH_STRNICMP |
48 | extern int strnicmp(const char *, const char *, __kernel_size_t); | 48 | extern int strnicmp(const char *, const char *, __kernel_size_t); |
49 | #endif | 49 | #endif |
50 | #ifndef __HAVE_ARCH_STRCASECMP | ||
51 | extern int strcasecmp(const char *s1, const char *s2); | ||
52 | #endif | ||
53 | #ifndef __HAVE_ARCH_STRNCASECMP | ||
54 | extern int strncasecmp(const char *s1, const char *s2, size_t n); | ||
55 | #endif | ||
50 | #ifndef __HAVE_ARCH_STRCHR | 56 | #ifndef __HAVE_ARCH_STRCHR |
51 | extern char * strchr(const char *,int); | 57 | extern char * strchr(const char *,int); |
52 | #endif | 58 | #endif |
diff --git a/lib/string.c b/lib/string.c index bab440fb0dfc..5efafed3d6b6 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -60,6 +60,34 @@ int strnicmp(const char *s1, const char *s2, size_t len) | |||
60 | EXPORT_SYMBOL(strnicmp); | 60 | EXPORT_SYMBOL(strnicmp); |
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #ifndef __HAVE_ARCH_STRCASECMP | ||
64 | int strcasecmp(const char *s1, const char *s2) | ||
65 | { | ||
66 | int c1, c2; | ||
67 | |||
68 | do { | ||
69 | c1 = tolower(*s1++); | ||
70 | c2 = tolower(*s2++); | ||
71 | } while (c1 == c2 && c1 != 0); | ||
72 | return c1 - c2; | ||
73 | } | ||
74 | EXPORT_SYMBOL(strcasecmp); | ||
75 | #endif | ||
76 | |||
77 | #ifndef __HAVE_ARCH_STRNCASECMP | ||
78 | int strncasecmp(const char *s1, const char *s2, size_t n) | ||
79 | { | ||
80 | int c1, c2; | ||
81 | |||
82 | do { | ||
83 | c1 = tolower(*s1++); | ||
84 | c2 = tolower(*s2++); | ||
85 | } while ((--n > 0) && c1 == c2 && c1 != 0); | ||
86 | return c1 - c2; | ||
87 | } | ||
88 | EXPORT_SYMBOL(strncasecmp); | ||
89 | #endif | ||
90 | |||
63 | #ifndef __HAVE_ARCH_STRCPY | 91 | #ifndef __HAVE_ARCH_STRCPY |
64 | /** | 92 | /** |
65 | * strcpy - Copy a %NUL terminated string | 93 | * strcpy - Copy a %NUL terminated string |
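
[Editor's note, not part of the patch] A small usage sketch for the generic strcasecmp()/strncasecmp() helpers added above; the parser and the accepted strings are invented for illustration:

    #include <linux/string.h>

    /* Case-insensitive parsing of an on/off option string using the
     * new lib/string.c fallbacks.
     */
    static int example_parse_onoff(const char *val)
    {
            if (!strcasecmp(val, "on") || !strcasecmp(val, "yes"))
                    return 1;
            if (!strncasecmp(val, "off", 3) || !strcasecmp(val, "no"))
                    return 0;
            return -1;      /* unrecognized */
    }
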
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c index c899786f30f5..07962a35f241 100644 --- a/sound/sparc/amd7930.c +++ b/sound/sparc/amd7930.c | |||
@@ -1067,8 +1067,8 @@ out_err: | |||
1067 | 1067 | ||
1068 | static int __devinit amd7930_obio_attach(struct device_node *dp) | 1068 | static int __devinit amd7930_obio_attach(struct device_node *dp) |
1069 | { | 1069 | { |
1070 | struct linux_prom_registers *regs; | 1070 | const struct linux_prom_registers *regs; |
1071 | struct linux_prom_irqs *irqp; | 1071 | const struct linux_prom_irqs *irqp; |
1072 | struct resource res, *rp; | 1072 | struct resource res, *rp; |
1073 | int len; | 1073 | int len; |
1074 | 1074 | ||
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c index f5956d557f70..900a00de35fd 100644 --- a/sound/sparc/cs4231.c +++ b/sound/sparc/cs4231.c | |||
@@ -2284,7 +2284,7 @@ static int __init cs4231_init(void) | |||
2284 | if (!strcmp(edev->prom_node->name, "SUNW,CS4231")) { | 2284 | if (!strcmp(edev->prom_node->name, "SUNW,CS4231")) { |
2285 | match = 1; | 2285 | match = 1; |
2286 | } else if (!strcmp(edev->prom_node->name, "audio")) { | 2286 | } else if (!strcmp(edev->prom_node->name, "audio")) { |
2287 | char *compat; | 2287 | const char *compat; |
2288 | 2288 | ||
2289 | compat = of_get_property(edev->prom_node, | 2289 | compat = of_get_property(edev->prom_node, |
2290 | "compatible", NULL); | 2290 | "compatible", NULL); |