author     Christoph Hellwig <hch@lst.de>     2019-08-13 03:25:01 -0400
committer  Tony Luck <tony.luck@intel.com>    2019-08-16 14:33:57 -0400
commit     cf07cb1ff4ea008abf06c95878c700cf1dd65c3e
tree       2f2f324bfe52f80bbfee2eeee90e5427ea9346b3
parent     f7bc6e42bf12487182fc442a08eca25d968dc543
ia64: remove support for the SGI SN2 platform
The SGI SN2 (early Altix) is a very non-standard IA64 platform that was
at the very high end of even IA64 hardware, and was discontinued a long
time ago. Remove it because there are no upstream users left, and it
has magic hooks all over the kernel.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lkml.kernel.org/r/20190813072514.23299-16-hch@lst.de
Signed-off-by: Tony Luck <tony.luck@intel.com>
92 files changed, 14 insertions, 20657 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 43f3a3076ab2..ae3aca14506e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -66,7 +66,6 @@ config 64BIT
66 | 66 | ||
67 | config ZONE_DMA32 | 67 | config ZONE_DMA32 |
68 | def_bool y | 68 | def_bool y |
69 | depends on !IA64_SGI_SN2 | ||
70 | 69 | ||
71 | config QUICKLIST | 70 | config QUICKLIST |
72 | bool | 71 | bool |
@@ -140,7 +139,6 @@ config IA64_GENERIC
140 | DIG+Intel+IOMMU For DIG systems with Intel IOMMU | 139 | DIG+Intel+IOMMU For DIG systems with Intel IOMMU |
141 | HP-zx1/sx1000 For HP systems | 140 | HP-zx1/sx1000 For HP systems |
142 | HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices. | 141 | HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices. |
143 | SGI-SN2 For SGI Altix systems | ||
144 | SGI-UV For SGI UV systems | 142 | SGI-UV For SGI UV systems |
145 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> | 143 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> |
146 | 144 | ||
@@ -171,17 +169,6 @@ config IA64_HP_ZX1_SWIOTLB
171 | I/O TLB, which allows supporting the broken devices at the expense of | 169 | I/O TLB, which allows supporting the broken devices at the expense of |
172 | wasting some kernel memory (about 2MB by default). | 170 | wasting some kernel memory (about 2MB by default). |
173 | 171 | ||
174 | config IA64_SGI_SN2 | ||
175 | bool "SGI-SN2" | ||
176 | select NUMA | ||
177 | select ACPI_NUMA | ||
178 | help | ||
179 | Selecting this option will optimize the kernel for use on sn2 based | ||
180 | systems, but the resulting kernel binary will not run on other | ||
181 | types of ia64 systems. If you have an SGI Altix system, it's safe | ||
182 | to select this option. If in doubt, select ia64 generic support | ||
183 | instead. | ||
184 | |||
185 | config IA64_SGI_UV | 172 | config IA64_SGI_UV |
186 | bool "SGI-UV" | 173 | bool "SGI-UV" |
187 | select NUMA | 174 | select NUMA |
@@ -381,13 +368,12 @@ config ARCH_SPARSEMEM_ENABLE
381 | select SPARSEMEM_VMEMMAP_ENABLE | 368 | select SPARSEMEM_VMEMMAP_ENABLE |
382 | 369 | ||
383 | config ARCH_DISCONTIGMEM_DEFAULT | 370 | config ARCH_DISCONTIGMEM_DEFAULT |
384 | def_bool y if (IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) | 371 | def_bool y if (IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) |
385 | depends on ARCH_DISCONTIGMEM_ENABLE | 372 | depends on ARCH_DISCONTIGMEM_ENABLE |
386 | 373 | ||
387 | config NUMA | 374 | config NUMA |
388 | bool "NUMA support" | 375 | bool "NUMA support" |
389 | depends on !IA64_HP_SIM && !FLATMEM | 376 | depends on !IA64_HP_SIM && !FLATMEM |
390 | default y if IA64_SGI_SN2 | ||
391 | select ACPI_NUMA if ACPI | 377 | select ACPI_NUMA if ACPI |
392 | help | 378 | help |
393 | Say Y to compile the kernel to support NUMA (Non-Uniform Memory | 379 | Say Y to compile the kernel to support NUMA (Non-Uniform Memory |
@@ -472,9 +458,6 @@ config IA64_MC_ERR_INJECT
472 | 458 | ||
473 | If you're unsure, do not select this option. | 459 | If you're unsure, do not select this option. |
474 | 460 | ||
475 | config SGI_SN | ||
476 | def_bool y if (IA64_SGI_SN2 || IA64_GENERIC) | ||
477 | |||
478 | config IA64_ESI | 461 | config IA64_ESI |
479 | bool "ESI (Extensible SAL Interface) support" | 462 | bool "ESI (Extensible SAL Interface) support" |
480 | help | 463 | help |
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index 1371efc9b005..793a613c54ab 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -14,7 +14,7 @@ config IA64_GRANULE_16MB
14 | 14 | ||
15 | config IA64_GRANULE_64MB | 15 | config IA64_GRANULE_64MB |
16 | bool "64MB" | 16 | bool "64MB" |
17 | depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_SGI_SN2) | 17 | depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) |
18 | 18 | ||
19 | endchoice | 19 | endchoice |
20 | 20 | ||
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 171290f9f1de..0d730b061f72 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -49,14 +49,13 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
49 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ | 49 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ |
50 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ | 50 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ |
51 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ | 51 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ |
52 | core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ | ||
53 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ | 52 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ |
54 | 53 | ||
55 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ | 54 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ |
56 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ | 55 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ |
57 | drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ | 56 | drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ |
58 | drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ | 57 | drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ |
59 | drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/ | 58 | drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/uv/ |
60 | drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/ | 59 | drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/ |
61 | 60 | ||
62 | boot := arch/ia64/hp/sim/boot | 61 | boot := arch/ia64/hp/sim/boot |
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 0ea569040c5b..80c5ef8f475e 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -43,8 +43,6 @@ static inline const char *acpi_get_sysname (void)
43 | return "hpzx1"; | 43 | return "hpzx1"; |
44 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) | 44 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) |
45 | return "hpzx1_swiotlb"; | 45 | return "hpzx1_swiotlb"; |
46 | # elif defined (CONFIG_IA64_SGI_SN2) | ||
47 | return "sn2"; | ||
48 | # elif defined (CONFIG_IA64_SGI_UV) | 46 | # elif defined (CONFIG_IA64_SGI_UV) |
49 | return "uv"; | 47 | return "uv"; |
50 | # elif defined (CONFIG_IA64_DIG) | 48 | # elif defined (CONFIG_IA64_DIG) |
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 8b84a55ed38a..5acf52e90872 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -28,9 +28,6 @@ irq_canonicalize (int irq)
28 | } | 28 | } |
29 | 29 | ||
30 | extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); | 30 | extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); |
31 | bool is_affinity_mask_valid(const struct cpumask *cpumask); | ||
32 | |||
33 | #define is_affinity_mask_valid is_affinity_mask_valid | ||
34 | 31 | ||
35 | int create_irq(void); | 32 | int create_irq(void); |
36 | void destroy_irq(unsigned int irq); | 33 | void destroy_irq(unsigned int irq); |
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index beae261fbcb4..d657f59d4fb3 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -101,8 +101,6 @@ extern void machvec_timer_interrupt (int, void *);
101 | # include <asm/machvec_hpzx1.h> | 101 | # include <asm/machvec_hpzx1.h> |
102 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) | 102 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) |
103 | # include <asm/machvec_hpzx1_swiotlb.h> | 103 | # include <asm/machvec_hpzx1_swiotlb.h> |
104 | # elif defined (CONFIG_IA64_SGI_SN2) | ||
105 | # include <asm/machvec_sn2.h> | ||
106 | # elif defined (CONFIG_IA64_SGI_UV) | 104 | # elif defined (CONFIG_IA64_SGI_UV) |
107 | # include <asm/machvec_uv.h> | 105 | # include <asm/machvec_uv.h> |
108 | # elif defined (CONFIG_IA64_GENERIC) | 106 | # elif defined (CONFIG_IA64_GENERIC) |
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
deleted file mode 100644
index a243e4fb4877..000000000000
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ /dev/null
@@ -1,114 +0,0 @@
1 | /* | ||
2 | * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of version 2 of the GNU General Public License | ||
6 | * as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it would be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
11 | * | ||
12 | * Further, this software is distributed without any warranty that it is | ||
13 | * free of the rightful claim of any third person regarding infringement | ||
14 | * or the like. Any license provided herein, whether implied or | ||
15 | * otherwise, applies only to this software file. Patent licenses, if | ||
16 | * any, provided herein do not apply to combinations of this program with | ||
17 | * other software, or any other product whatsoever. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public | ||
20 | * License along with this program; if not, write the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
22 | * | ||
23 | * For further information regarding this notice, see: | ||
24 | * | ||
25 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan | ||
26 | */ | ||
27 | |||
28 | #ifndef _ASM_IA64_MACHVEC_SN2_H | ||
29 | #define _ASM_IA64_MACHVEC_SN2_H | ||
30 | |||
31 | extern ia64_mv_setup_t sn_setup; | ||
32 | extern ia64_mv_cpu_init_t sn_cpu_init; | ||
33 | extern ia64_mv_irq_init_t sn_irq_init; | ||
34 | extern ia64_mv_send_ipi_t sn2_send_IPI; | ||
35 | extern ia64_mv_timer_interrupt_t sn_timer_interrupt; | ||
36 | extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; | ||
37 | extern ia64_mv_irq_to_vector sn_irq_to_vector; | ||
38 | extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; | ||
39 | extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem; | ||
40 | extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read; | ||
41 | extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write; | ||
42 | extern ia64_mv_inb_t __sn_inb; | ||
43 | extern ia64_mv_inw_t __sn_inw; | ||
44 | extern ia64_mv_inl_t __sn_inl; | ||
45 | extern ia64_mv_outb_t __sn_outb; | ||
46 | extern ia64_mv_outw_t __sn_outw; | ||
47 | extern ia64_mv_outl_t __sn_outl; | ||
48 | extern ia64_mv_mmiowb_t __sn_mmiowb; | ||
49 | extern ia64_mv_readb_t __sn_readb; | ||
50 | extern ia64_mv_readw_t __sn_readw; | ||
51 | extern ia64_mv_readl_t __sn_readl; | ||
52 | extern ia64_mv_readq_t __sn_readq; | ||
53 | extern ia64_mv_readb_t __sn_readb_relaxed; | ||
54 | extern ia64_mv_readw_t __sn_readw_relaxed; | ||
55 | extern ia64_mv_readl_t __sn_readl_relaxed; | ||
56 | extern ia64_mv_readq_t __sn_readq_relaxed; | ||
57 | extern ia64_mv_dma_init sn_dma_init; | ||
58 | extern ia64_mv_migrate_t sn_migrate; | ||
59 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; | ||
60 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; | ||
61 | extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq; | ||
62 | extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; | ||
63 | |||
64 | |||
65 | /* | ||
66 | * This stuff has dual use! | ||
67 | * | ||
68 | * For a generic kernel, the macros are used to initialize the | ||
69 | * platform's machvec structure. When compiling a non-generic kernel, | ||
70 | * the macros are used directly. | ||
71 | */ | ||
72 | #define ia64_platform_name "sn2" | ||
73 | #define platform_setup sn_setup | ||
74 | #define platform_cpu_init sn_cpu_init | ||
75 | #define platform_irq_init sn_irq_init | ||
76 | #define platform_send_ipi sn2_send_IPI | ||
77 | #define platform_timer_interrupt sn_timer_interrupt | ||
78 | #define platform_global_tlb_purge sn2_global_tlb_purge | ||
79 | #define platform_pci_fixup sn_pci_fixup | ||
80 | #define platform_inb __sn_inb | ||
81 | #define platform_inw __sn_inw | ||
82 | #define platform_inl __sn_inl | ||
83 | #define platform_outb __sn_outb | ||
84 | #define platform_outw __sn_outw | ||
85 | #define platform_outl __sn_outl | ||
86 | #define platform_mmiowb __sn_mmiowb | ||
87 | #define platform_readb __sn_readb | ||
88 | #define platform_readw __sn_readw | ||
89 | #define platform_readl __sn_readl | ||
90 | #define platform_readq __sn_readq | ||
91 | #define platform_readb_relaxed __sn_readb_relaxed | ||
92 | #define platform_readw_relaxed __sn_readw_relaxed | ||
93 | #define platform_readl_relaxed __sn_readl_relaxed | ||
94 | #define platform_readq_relaxed __sn_readq_relaxed | ||
95 | #define platform_irq_to_vector sn_irq_to_vector | ||
96 | #define platform_local_vector_to_irq sn_local_vector_to_irq | ||
97 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem | ||
98 | #define platform_pci_legacy_read sn_pci_legacy_read | ||
99 | #define platform_pci_legacy_write sn_pci_legacy_write | ||
100 | #define platform_dma_init sn_dma_init | ||
101 | #define platform_migrate sn_migrate | ||
102 | #define platform_kernel_launch_event sn_kernel_launch_event | ||
103 | #ifdef CONFIG_PCI_MSI | ||
104 | #define platform_setup_msi_irq sn_setup_msi_irq | ||
105 | #define platform_teardown_msi_irq sn_teardown_msi_irq | ||
106 | #else | ||
107 | #define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL) | ||
108 | #define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL) | ||
109 | #endif | ||
110 | #define platform_pci_fixup_bus sn_pci_fixup_bus | ||
111 | |||
112 | #include <asm/sn/io.h> | ||
113 | |||
114 | #endif /* _ASM_IA64_MACHVEC_SN2_H */ | ||
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
index e0de61709cf1..0ceca5f9449c 100644
--- a/arch/ia64/include/asm/mmzone.h
+++ b/arch/ia64/include/asm/mmzone.h
@@ -30,7 +30,7 @@ static inline int pfn_to_nid(unsigned long pfn)
30 | #ifdef CONFIG_IA64_DIG /* DIG systems are small */ | 30 | #ifdef CONFIG_IA64_DIG /* DIG systems are small */ |
31 | # define MAX_PHYSNODE_ID 8 | 31 | # define MAX_PHYSNODE_ID 8 |
32 | # define NR_NODE_MEMBLKS (MAX_NUMNODES * 8) | 32 | # define NR_NODE_MEMBLKS (MAX_NUMNODES * 8) |
33 | #else /* sn2 is the biggest case, so we use that if !DIG */ | 33 | #else |
34 | # define MAX_PHYSNODE_ID 2048 | 34 | # define MAX_PHYSNODE_ID 2048 |
35 | # define NR_NODE_MEMBLKS (MAX_NUMNODES * 4) | 35 | # define NR_NODE_MEMBLKS (MAX_NUMNODES * 4) |
36 | #endif | 36 | #endif |
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
deleted file mode 100644
index fd480db25565..000000000000
--- a/arch/ia64/include/asm/sn/acpi.h
+++ /dev/null
@@ -1,15 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_ACPI_H | ||
10 | #define _ASM_IA64_SN_ACPI_H | ||
11 | |||
12 | extern int sn_acpi_rev; | ||
13 | #define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101) | ||
14 | |||
15 | #endif /* _ASM_IA64_SN_ACPI_H */ | ||
diff --git a/arch/ia64/include/asm/sn/addrs.h b/arch/ia64/include/asm/sn/addrs.h
deleted file mode 100644
index e715c794b186..000000000000
--- a/arch/ia64/include/asm/sn/addrs.h
+++ /dev/null
@@ -1,299 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_ADDRS_H | ||
10 | #define _ASM_IA64_SN_ADDRS_H | ||
11 | |||
12 | #include <asm/percpu.h> | ||
13 | #include <asm/sn/types.h> | ||
14 | #include <asm/sn/arch.h> | ||
15 | #include <asm/sn/pda.h> | ||
16 | |||
17 | /* | ||
18 | * Memory/SHUB Address Format: | ||
19 | * +-+---------+--+--------------+ | ||
20 | * |0| NASID |AS| NodeOffset | | ||
21 | * +-+---------+--+--------------+ | ||
22 | * | ||
23 | * NASID: (low NASID bit is 0) Memory and SHUB MMRs | ||
24 | * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0 | ||
25 | * 00: Local Resources and MMR space | ||
26 | * Top bit of NodeOffset | ||
27 | * 0: Local resources space | ||
28 | * node id: | ||
29 | * 0: IA64/NT compatibility space | ||
30 | * 2: Local MMR Space | ||
31 | * 4: Local memory, regardless of local node id | ||
32 | * 1: Global MMR space | ||
33 | * 01: GET space. | ||
34 | * 10: AMO space. | ||
35 | * 11: Cacheable memory space. | ||
36 | * | ||
37 | * NodeOffset: byte offset | ||
38 | * | ||
39 | * | ||
40 | * TIO address format: | ||
41 | * +-+----------+--+--------------+ | ||
42 | * |0| NASID |AS| Nodeoffset | | ||
43 | * +-+----------+--+--------------+ | ||
44 | * | ||
45 | * NASID: (low NASID bit is 1) TIO | ||
46 | * AS: 2-bit Chiplet Identifier | ||
47 | * 00: TIO LB (Indicates TIO MMR access.) | ||
48 | * 01: TIO ICE (indicates coretalk space access.) | ||
49 | * | ||
50 | * NodeOffset: top bit must be set. | ||
51 | * | ||
52 | * | ||
53 | * Note that in both of the above address formats, the low | ||
54 | * NASID bit indicates if the reference is to the SHUB or TIO MMRs. | ||
55 | */ | ||
56 | |||
57 | |||
58 | /* | ||
59 | * Define basic shift & mask constants for manipulating NASIDs and AS values. | ||
60 | */ | ||
61 | #define NASID_BITMASK (sn_hub_info->nasid_bitmask) | ||
62 | #define NASID_SHIFT (sn_hub_info->nasid_shift) | ||
63 | #define AS_SHIFT (sn_hub_info->as_shift) | ||
64 | #define AS_BITMASK 0x3UL | ||
65 | |||
66 | #define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT) | ||
67 | #define AS_MASK ((u64)AS_BITMASK << AS_SHIFT) | ||
68 | |||
69 | |||
70 | /* | ||
71 | * AS values. These are the same on both SHUB1 & SHUB2. | ||
72 | */ | ||
73 | #define AS_GET_VAL 1UL | ||
74 | #define AS_AMO_VAL 2UL | ||
75 | #define AS_CAC_VAL 3UL | ||
76 | #define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT) | ||
77 | #define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT) | ||
78 | #define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT) | ||
79 | |||
80 | |||
81 | /* | ||
82 | * Virtual Mode Local & Global MMR space. | ||
83 | */ | ||
84 | #define SH1_LOCAL_MMR_OFFSET 0x8000000000UL | ||
85 | #define SH2_LOCAL_MMR_OFFSET 0x0200000000UL | ||
86 | #define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET) | ||
87 | #define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET) | ||
88 | #define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET) | ||
89 | |||
90 | #define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL | ||
91 | #define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL | ||
92 | #define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET) | ||
93 | #define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET) | ||
94 | |||
95 | /* | ||
96 | * Physical mode addresses | ||
97 | */ | ||
98 | #define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET) | ||
99 | |||
100 | |||
101 | /* | ||
102 | * Clear region & AS bits. | ||
103 | */ | ||
104 | #define TO_PHYS_MASK (~(RGN_BITS | AS_MASK)) | ||
105 | |||
106 | |||
107 | /* | ||
108 | * Misc NASID manipulation. | ||
109 | */ | ||
110 | #define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT) | ||
111 | #define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a)) | ||
112 | #define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1)) | ||
113 | #define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT) | ||
114 | #define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK) | ||
115 | #define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a)) | ||
116 | #define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a)) | ||
117 | #define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a)) | ||
118 | #define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a)) | ||
119 | #define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n))) | ||
120 | #define IS_TIO_NASID(n) ((n) & 1) | ||
121 | |||
122 | |||
123 | /* non-II mmr's start at top of big window space (4G) */ | ||
124 | #define BWIN_TOP 0x0000000100000000UL | ||
125 | |||
126 | /* | ||
127 | * general address defines | ||
128 | */ | ||
129 | #define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE) | ||
130 | #define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE) | ||
131 | #define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE) | ||
132 | #define GET_BASE (PAGE_OFFSET | AS_GET_SPACE) | ||
133 | |||
134 | /* | ||
135 | * Convert Memory addresses between various addressing modes. | ||
136 | */ | ||
137 | #define TO_PHYS(x) (TO_PHYS_MASK & (x)) | ||
138 | #define TO_CAC(x) (CAC_BASE | TO_PHYS(x)) | ||
139 | #ifdef CONFIG_SGI_SN | ||
140 | #define TO_AMO(x) (AMO_BASE | TO_PHYS(x)) | ||
141 | #define TO_GET(x) (GET_BASE | TO_PHYS(x)) | ||
142 | #else | ||
143 | #define TO_AMO(x) ({ BUG(); x; }) | ||
144 | #define TO_GET(x) ({ BUG(); x; }) | ||
145 | #endif | ||
146 | |||
147 | /* | ||
148 | * Covert from processor physical address to II/TIO physical address: | ||
149 | * II - squeeze out the AS bits | ||
150 | * TIO- requires a chiplet id in bits 38-39. For DMA to memory, | ||
151 | * the chiplet id is zero. If we implement TIO-TIO dma, we might need | ||
152 | * to insert a chiplet id into this macro. However, it is our belief | ||
153 | * right now that this chiplet id will be ICE, which is also zero. | ||
154 | */ | ||
155 | #define SH1_TIO_PHYS_TO_DMA(x) \ | ||
156 | ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x)) | ||
157 | |||
158 | #define SH2_NETWORK_BANK_OFFSET(x) \ | ||
159 | ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1)) | ||
160 | |||
161 | #define SH2_NETWORK_BANK_SELECT(x) \ | ||
162 | ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \ | ||
163 | >> (sn_hub_info->nasid_shift - 4)) << 36) | ||
164 | |||
165 | #define SH2_NETWORK_ADDRESS(x) \ | ||
166 | (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x)) | ||
167 | |||
168 | #define SH2_TIO_PHYS_TO_DMA(x) \ | ||
169 | (((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x)) | ||
170 | |||
171 | #define PHYS_TO_TIODMA(x) \ | ||
172 | (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x)) | ||
173 | |||
174 | #define PHYS_TO_DMA(x) \ | ||
175 | ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) | ||
176 | |||
177 | |||
178 | /* | ||
179 | * Macros to test for address type. | ||
180 | */ | ||
181 | #define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE) | ||
182 | #define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE) | ||
183 | |||
184 | |||
185 | /* | ||
186 | * The following definitions pertain to the IO special address | ||
187 | * space. They define the location of the big and little windows | ||
188 | * of any given node. | ||
189 | */ | ||
190 | #define BWIN_SIZE_BITS 29 /* big window size: 512M */ | ||
191 | #define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */ | ||
192 | #define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \ | ||
193 | : RAW_NODE_SWIN_BASE(n, w)) | ||
194 | #define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \ | ||
195 | ((u64) (w) << TIO_SWIN_SIZE_BITS)) | ||
196 | #define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n)) | ||
197 | #define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n)) | ||
198 | #define BWIN_SIZE (1UL << BWIN_SIZE_BITS) | ||
199 | #define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE) | ||
200 | #define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS)) | ||
201 | #define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS)) | ||
202 | #define BWIN_WIDGET_MASK 0x7 | ||
203 | #define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK) | ||
204 | #define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP) | ||
205 | |||
206 | #define TIO_BWIN_WINDOW_SELECT_MASK 0x7 | ||
207 | #define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK) | ||
208 | |||
209 | #define TIO_HWIN_SHIFT_BITS 33 | ||
210 | #define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS) | ||
211 | |||
212 | /* | ||
213 | * The following definitions pertain to the IO special address | ||
214 | * space. They define the location of the big and little windows | ||
215 | * of any given node. | ||
216 | */ | ||
217 | |||
218 | #define SWIN_SIZE_BITS 24 | ||
219 | #define SWIN_WIDGET_MASK 0xF | ||
220 | |||
221 | #define TIO_SWIN_SIZE_BITS 28 | ||
222 | #define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS) | ||
223 | #define TIO_SWIN_WIDGET_MASK 0x3 | ||
224 | |||
225 | /* | ||
226 | * Convert smallwindow address to xtalk address. | ||
227 | * | ||
228 | * 'addr' can be physical or virtual address, but will be converted | ||
229 | * to Xtalk address in the range 0 -> SWINZ_SIZEMASK | ||
230 | */ | ||
231 | #define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK) | ||
232 | #define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK) | ||
233 | |||
234 | |||
235 | /* | ||
236 | * The following macros produce the correct base virtual address for | ||
237 | * the hub registers. The REMOTE_HUB_* macro produce | ||
238 | * the address for the specified hub's registers. The intent is | ||
239 | * that the appropriate PI, MD, NI, or II register would be substituted | ||
240 | * for x. | ||
241 | * | ||
242 | * WARNING: | ||
243 | * When certain Hub chip workaround are defined, it's not sufficient | ||
244 | * to dereference the *_HUB_ADDR() macros. You should instead use | ||
245 | * HUB_L() and HUB_S() if you must deal with pointers to hub registers. | ||
246 | * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S(). | ||
247 | * They're always safe. | ||
248 | */ | ||
249 | /* Shub1 TIO & MMR addressing macros */ | ||
250 | #define SH1_TIO_IOSPACE_ADDR(n,x) \ | ||
251 | GLOBAL_MMR_ADDR(n,x) | ||
252 | |||
253 | #define SH1_REMOTE_BWIN_MMR(n,x) \ | ||
254 | GLOBAL_MMR_ADDR(n,x) | ||
255 | |||
256 | #define SH1_REMOTE_SWIN_MMR(n,x) \ | ||
257 | (NODE_SWIN_BASE(n,1) + 0x800000UL + (x)) | ||
258 | |||
259 | #define SH1_REMOTE_MMR(n,x) \ | ||
260 | (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \ | ||
261 | SH1_REMOTE_SWIN_MMR(n,x)) | ||
262 | |||
263 | /* Shub1 TIO & MMR addressing macros */ | ||
264 | #define SH2_TIO_IOSPACE_ADDR(n,x) \ | ||
265 | ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2))) | ||
266 | |||
267 | #define SH2_REMOTE_MMR(n,x) \ | ||
268 | GLOBAL_MMR_ADDR(n,x) | ||
269 | |||
270 | |||
271 | /* TIO & MMR addressing macros that work on both shub1 & shub2 */ | ||
272 | #define TIO_IOSPACE_ADDR(n,x) \ | ||
273 | ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \ | ||
274 | SH2_TIO_IOSPACE_ADDR(n,x))) | ||
275 | |||
276 | #define SH_REMOTE_MMR(n,x) \ | ||
277 | (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x)) | ||
278 | |||
279 | #define REMOTE_HUB_ADDR(n,x) \ | ||
280 | (IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \ | ||
281 | ((volatile u64*)SH_REMOTE_MMR(n,x))) | ||
282 | |||
283 | |||
284 | #define HUB_L(x) (*((volatile typeof(*x) *)x)) | ||
285 | #define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d)) | ||
286 | |||
287 | #define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a))) | ||
288 | #define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d)) | ||
289 | |||
290 | /* | ||
291 | * Coretalk address breakdown | ||
292 | */ | ||
293 | #define CTALK_NASID_SHFT 40 | ||
294 | #define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT) | ||
295 | #define CTALK_CID_SHFT 38 | ||
296 | #define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT) | ||
297 | #define CTALK_NODE_OFFSET 0x3FFFFFFFFF | ||
298 | |||
299 | #endif /* _ASM_IA64_SN_ADDRS_H */ | ||
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
deleted file mode 100644
index 31eb784866f8..000000000000
--- a/arch/ia64/include/asm/sn/arch.h
+++ /dev/null
@@ -1,86 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI specific setup. | ||
7 | * | ||
8 | * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved. | ||
9 | * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) | ||
10 | */ | ||
11 | #ifndef _ASM_IA64_SN_ARCH_H | ||
12 | #define _ASM_IA64_SN_ARCH_H | ||
13 | |||
14 | #include <linux/numa.h> | ||
15 | #include <asm/types.h> | ||
16 | #include <asm/percpu.h> | ||
17 | #include <asm/sn/types.h> | ||
18 | #include <asm/sn/sn_cpuid.h> | ||
19 | |||
20 | /* | ||
21 | * This is the maximum number of NUMALINK nodes that can be part of a single | ||
22 | * SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in | ||
23 | * remote partitions are NOT included in this number. | ||
24 | * The number of compact nodes cannot exceed size of a coherency domain. | ||
25 | * The purpose of this define is to specify a node count that includes | ||
26 | * all C/M/TIO nodes in an SSI system. | ||
27 | * | ||
28 | * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes. | ||
29 | * | ||
30 | * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade | ||
31 | * to ACPI3.0, this limit will be removed. The notion of "compact nodes" | ||
32 | * should be deleted and TIOs should be included in MAX_NUMNODES. | ||
33 | */ | ||
34 | #define MAX_TIO_NODES MAX_NUMNODES | ||
35 | #define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES) | ||
36 | |||
37 | /* | ||
38 | * Maximum number of nodes in all partitions and in all coherency domains. | ||
39 | * This is the total number of nodes accessible in the numalink fabric. It | ||
40 | * includes all C & M bricks, plus all TIOs. | ||
41 | * | ||
42 | * This value is also the value of the maximum number of NASIDs in the numalink | ||
43 | * fabric. | ||
44 | */ | ||
45 | #define MAX_NUMALINK_NODES 16384 | ||
46 | |||
47 | /* | ||
48 | * The following defines attributes of the HUB chip. These attributes are | ||
49 | * frequently referenced. They are kept in the per-cpu data areas of each cpu. | ||
50 | * They are kept together in a struct to minimize cache misses. | ||
51 | */ | ||
52 | struct sn_hub_info_s { | ||
53 | u8 shub2; | ||
54 | u8 nasid_shift; | ||
55 | u8 as_shift; | ||
56 | u8 shub_1_1_found; | ||
57 | u16 nasid_bitmask; | ||
58 | }; | ||
59 | DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | ||
60 | #define sn_hub_info this_cpu_ptr(&__sn_hub_info) | ||
61 | #define is_shub2() (sn_hub_info->shub2) | ||
62 | #define is_shub1() (sn_hub_info->shub2 == 0) | ||
63 | |||
64 | /* | ||
65 | * Use this macro to test if shub 1.1 wars should be enabled | ||
66 | */ | ||
67 | #define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found) | ||
68 | |||
69 | |||
70 | /* | ||
71 | * Compact node ID to nasid mappings kept in the per-cpu data areas of each | ||
72 | * cpu. | ||
73 | */ | ||
74 | DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); | ||
75 | #define sn_cnodeid_to_nasid this_cpu_ptr(&__sn_cnodeid_to_nasid[0]) | ||
76 | |||
77 | |||
78 | extern u8 sn_partition_id; | ||
79 | extern u8 sn_system_size; | ||
80 | extern u8 sn_sharing_domain_size; | ||
81 | extern u8 sn_region_size; | ||
82 | |||
83 | extern void sn_flush_all_caches(long addr, long bytes); | ||
84 | extern bool sn_cpu_disable_allowed(int cpu); | ||
85 | |||
86 | #endif /* _ASM_IA64_SN_ARCH_H */ | ||
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
deleted file mode 100644
index cd71ab5faf62..000000000000
--- a/arch/ia64/include/asm/sn/bte.h
+++ /dev/null
@@ -1,236 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | #ifndef _ASM_IA64_SN_BTE_H | ||
11 | #define _ASM_IA64_SN_BTE_H | ||
12 | |||
13 | #include <linux/timer.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/cache.h> | ||
16 | #include <asm/sn/pda.h> | ||
17 | #include <asm/sn/types.h> | ||
18 | #include <asm/sn/shub_mmr.h> | ||
19 | |||
20 | struct nodepda_s; | ||
21 | |||
22 | #define IBCT_NOTIFY (0x1UL << 4) | ||
23 | #define IBCT_ZFIL_MODE (0x1UL << 0) | ||
24 | |||
25 | /* #define BTE_DEBUG */ | ||
26 | /* #define BTE_DEBUG_VERBOSE */ | ||
27 | |||
28 | #ifdef BTE_DEBUG | ||
29 | # define BTE_PRINTK(x) printk x /* Terse */ | ||
30 | # ifdef BTE_DEBUG_VERBOSE | ||
31 | # define BTE_PRINTKV(x) printk x /* Verbose */ | ||
32 | # else | ||
33 | # define BTE_PRINTKV(x) | ||
34 | # endif /* BTE_DEBUG_VERBOSE */ | ||
35 | #else | ||
36 | # define BTE_PRINTK(x) | ||
37 | # define BTE_PRINTKV(x) | ||
38 | #endif /* BTE_DEBUG */ | ||
39 | |||
40 | |||
41 | /* BTE status register only supports 16 bits for length field */ | ||
42 | #define BTE_LEN_BITS (16) | ||
43 | #define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1) | ||
44 | #define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT) | ||
45 | |||
46 | |||
47 | /* Define hardware */ | ||
48 | #define BTES_PER_NODE (is_shub2() ? 4 : 2) | ||
49 | #define MAX_BTES_PER_NODE 4 | ||
50 | |||
51 | #define BTE2OFF_CTRL 0 | ||
52 | #define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0) | ||
53 | #define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0) | ||
54 | #define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0) | ||
55 | |||
56 | #define BTE_BASE_ADDR(interface) \ | ||
57 | (is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 : \ | ||
58 | (interface == 1) ? SH2_BT_ENG_CSR_1 : \ | ||
59 | (interface == 2) ? SH2_BT_ENG_CSR_2 : \ | ||
60 | SH2_BT_ENG_CSR_3 \ | ||
61 | : (interface == 0) ? IIO_IBLS0 : IIO_IBLS1) | ||
62 | |||
63 | #define BTE_SOURCE_ADDR(base) \ | ||
64 | (is_shub2() ? base + (BTE2OFF_SRC/8) \ | ||
65 | : base + (BTEOFF_SRC/8)) | ||
66 | |||
67 | #define BTE_DEST_ADDR(base) \ | ||
68 | (is_shub2() ? base + (BTE2OFF_DEST/8) \ | ||
69 | : base + (BTEOFF_DEST/8)) | ||
70 | |||
71 | #define BTE_CTRL_ADDR(base) \ | ||
72 | (is_shub2() ? base + (BTE2OFF_CTRL/8) \ | ||
73 | : base + (BTEOFF_CTRL/8)) | ||
74 | |||
75 | #define BTE_NOTIF_ADDR(base) \ | ||
76 | (is_shub2() ? base + (BTE2OFF_NOTIFY/8) \ | ||
77 | : base + (BTEOFF_NOTIFY/8)) | ||
78 | |||
79 | /* Define hardware modes */ | ||
80 | #define BTE_NOTIFY IBCT_NOTIFY | ||
81 | #define BTE_NORMAL BTE_NOTIFY | ||
82 | #define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) | ||
83 | /* Use a reserved bit to let the caller specify a wait for any BTE */ | ||
84 | #define BTE_WACQUIRE 0x4000 | ||
85 | /* Use the BTE on the node with the destination memory */ | ||
86 | #define BTE_USE_DEST (BTE_WACQUIRE << 1) | ||
87 | /* Use any available BTE interface on any node for the transfer */ | ||
88 | #define BTE_USE_ANY (BTE_USE_DEST << 1) | ||
89 | /* macro to force the IBCT0 value valid */ | ||
90 | #define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE)) | ||
91 | |||
92 | #define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR) | ||
93 | #define BTE_WORD_AVAILABLE (IBLS_BUSY << 1) | ||
94 | #define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE) | ||
95 | |||
96 | /* | ||
97 | * Some macros to simplify reading. | ||
98 | * Start with macros to locate the BTE control registers. | ||
99 | */ | ||
100 | #define BTE_LNSTAT_LOAD(_bte) \ | ||
101 | HUB_L(_bte->bte_base_addr) | ||
102 | #define BTE_LNSTAT_STORE(_bte, _x) \ | ||
103 | HUB_S(_bte->bte_base_addr, (_x)) | ||
104 | #define BTE_SRC_STORE(_bte, _x) \ | ||
105 | ({ \ | ||
106 | u64 __addr = ((_x) & ~AS_MASK); \ | ||
107 | if (is_shub2()) \ | ||
108 | __addr = SH2_TIO_PHYS_TO_DMA(__addr); \ | ||
109 | HUB_S(_bte->bte_source_addr, __addr); \ | ||
110 | }) | ||
111 | #define BTE_DEST_STORE(_bte, _x) \ | ||
112 | ({ \ | ||
113 | u64 __addr = ((_x) & ~AS_MASK); \ | ||
114 | if (is_shub2()) \ | ||
115 | __addr = SH2_TIO_PHYS_TO_DMA(__addr); \ | ||
116 | HUB_S(_bte->bte_destination_addr, __addr); \ | ||
117 | }) | ||
118 | #define BTE_CTRL_STORE(_bte, _x) \ | ||
119 | HUB_S(_bte->bte_control_addr, (_x)) | ||
120 | #define BTE_NOTIF_STORE(_bte, _x) \ | ||
121 | ({ \ | ||
122 | u64 __addr = ia64_tpa((_x) & ~AS_MASK); \ | ||
123 | if (is_shub2()) \ | ||
124 | __addr = SH2_TIO_PHYS_TO_DMA(__addr); \ | ||
125 | HUB_S(_bte->bte_notify_addr, __addr); \ | ||
126 | }) | ||
127 | |||
128 | #define BTE_START_TRANSFER(_bte, _len, _mode) \ | ||
129 | is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \ | ||
130 | : BTE_LNSTAT_STORE(_bte, _len); \ | ||
131 | BTE_CTRL_STORE(_bte, _mode) | ||
132 | |||
133 | /* Possible results from bte_copy and bte_unaligned_copy */ | ||
134 | /* The following error codes map into the BTE hardware codes | ||
135 | * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses | ||
136 | * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero | ||
137 | * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error | ||
138 | * codes to give the following error codes. | ||
139 | */ | ||
140 | #define BTEFAIL_OFFSET 1 | ||
141 | |||
142 | typedef enum { | ||
143 | BTE_SUCCESS, /* 0 is success */ | ||
144 | BTEFAIL_DIR, /* Directory error due to IIO access*/ | ||
145 | BTEFAIL_POISON, /* poison error on IO access (write to poison page) */ | ||
146 | BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */ | ||
147 | BTEFAIL_ACCESS, /* access error (protection violation) */ | ||
148 | BTEFAIL_PWERR, /* Partial Write Error */ | ||
149 | BTEFAIL_PRERR, /* Partial Read Error */ | ||
150 | BTEFAIL_TOUT, /* CRB Time out */ | ||
151 | BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */ | ||
152 | BTEFAIL_NOTAVAIL, /* BTE not available */ | ||
153 | } bte_result_t; | ||
154 | |||
155 | #define BTEFAIL_SH2_RESP_SHORT 0x1 /* bit 000001 */ | ||
156 | #define BTEFAIL_SH2_RESP_LONG 0x2 /* bit 000010 */ | ||
157 | #define BTEFAIL_SH2_RESP_DSP 0x4 /* bit 000100 */ | ||
158 | #define BTEFAIL_SH2_RESP_ACCESS 0x8 /* bit 001000 */ | ||
159 | #define BTEFAIL_SH2_CRB_TO 0x10 /* bit 010000 */ | ||
160 | #define BTEFAIL_SH2_NACK_LIMIT 0x20 /* bit 100000 */ | ||
161 | #define BTEFAIL_SH2_ALL 0x3F /* bit 111111 */ | ||
162 | |||
163 | #define BTE_ERR_BITS 0x3FUL | ||
164 | #define BTE_ERR_SHIFT 36 | ||
165 | #define BTE_ERR_MASK (BTE_ERR_BITS << BTE_ERR_SHIFT) | ||
166 | |||
167 | #define BTE_ERROR_RETRY(value) \ | ||
168 | (is_shub2() ? (value != BTEFAIL_SH2_CRB_TO) \ | ||
169 | : (value != BTEFAIL_TOUT)) | ||
170 | |||
171 | /* | ||
172 | * On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2() | ||
173 | */ | ||
174 | #define BTE_SHUB2_ERROR(_status) \ | ||
175 | ((_status & BTE_ERR_MASK) \ | ||
176 | ? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \ | ||
177 | : _status) | ||
178 | |||
179 | #define BTE_GET_ERROR_STATUS(_status) \ | ||
180 | (BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR) | ||
181 | |||
182 | #define BTE_VALID_SH2_ERROR(value) \ | ||
183 | ((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL)) | ||
184 | |||
185 | /* | ||
186 | * Structure defining a bte. An instance of this | ||
187 | * structure is created in the nodepda for each | ||
188 | * bte on that node (as defined by BTES_PER_NODE) | ||
189 | * This structure contains everything necessary | ||
190 | * to work with a BTE. | ||
191 | */ | ||
192 | struct bteinfo_s { | ||
193 | volatile u64 notify ____cacheline_aligned; | ||
194 | u64 *bte_base_addr ____cacheline_aligned; | ||
195 | u64 *bte_source_addr; | ||
196 | u64 *bte_destination_addr; | ||
197 | u64 *bte_control_addr; | ||
198 | u64 *bte_notify_addr; | ||
199 | spinlock_t spinlock; | ||
200 | cnodeid_t bte_cnode; /* cnode */ | ||
201 | int bte_error_count; /* Number of errors encountered */ | ||
202 | int bte_num; /* 0 --> BTE0, 1 --> BTE1 */ | ||
203 | int cleanup_active; /* Interface is locked for cleanup */ | ||
204 | volatile bte_result_t bh_error; /* error while processing */ | ||
205 | volatile u64 *most_rcnt_na; | ||
206 | struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE]; | ||
207 | }; | ||
208 | |||
209 | |||
210 | /* | ||
211 | * Function prototypes (functions defined in bte.c, used elsewhere) | ||
212 | */ | ||
213 | extern bte_result_t bte_copy(u64, u64, u64, u64, void *); | ||
214 | extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64); | ||
215 | extern void bte_error_handler(struct nodepda_s *); | ||
216 | |||
217 | #define bte_zero(dest, len, mode, notification) \ | ||
218 | bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification) | ||
219 | |||
220 | /* | ||
221 | * The following is the preferred way of calling bte_unaligned_copy | ||
222 | * If the copy is fully cache line aligned, then bte_copy is | ||
223 | * used instead. Since bte_copy is inlined, this saves a call | ||
224 | * stack. NOTE: bte_copy is called synchronously and does block | ||
225 | * until the transfer is complete. In order to get the asynch | ||
226 | * version of bte_copy, you must perform this check yourself. | ||
227 | */ | ||
228 | #define BTE_UNALIGNED_COPY(src, dest, len, mode) \ | ||
229 | (((len & (L1_CACHE_BYTES - 1)) || \ | ||
230 | (src & (L1_CACHE_BYTES - 1)) || \ | ||
231 | (dest & (L1_CACHE_BYTES - 1))) ? \ | ||
232 | bte_unaligned_copy(src, dest, len, mode) : \ | ||
233 | bte_copy(src, dest, len, mode, NULL)) | ||
234 | |||
235 | |||
236 | #endif /* _ASM_IA64_SN_BTE_H */ | ||
diff --git a/arch/ia64/include/asm/sn/clksupport.h b/arch/ia64/include/asm/sn/clksupport.h
deleted file mode 100644
index d340c365a824..000000000000
--- a/arch/ia64/include/asm/sn/clksupport.h
+++ /dev/null
@@ -1,28 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This file contains definitions for accessing a platform supported high resolution | ||
11 | * clock. The clock is monitonically increasing and can be accessed from any node | ||
12 | * in the system. The clock is synchronized across nodes - all nodes see the | ||
13 | * same value. | ||
14 | * | ||
15 | * RTC_COUNTER_ADDR - contains the address of the counter | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_IA64_SN_CLKSUPPORT_H | ||
20 | #define _ASM_IA64_SN_CLKSUPPORT_H | ||
21 | |||
22 | extern unsigned long sn_rtc_cycles_per_second; | ||
23 | |||
24 | #define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) | ||
25 | |||
26 | #define rtc_time() (*RTC_COUNTER_ADDR) | ||
27 | |||
28 | #endif /* _ASM_IA64_SN_CLKSUPPORT_H */ | ||
diff --git a/arch/ia64/include/asm/sn/geo.h b/arch/ia64/include/asm/sn/geo.h
deleted file mode 100644
index f083c9434066..000000000000
--- a/arch/ia64/include/asm/sn/geo.h
+++ /dev/null
@@ -1,132 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_GEO_H | ||
10 | #define _ASM_IA64_SN_GEO_H | ||
11 | |||
12 | /* The geoid_t implementation below is based loosely on the pcfg_t | ||
13 | implementation in sys/SN/promcfg.h. */ | ||
14 | |||
15 | /* Type declaractions */ | ||
16 | |||
17 | /* Size of a geoid_t structure (must be before decl. of geoid_u) */ | ||
18 | #define GEOID_SIZE 8 /* Would 16 be better? The size can | ||
19 | be different on different platforms. */ | ||
20 | |||
21 | #define MAX_SLOTS 0xf /* slots per module */ | ||
22 | #define MAX_SLABS 0xf /* slabs per slot */ | ||
23 | |||
24 | typedef unsigned char geo_type_t; | ||
25 | |||
26 | /* Fields common to all substructures */ | ||
27 | typedef struct geo_common_s { | ||
28 | moduleid_t module; /* The module (box) this h/w lives in */ | ||
29 | geo_type_t type; /* What type of h/w is named by this geoid_t */ | ||
30 | slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */ | ||
31 | slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */ | ||
32 | } geo_common_t; | ||
33 | |||
34 | /* Additional fields for particular types of hardware */ | ||
35 | typedef struct geo_node_s { | ||
36 | geo_common_t common; /* No additional fields needed */ | ||
37 | } geo_node_t; | ||
38 | |||
39 | typedef struct geo_rtr_s { | ||
40 | geo_common_t common; /* No additional fields needed */ | ||
41 | } geo_rtr_t; | ||
42 | |||
43 | typedef struct geo_iocntl_s { | ||
44 | geo_common_t common; /* No additional fields needed */ | ||
45 | } geo_iocntl_t; | ||
46 | |||
47 | typedef struct geo_pcicard_s { | ||
48 | geo_iocntl_t common; | ||
49 | char bus; /* Bus/widget number */ | ||
50 | char slot; /* PCI slot number */ | ||
51 | } geo_pcicard_t; | ||
52 | |||
53 | /* Subcomponents of a node */ | ||
54 | typedef struct geo_cpu_s { | ||
55 | geo_node_t node; | ||
56 | char slice; /* Which CPU on the node */ | ||
57 | } geo_cpu_t; | ||
58 | |||
59 | typedef struct geo_mem_s { | ||
60 | geo_node_t node; | ||
61 | char membus; /* The memory bus on the node */ | ||
62 | char memslot; /* The memory slot on the bus */ | ||
63 | } geo_mem_t; | ||
64 | |||
65 | |||
66 | typedef union geoid_u { | ||
67 | geo_common_t common; | ||
68 | geo_node_t node; | ||
69 | geo_iocntl_t iocntl; | ||
70 | geo_pcicard_t pcicard; | ||
71 | geo_rtr_t rtr; | ||
72 | geo_cpu_t cpu; | ||
73 | geo_mem_t mem; | ||
74 | char padsize[GEOID_SIZE]; | ||
75 | } geoid_t; | ||
76 | |||
77 | |||
78 | /* Preprocessor macros */ | ||
79 | |||
80 | #define GEO_MAX_LEN 48 /* max. formatted length, plus some pad: | ||
81 | module/001c07/slab/5/node/memory/2/slot/4 */ | ||
82 | |||
83 | /* Values for geo_type_t */ | ||
84 | #define GEO_TYPE_INVALID 0 | ||
85 | #define GEO_TYPE_MODULE 1 | ||
86 | #define GEO_TYPE_NODE 2 | ||
87 | #define GEO_TYPE_RTR 3 | ||
88 | #define GEO_TYPE_IOCNTL 4 | ||
89 | #define GEO_TYPE_IOCARD 5 | ||
90 | #define GEO_TYPE_CPU 6 | ||
91 | #define GEO_TYPE_MEM 7 | ||
92 | #define GEO_TYPE_MAX (GEO_TYPE_MEM+1) | ||
93 | |||
94 | /* Parameter for hwcfg_format_geoid_compt() */ | ||
95 | #define GEO_COMPT_MODULE 1 | ||
96 | #define GEO_COMPT_SLAB 2 | ||
97 | #define GEO_COMPT_IOBUS 3 | ||
98 | #define GEO_COMPT_IOSLOT 4 | ||
99 | #define GEO_COMPT_CPU 5 | ||
100 | #define GEO_COMPT_MEMBUS 6 | ||
101 | #define GEO_COMPT_MEMSLOT 7 | ||
102 | |||
103 | #define GEO_INVALID_STR "<invalid>" | ||
104 | |||
105 | #define INVALID_NASID ((nasid_t)-1) | ||
106 | #define INVALID_CNODEID ((cnodeid_t)-1) | ||
107 | #define INVALID_PNODEID ((pnodeid_t)-1) | ||
108 | #define INVALID_SLAB (slabid_t)-1 | ||
109 | #define INVALID_SLOT (slotid_t)-1 | ||
110 | #define INVALID_MODULE ((moduleid_t)-1) | ||
111 | |||
112 | static inline slabid_t geo_slab(geoid_t g) | ||
113 | { | ||
114 | return (g.common.type == GEO_TYPE_INVALID) ? | ||
115 | INVALID_SLAB : g.common.slab; | ||
116 | } | ||
117 | |||
118 | static inline slotid_t geo_slot(geoid_t g) | ||
119 | { | ||
120 | return (g.common.type == GEO_TYPE_INVALID) ? | ||
121 | INVALID_SLOT : g.common.slot; | ||
122 | } | ||
123 | |||
124 | static inline moduleid_t geo_module(geoid_t g) | ||
125 | { | ||
126 | return (g.common.type == GEO_TYPE_INVALID) ? | ||
127 | INVALID_MODULE : g.common.module; | ||
128 | } | ||
129 | |||
130 | extern geoid_t cnodeid_get_geoid(cnodeid_t cnode); | ||
131 | |||
132 | #endif /* _ASM_IA64_SN_GEO_H */ | ||
diff --git a/arch/ia64/include/asm/sn/intr.h b/arch/ia64/include/asm/sn/intr.h
index e0487aa97418..3885a77b21df 100644
--- a/arch/ia64/include/asm/sn/intr.h
+++ b/arch/ia64/include/asm/sn/intr.h
@@ -9,60 +9,7 @@
9 | #ifndef _ASM_IA64_SN_INTR_H | 9 | #ifndef _ASM_IA64_SN_INTR_H |
10 | #define _ASM_IA64_SN_INTR_H | 10 | #define _ASM_IA64_SN_INTR_H |
11 | 11 | ||
12 | #include <linux/rcupdate.h> | ||
13 | #include <asm/sn/types.h> | ||
14 | |||
15 | #define SGI_UART_VECTOR 0xe9 | ||
16 | |||
17 | /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */ | ||
18 | #define SGI_XPC_ACTIVATE 0x30 | 12 | #define SGI_XPC_ACTIVATE 0x30 |
19 | #define SGI_II_ERROR 0x31 | ||
20 | #define SGI_XBOW_ERROR 0x32 | ||
21 | #define SGI_PCIASIC_ERROR 0x33 | ||
22 | #define SGI_ACPI_SCI_INT 0x34 | ||
23 | #define SGI_TIOCA_ERROR 0x35 | ||
24 | #define SGI_TIO_ERROR 0x36 | ||
25 | #define SGI_TIOCX_ERROR 0x37 | ||
26 | #define SGI_MMTIMER_VECTOR 0x38 | ||
27 | #define SGI_XPC_NOTIFY 0xe7 | 13 | #define SGI_XPC_NOTIFY 0xe7 |
28 | 14 | ||
29 | #define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c | ||
30 | #define IA64_SN2_LAST_DEVICE_VECTOR 0xe6 | ||
31 | |||
32 | #define SN2_IRQ_RESERVED 0x1 | ||
33 | #define SN2_IRQ_CONNECTED 0x2 | ||
34 | #define SN2_IRQ_SHARED 0x4 | ||
35 | |||
36 | // The SN PROM irq struct | ||
37 | struct sn_irq_info { | ||
38 | struct sn_irq_info *irq_next; /* deprecated DO NOT USE */ | ||
39 | short irq_nasid; /* Nasid IRQ is assigned to */ | ||
40 | int irq_slice; /* slice IRQ is assigned to */ | ||
41 | int irq_cpuid; /* kernel logical cpuid */ | ||
42 | int irq_irq; /* the IRQ number */ | ||
43 | int irq_int_bit; /* Bridge interrupt pin */ | ||
44 | /* <0 means MSI */ | ||
45 | u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */ | ||
46 | int irq_bridge_type;/* pciio asic type (pciio.h) */ | ||
47 | void *irq_bridge; /* bridge generating irq */ | ||
48 | void *irq_pciioinfo; /* associated pciio_info_t */ | ||
49 | int irq_last_intr; /* For Shub lb lost intr WAR */ | ||
50 | int irq_cookie; /* unique cookie */ | ||
51 | int irq_flags; /* flags */ | ||
52 | int irq_share_cnt; /* num devices sharing IRQ */ | ||
53 | struct list_head list; /* list of sn_irq_info structs */ | ||
54 | struct rcu_head rcu; /* rcu callback list */ | ||
55 | }; | ||
56 | |||
57 | extern void sn_send_IPI_phys(int, long, int, int); | ||
58 | extern u64 sn_intr_alloc(nasid_t, int, | ||
59 | struct sn_irq_info *, | ||
60 | int, nasid_t, int); | ||
61 | extern void sn_intr_free(nasid_t, int, struct sn_irq_info *); | ||
62 | extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int); | ||
63 | extern void sn_set_err_irq_affinity(unsigned int); | ||
64 | extern struct list_head **sn_irq_lh; | ||
65 | |||
66 | #define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector) | ||
67 | |||
68 | #endif /* _ASM_IA64_SN_INTR_H */ | 15 | #endif /* _ASM_IA64_SN_INTR_H */ |
diff --git a/arch/ia64/include/asm/sn/io.h b/arch/ia64/include/asm/sn/io.h
deleted file mode 100644
index 41c73a735628..000000000000
--- a/arch/ia64/include/asm/sn/io.h
+++ /dev/null
@@ -1,274 +0,0 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_SN_IO_H | ||
10 | #define _ASM_SN_IO_H | ||
11 | #include <linux/compiler.h> | ||
12 | #include <asm/intrinsics.h> | ||
13 | |||
14 | extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */ | ||
15 | extern void __sn_mmiowb(void); /* Forward definition */ | ||
16 | |||
17 | extern int num_cnodes; | ||
18 | |||
19 | #define __sn_mf_a() ia64_mfa() | ||
20 | |||
21 | extern void sn_dma_flush(unsigned long); | ||
22 | |||
23 | #define __sn_inb ___sn_inb | ||
24 | #define __sn_inw ___sn_inw | ||
25 | #define __sn_inl ___sn_inl | ||
26 | #define __sn_outb ___sn_outb | ||
27 | #define __sn_outw ___sn_outw | ||
28 | #define __sn_outl ___sn_outl | ||
29 | #define __sn_readb ___sn_readb | ||
30 | #define __sn_readw ___sn_readw | ||
31 | #define __sn_readl ___sn_readl | ||
32 | #define __sn_readq ___sn_readq | ||
33 | #define __sn_readb_relaxed ___sn_readb_relaxed | ||
34 | #define __sn_readw_relaxed ___sn_readw_relaxed | ||
35 | #define __sn_readl_relaxed ___sn_readl_relaxed | ||
36 | #define __sn_readq_relaxed ___sn_readq_relaxed | ||
37 | |||
38 | /* | ||
39 | * Convenience macros for setting/clearing bits using the above accessors | ||
40 | */ | ||
41 | |||
42 | #define __sn_setq_relaxed(addr, val) \ | ||
43 | writeq((__sn_readq_relaxed(addr) | (val)), (addr)) | ||
44 | #define __sn_clrq_relaxed(addr, val) \ | ||
45 | writeq((__sn_readq_relaxed(addr) & ~(val)), (addr)) | ||
46 | |||
47 | /* | ||
48 | * The following routines are SN Platform specific, called when | ||
49 | * a reference is made to inX/outX set macros. SN Platform | ||
50 | * inX set of macros ensures that Posted DMA writes on the | ||
51 | * Bridge is flushed. | ||
52 | * | ||
53 | * The routines should be self explainatory. | ||
54 | */ | ||
55 | |||
56 | static inline unsigned int | ||
57 | ___sn_inb (unsigned long port) | ||
58 | { | ||
59 | volatile unsigned char *addr; | ||
60 | unsigned char ret = -1; | ||
61 | |||
62 | if ((addr = sn_io_addr(port))) { | ||
63 | ret = *addr; | ||
64 | __sn_mf_a(); | ||
65 | sn_dma_flush((unsigned long)addr); | ||
66 | } | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int | ||
71 | ___sn_inw (unsigned long port) | ||
72 | { | ||
73 | volatile unsigned short *addr; | ||
74 | unsigned short ret = -1; | ||
75 | |||
76 | if ((addr = sn_io_addr(port))) { | ||
77 | ret = *addr; | ||
78 | __sn_mf_a(); | ||
79 | sn_dma_flush((unsigned long)addr); | ||
80 | } | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | static inline unsigned int | ||
85 | ___sn_inl (unsigned long port) | ||
86 | { | ||
87 | volatile unsigned int *addr; | ||
88 | unsigned int ret = -1; | ||
89 | |||
90 | if ((addr = sn_io_addr(port))) { | ||
91 | ret = *addr; | ||
92 | __sn_mf_a(); | ||
93 | sn_dma_flush((unsigned long)addr); | ||
94 | } | ||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | static inline void | ||
99 | ___sn_outb (unsigned char val, unsigned long port) | ||
100 | { | ||
101 | volatile unsigned char *addr; | ||
102 | |||
103 | if ((addr = sn_io_addr(port))) { | ||
104 | *addr = val; | ||
105 | __sn_mmiowb(); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | static inline void | ||
110 | ___sn_outw (unsigned short val, unsigned long port) | ||
111 | { | ||
112 | volatile unsigned short *addr; | ||
113 | |||
114 | if ((addr = sn_io_addr(port))) { | ||
115 | *addr = val; | ||
116 | __sn_mmiowb(); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static inline void | ||
121 | ___sn_outl (unsigned int val, unsigned long port) | ||
122 | { | ||
123 | volatile unsigned int *addr; | ||
124 | |||
125 | if ((addr = sn_io_addr(port))) { | ||
126 | *addr = val; | ||
127 | __sn_mmiowb(); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * The following routines are SN Platform specific, called when | ||
133 | * a reference is made to readX/writeX set macros. SN Platform | ||
134 | * readX set of macros ensures that Posted DMA writes on the | ||
135 | * Bridge is flushed. | ||
136 | * | ||
137 | * The routines should be self explainatory. | ||
138 | */ | ||
139 | |||
140 | static inline unsigned char | ||
141 | ___sn_readb (const volatile void __iomem *addr) | ||
142 | { | ||
143 | unsigned char val; | ||
144 | |||
145 | val = *(volatile unsigned char __force *)addr; | ||
146 | __sn_mf_a(); | ||
147 | sn_dma_flush((unsigned long)addr); | ||
148 | return val; | ||
149 | } | ||
150 | |||
151 | static inline unsigned short | ||
152 | ___sn_readw (const volatile void __iomem *addr) | ||
153 | { | ||
154 | unsigned short val; | ||
155 | |||
156 | val = *(volatile unsigned short __force *)addr; | ||
157 | __sn_mf_a(); | ||
158 | sn_dma_flush((unsigned long)addr); | ||
159 | return val; | ||
160 | } | ||
161 | |||
162 | static inline unsigned int | ||
163 | ___sn_readl (const volatile void __iomem *addr) | ||
164 | { | ||
165 | unsigned int val; | ||
166 | |||
167 | val = *(volatile unsigned int __force *)addr; | ||
168 | __sn_mf_a(); | ||
169 | sn_dma_flush((unsigned long)addr); | ||
170 | return val; | ||
171 | } | ||
172 | |||
173 | static inline unsigned long | ||
174 | ___sn_readq (const volatile void __iomem *addr) | ||
175 | { | ||
176 | unsigned long val; | ||
177 | |||
178 | val = *(volatile unsigned long __force *)addr; | ||
179 | __sn_mf_a(); | ||
180 | sn_dma_flush((unsigned long)addr); | ||
181 | return val; | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * For generic and SN2 kernels, we have a set of fast access | ||
186 | * PIO macros. These macros are provided on the SN Platform | ||
187 | * because the normal inX and readX macros perform the | ||
188 | * additional task of flushing posted DMA requests on the Bridge. | ||
189 | * | ||
190 | * These routines should be self-explanatory. | ||
191 | */ | ||
192 | |||
193 | static inline unsigned int | ||
194 | sn_inb_fast (unsigned long port) | ||
195 | { | ||
196 | volatile unsigned char *addr = (unsigned char *)port; | ||
197 | unsigned char ret; | ||
198 | |||
199 | ret = *addr; | ||
200 | __sn_mf_a(); | ||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | static inline unsigned int | ||
205 | sn_inw_fast (unsigned long port) | ||
206 | { | ||
207 | volatile unsigned short *addr = (unsigned short *)port; | ||
208 | unsigned short ret; | ||
209 | |||
210 | ret = *addr; | ||
211 | __sn_mf_a(); | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static inline unsigned int | ||
216 | sn_inl_fast (unsigned long port) | ||
217 | { | ||
218 | volatile unsigned int *addr = (unsigned int *)port; | ||
219 | unsigned int ret; | ||
220 | |||
221 | ret = *addr; | ||
222 | __sn_mf_a(); | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | static inline unsigned char | ||
227 | ___sn_readb_relaxed (const volatile void __iomem *addr) | ||
228 | { | ||
229 | return *(volatile unsigned char __force *)addr; | ||
230 | } | ||
231 | |||
232 | static inline unsigned short | ||
233 | ___sn_readw_relaxed (const volatile void __iomem *addr) | ||
234 | { | ||
235 | return *(volatile unsigned short __force *)addr; | ||
236 | } | ||
237 | |||
238 | static inline unsigned int | ||
239 | ___sn_readl_relaxed (const volatile void __iomem *addr) | ||
240 | { | ||
241 | return *(volatile unsigned int __force *) addr; | ||
242 | } | ||
243 | |||
244 | static inline unsigned long | ||
245 | ___sn_readq_relaxed (const volatile void __iomem *addr) | ||
246 | { | ||
247 | return *(volatile unsigned long __force *) addr; | ||
248 | } | ||
249 | |||
250 | struct pci_dev; | ||
251 | |||
252 | static inline int | ||
253 | sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan) | ||
254 | { | ||
255 | |||
256 | if (vchan > 1) { | ||
257 | return -1; | ||
258 | } | ||
259 | |||
260 | if (!(*addr >> 32)) /* Using a mask here would be cleaner */ | ||
261 | return 0; /* but this generates better code */ | ||
262 | |||
263 | if (vchan == 1) { | ||
264 | /* Set Bit 57 */ | ||
265 | *addr |= (1UL << 57); | ||
266 | } else { | ||
267 | /* Clear Bit 57 */ | ||
268 | *addr &= ~(1UL << 57); | ||
269 | } | ||
270 | |||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | #endif /* _ASM_SN_IO_H */ | ||
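The sn_pci_set_vchan() routine above steers a 64-bit direct-mapped bus address onto one of two DMA virtual channels by setting or clearing bit 57, and deliberately leaves 32-bit addresses untouched. The following is a minimal, purely illustrative sketch of a caller; the function name, 'dev' and 'bus_addr' are hypothetical.

/* Illustrative only: route a 64-bit bus address onto virtual channel 1
 * using sn_pci_set_vchan() as defined above.
 */
static int route_to_vchan1(struct pci_dev *dev, unsigned long *bus_addr)
{
	/* vchan must be 0 or 1; anything else is rejected with -1 */
	if (sn_pci_set_vchan(dev, bus_addr, 1) < 0)
		return -1;

	/* For 32-bit addresses the call is a no-op (it returns 0 without
	 * touching *bus_addr), since bit 57 only exists in the 64-bit
	 * direct-mapped address format.
	 */
	return 0;
}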
diff --git a/arch/ia64/include/asm/sn/ioc3.h b/arch/ia64/include/asm/sn/ioc3.h deleted file mode 100644 index d4a524951df3..000000000000 --- a/arch/ia64/include/asm/sn/ioc3.h +++ /dev/null | |||
@@ -1,242 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (C) 2005 Silicon Graphics, Inc. | ||
4 | */ | ||
5 | #ifndef IA64_SN_IOC3_H | ||
6 | #define IA64_SN_IOC3_H | ||
7 | |||
8 | /* serial port register map */ | ||
9 | struct ioc3_serialregs { | ||
10 | uint32_t sscr; | ||
11 | uint32_t stpir; | ||
12 | uint32_t stcir; | ||
13 | uint32_t srpir; | ||
14 | uint32_t srcir; | ||
15 | uint32_t srtr; | ||
16 | uint32_t shadow; | ||
17 | }; | ||
18 | |||
19 | /* SUPERIO uart register map */ | ||
20 | struct ioc3_uartregs { | ||
21 | char iu_lcr; | ||
22 | union { | ||
23 | char iir; /* read only */ | ||
24 | char fcr; /* write only */ | ||
25 | } u3; | ||
26 | union { | ||
27 | char ier; /* DLAB == 0 */ | ||
28 | char dlm; /* DLAB == 1 */ | ||
29 | } u2; | ||
30 | union { | ||
31 | char rbr; /* read only, DLAB == 0 */ | ||
32 | char thr; /* write only, DLAB == 0 */ | ||
33 | char dll; /* DLAB == 1 */ | ||
34 | } u1; | ||
35 | char iu_scr; | ||
36 | char iu_msr; | ||
37 | char iu_lsr; | ||
38 | char iu_mcr; | ||
39 | }; | ||
40 | |||
41 | #define iu_rbr u1.rbr | ||
42 | #define iu_thr u1.thr | ||
43 | #define iu_dll u1.dll | ||
44 | #define iu_ier u2.ier | ||
45 | #define iu_dlm u2.dlm | ||
46 | #define iu_iir u3.iir | ||
47 | #define iu_fcr u3.fcr | ||
48 | |||
49 | struct ioc3_sioregs { | ||
50 | char fill[0x170]; | ||
51 | struct ioc3_uartregs uartb; | ||
52 | struct ioc3_uartregs uarta; | ||
53 | }; | ||
54 | |||
55 | /* PCI IO/mem space register map */ | ||
56 | struct ioc3 { | ||
57 | uint32_t pci_id; | ||
58 | uint32_t pci_scr; | ||
59 | uint32_t pci_rev; | ||
60 | uint32_t pci_lat; | ||
61 | uint32_t pci_addr; | ||
62 | uint32_t pci_err_addr_l; | ||
63 | uint32_t pci_err_addr_h; | ||
64 | |||
65 | uint32_t sio_ir; | ||
66 | /* these registers are read-only for general kernel code. To | ||
67 | * modify them use the functions in ioc3.c | ||
68 | */ | ||
69 | uint32_t sio_ies; | ||
70 | uint32_t sio_iec; | ||
71 | uint32_t sio_cr; | ||
72 | uint32_t int_out; | ||
73 | uint32_t mcr; | ||
74 | uint32_t gpcr_s; | ||
75 | uint32_t gpcr_c; | ||
76 | uint32_t gpdr; | ||
77 | uint32_t gppr[9]; | ||
78 | char fill[0x4c]; | ||
79 | |||
80 | /* serial port registers */ | ||
81 | uint32_t sbbr_h; | ||
82 | uint32_t sbbr_l; | ||
83 | |||
84 | struct ioc3_serialregs port_a; | ||
85 | struct ioc3_serialregs port_b; | ||
86 | char fill1[0x1ff10]; | ||
87 | /* superio registers */ | ||
88 | struct ioc3_sioregs sregs; | ||
89 | }; | ||
90 | |||
91 | /* These don't exist on the ioc3 serial card... */ | ||
92 | #define eier fill1[8] | ||
93 | #define eisr fill1[4] | ||
94 | |||
95 | #define PCI_LAT 0xc /* Latency Timer */ | ||
96 | #define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */ | ||
97 | #define UARTA_BASE 0x178 | ||
98 | #define UARTB_BASE 0x170 | ||
99 | |||
100 | |||
101 | /* bitmasks for serial RX status byte */ | ||
102 | #define RXSB_OVERRUN 0x01 /* char(s) lost */ | ||
103 | #define RXSB_PAR_ERR 0x02 /* parity error */ | ||
104 | #define RXSB_FRAME_ERR 0x04 /* framing error */ | ||
105 | #define RXSB_BREAK 0x08 /* break character */ | ||
106 | #define RXSB_CTS 0x10 /* state of CTS */ | ||
107 | #define RXSB_DCD 0x20 /* state of DCD */ | ||
108 | #define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */ | ||
109 | #define RXSB_DATA_VALID 0x80 /* FRAME_ERR PAR_ERR & BREAK valid */ | ||
110 | |||
111 | /* bitmasks for serial TX control byte */ | ||
112 | #define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */ | ||
113 | #define TXCB_INVALID 0x00 /* byte is invalid */ | ||
114 | #define TXCB_VALID 0x40 /* byte is valid */ | ||
115 | #define TXCB_MCR 0x80 /* data<7:0> to modem cntrl register */ | ||
116 | #define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */ | ||
117 | |||
118 | /* bitmasks for SBBR_L */ | ||
119 | #define SBBR_L_SIZE 0x00000001 /* 0 1KB rings, 1 4KB rings */ | ||
120 | |||
121 | /* bitmasks for SSCR_<A:B> */ | ||
122 | #define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */ | ||
123 | #define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */ | ||
124 | #define SSCR_HFC_EN 0x00020000 /* h/w flow cntrl enabled */ | ||
125 | #define SSCR_RX_RING_DCD 0x00040000 /* postRX record on delta-DCD */ | ||
126 | #define SSCR_RX_RING_CTS 0x00080000 /* postRX record on delta-CTS */ | ||
127 | #define SSCR_HIGH_SPD 0x00100000 /* 4X speed */ | ||
128 | #define SSCR_DIAG 0x00200000 /* bypass clock divider */ | ||
129 | #define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */ | ||
130 | #define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */ | ||
131 | #define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */ | ||
132 | #define SSCR_PAUSE_STATE 0x40000000 /* set when PAUSE takes effect*/ | ||
133 | #define SSCR_RESET 0x80000000 /* reset DMA channels */ | ||
134 | |||
135 | /* all producer/consumer pointers are the same bitfield */ | ||
136 | #define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */ | ||
137 | #define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */ | ||
138 | #define PROD_CONS_PTR_OFF 3 | ||
139 | |||
140 | /* bitmasks for SRCIR_<A:B> */ | ||
141 | #define SRCIR_ARM 0x80000000 /* arm RX timer */ | ||
142 | |||
143 | /* bitmasks for SHADOW_<A:B> */ | ||
144 | #define SHADOW_DR 0x00000001 /* data ready */ | ||
145 | #define SHADOW_OE 0x00000002 /* overrun error */ | ||
146 | #define SHADOW_PE 0x00000004 /* parity error */ | ||
147 | #define SHADOW_FE 0x00000008 /* framing error */ | ||
148 | #define SHADOW_BI 0x00000010 /* break interrupt */ | ||
149 | #define SHADOW_THRE 0x00000020 /* transmit holding reg empty */ | ||
150 | #define SHADOW_TEMT 0x00000040 /* transmit shift reg empty */ | ||
151 | #define SHADOW_RFCE 0x00000080 /* char in RX fifo has error */ | ||
152 | #define SHADOW_DCTS 0x00010000 /* delta clear to send */ | ||
153 | #define SHADOW_DDCD 0x00080000 /* delta data carrier detect */ | ||
154 | #define SHADOW_CTS 0x00100000 /* clear to send */ | ||
155 | #define SHADOW_DCD 0x00800000 /* data carrier detect */ | ||
156 | #define SHADOW_DTR 0x01000000 /* data terminal ready */ | ||
157 | #define SHADOW_RTS 0x02000000 /* request to send */ | ||
158 | #define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */ | ||
159 | #define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */ | ||
160 | #define SHADOW_LOOP 0x10000000 /* loopback enabled */ | ||
161 | |||
162 | /* bitmasks for SRTR_<A:B> */ | ||
163 | #define SRTR_CNT 0x00000fff /* reload value for RX timer */ | ||
164 | #define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */ | ||
165 | #define SRTR_CNT_VAL_SHIFT 16 | ||
166 | #define SRTR_HZ 16000 /* SRTR clock frequency */ | ||
167 | |||
168 | /* bitmasks for SIO_IR, SIO_IEC and SIO_IES */ | ||
169 | #define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */ | ||
170 | #define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */ | ||
171 | #define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */ | ||
172 | #define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */ | ||
173 | #define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */ | ||
174 | #define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */ | ||
175 | #define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */ | ||
176 | #define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */ | ||
177 | #define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */ | ||
178 | #define SIO_IR_SB_TX_MT 0x00000200 | ||
179 | #define SIO_IR_SB_RX_FULL 0x00000400 | ||
180 | #define SIO_IR_SB_RX_HIGH 0x00000800 | ||
181 | #define SIO_IR_SB_RX_TIMER 0x00001000 | ||
182 | #define SIO_IR_SB_DELTA_DCD 0x00002000 | ||
183 | #define SIO_IR_SB_DELTA_CTS 0x00004000 | ||
184 | #define SIO_IR_SB_INT 0x00008000 | ||
185 | #define SIO_IR_SB_TX_EXPLICIT 0x00010000 | ||
186 | #define SIO_IR_SB_MEMERR 0x00020000 | ||
187 | #define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */ | ||
188 | #define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */ | ||
189 | #define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */ | ||
190 | #define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */ | ||
191 | #define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */ | ||
192 | #define SIO_IR_RT_INT 0x08000000 /* RT output pulse */ | ||
193 | #define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */ | ||
194 | #define SIO_IR_GEN_INT_SHIFT 28 | ||
195 | |||
196 | /* per device interrupt masks */ | ||
197 | #define SIO_IR_SA (SIO_IR_SA_TX_MT | \ | ||
198 | SIO_IR_SA_RX_FULL | \ | ||
199 | SIO_IR_SA_RX_HIGH | \ | ||
200 | SIO_IR_SA_RX_TIMER | \ | ||
201 | SIO_IR_SA_DELTA_DCD | \ | ||
202 | SIO_IR_SA_DELTA_CTS | \ | ||
203 | SIO_IR_SA_INT | \ | ||
204 | SIO_IR_SA_TX_EXPLICIT | \ | ||
205 | SIO_IR_SA_MEMERR) | ||
206 | |||
207 | #define SIO_IR_SB (SIO_IR_SB_TX_MT | \ | ||
208 | SIO_IR_SB_RX_FULL | \ | ||
209 | SIO_IR_SB_RX_HIGH | \ | ||
210 | SIO_IR_SB_RX_TIMER | \ | ||
211 | SIO_IR_SB_DELTA_DCD | \ | ||
212 | SIO_IR_SB_DELTA_CTS | \ | ||
213 | SIO_IR_SB_INT | \ | ||
214 | SIO_IR_SB_TX_EXPLICIT | \ | ||
215 | SIO_IR_SB_MEMERR) | ||
216 | |||
217 | #define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \ | ||
218 | SIO_IR_PP_INTB | SIO_IR_PP_MEMERR) | ||
219 | #define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1) | ||
220 | |||
221 | /* bitmasks for SIO_CR */ | ||
222 | #define SIO_CR_CMD_PULSE_SHIFT 15 | ||
223 | #define SIO_CR_SER_A_BASE_SHIFT 1 | ||
224 | #define SIO_CR_SER_B_BASE_SHIFT 8 | ||
225 | #define SIO_CR_ARB_DIAG 0x00380000 /* cur !enet PCI request (ro) */ | ||
226 | #define SIO_CR_ARB_DIAG_TXA 0x00000000 | ||
227 | #define SIO_CR_ARB_DIAG_RXA 0x00080000 | ||
228 | #define SIO_CR_ARB_DIAG_TXB 0x00100000 | ||
229 | #define SIO_CR_ARB_DIAG_RXB 0x00180000 | ||
230 | #define SIO_CR_ARB_DIAG_PP 0x00200000 | ||
231 | #define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */ | ||
232 | |||
233 | /* defs for some of the generic I/O pins */ | ||
234 | #define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */ | ||
235 | #define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */ | ||
236 | #define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */ | ||
237 | |||
238 | #define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */ | ||
239 | #define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrling uartb modeselect */ | ||
240 | #define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrling uarta modeselect */ | ||
241 | |||
242 | #endif /* IA64_SN_IOC3_H */ | ||
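The RXSB_* bits above describe how the IOC3 tags each received character with a status byte, with RXSB_MODEM_VALID and RXSB_DATA_VALID gating which of the other flags are meaningful. A hedged sketch of decoding such a byte follows; the helper name is made up and fetching the byte from the RX ring is not shown.

/* Illustrative sketch: interpret an IOC3 serial RX status byte using the
 * RXSB_* masks defined above. Returns nonzero if the paired data byte
 * carries a line error.
 */
static int ioc3_rx_status_has_error(unsigned char rxsb)
{
	int err = 0;

	/* FRAME_ERR, PAR_ERR and BREAK are only meaningful with DATA_VALID */
	if (rxsb & RXSB_DATA_VALID)
		err |= rxsb & (RXSB_PAR_ERR | RXSB_FRAME_ERR | RXSB_BREAK);

	/* OVERRUN (like DCD and CTS) is only meaningful with MODEM_VALID */
	if (rxsb & RXSB_MODEM_VALID)
		err |= rxsb & RXSB_OVERRUN;

	return err;
}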
diff --git a/arch/ia64/include/asm/sn/klconfig.h b/arch/ia64/include/asm/sn/klconfig.h deleted file mode 100644 index bcbf209d63be..000000000000 --- a/arch/ia64/include/asm/sn/klconfig.h +++ /dev/null | |||
@@ -1,246 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Derived from IRIX <sys/SN/klconfig.h>. | ||
7 | * | ||
8 | * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. | ||
9 | * Copyright (C) 1999 by Ralf Baechle | ||
10 | */ | ||
11 | #ifndef _ASM_IA64_SN_KLCONFIG_H | ||
12 | #define _ASM_IA64_SN_KLCONFIG_H | ||
13 | |||
14 | /* | ||
15 | * The KLCONFIG structures store info about the various BOARDs found | ||
16 | * during Hardware Discovery. In addition, it stores info about the | ||
17 | * components found on the BOARDs. | ||
18 | */ | ||
19 | |||
20 | typedef s32 klconf_off_t; | ||
21 | |||
22 | |||
23 | /* Functions/macros needed to use this structure */ | ||
24 | |||
25 | typedef struct kl_config_hdr { | ||
26 | char pad[20]; | ||
27 | klconf_off_t ch_board_info; /* the link list of boards */ | ||
28 | char pad0[88]; | ||
29 | } kl_config_hdr_t; | ||
30 | |||
31 | |||
32 | #define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off))) | ||
33 | |||
34 | /* | ||
35 | * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD | ||
36 | * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to | ||
37 | * the LOCAL/current NODE. REMOTE means it is attached to a different | ||
38 | * node.(TBD - Need a way to treat ROUTER boards.) | ||
39 | * | ||
40 | * There are 2 different structures to represent these boards - | ||
41 | * lboard - Local board, rboard - remote board. These 2 structures | ||
42 | * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer | ||
43 | * Figure below). The first byte of the rboard or lboard structure | ||
44 | * is used to find out its type - no unions are used. | ||
45 | * If it is an lboard, then the config info of this board will be found | ||
46 | * on the local node. (LOCAL NODE BASE + offset value gives a pointer to | ||
47 | * the structure.) | ||
48 | * If it is a rboard, the local structure contains the node number | ||
49 | * and the offset of the beginning of the LINKED LIST on the remote node. | ||
50 | * The details of the hardware on a remote node can be built locally, | ||
51 | * if required, by reading the LINKED LIST on the remote node and | ||
52 | * ignoring all the rboards on that node. | ||
53 | * | ||
54 | * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the | ||
55 | * First board info on the remote node. The remote node list is | ||
56 | * traversed as the local list, using the REMOTE BASE ADDRESS and not | ||
57 | * the local base address and ignoring all rboard values. | ||
58 | * | ||
59 | * | ||
60 | KLCONFIG | ||
61 | |||
62 | +------------+ +------------+ +------------+ +------------+ | ||
63 | | lboard | +-->| lboard | +-->| rboard | +-->| lboard | | ||
64 | +------------+ | +------------+ | +------------+ | +------------+ | ||
65 | | board info | | | board info | | |errinfo,bptr| | | board info | | ||
66 | +------------+ | +------------+ | +------------+ | +------------+ | ||
67 | | offset |--+ | offset |--+ | offset |--+ |offset=NULL | | ||
68 | +------------+ +------------+ +------------+ +------------+ | ||
69 | |||
70 | |||
71 | +------------+ | ||
72 | | board info | | ||
73 | +------------+ +--------------------------------+ | ||
74 | | compt 1 |------>| type, rev, diaginfo, size ... | (CPU) | ||
75 | +------------+ +--------------------------------+ | ||
76 | | compt 2 |--+ | ||
77 | +------------+ | +--------------------------------+ | ||
78 | | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK) | ||
79 | +------------+ +--------------------------------+ | ||
80 | | errinfo |--+ | ||
81 | +------------+ | +--------------------------------+ | ||
82 | +--->|r/l brd errinfo,compt err flags | | ||
83 | +--------------------------------+ | ||
84 | |||
85 | * | ||
86 | * Each BOARD consists of COMPONENTs and the BOARD structure has | ||
87 | * pointers (offsets) to its COMPONENT structure. | ||
88 | * The COMPONENT structure has version info, size and speed info, revision, | ||
89 | * error info and the NIC info. This structure can accommodate any | ||
90 | * BOARD with arbitrary COMPONENT composition. | ||
91 | * | ||
92 | * The ERRORINFO part of each BOARD has error information | ||
93 | * that describes errors about the BOARD itself. It also has flags to | ||
94 | * indicate the COMPONENT(s) on the board that have errors. The error | ||
95 | * information specific to the COMPONENT is present in the respective | ||
96 | * COMPONENT structure. | ||
97 | * | ||
98 | * The ERRORINFO structure is also treated like a COMPONENT, ie. the | ||
99 | * BOARD has pointers(offset) to the ERRORINFO structure. The rboard | ||
100 | * structure also has a pointer to the ERRORINFO structure. This is | ||
101 | * the place to store ERRORINFO about a REMOTE NODE, if the HUB on | ||
102 | * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where | ||
103 | * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can | ||
104 | * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info | ||
105 | * which is present on the REMOTE NODE.(TBD) | ||
106 | * REMOTE ERRINFO can be stored on any of the nearest nodes | ||
107 | * or on all the nearest nodes.(TBD) | ||
108 | * Like BOARD structures, REMOTE ERRINFO structures can be built locally | ||
109 | * using the rboard errinfo pointer. | ||
110 | * | ||
111 | * In order to get useful information from this data organization, a set of | ||
112 | * interface routines is provided (TBD). The important thing to remember while | ||
113 | * manipulating the structures is that the NODE number information should | ||
114 | * be used. If the NODE is non-zero (remote) then each offset should | ||
115 | * be added to the REMOTE BASE ADDR, else it should be added to the LOCAL BASE ADDR. | ||
116 | * This includes offsets for BOARDS, COMPONENTS and ERRORINFO. | ||
117 | * | ||
118 | * Note that these structures do not provide much info about connectivity. | ||
119 | * That info will be part of HWGRAPH, which is an extension of the cfg_t | ||
120 | * data structure. (ref IP27prom/cfg.h) It has to be extended to include | ||
121 | * the IO part of the Network(TBD). | ||
122 | * | ||
123 | * The data structures below define the above concepts. | ||
124 | */ | ||
125 | |||
126 | |||
127 | /* | ||
128 | * BOARD classes | ||
129 | */ | ||
130 | |||
131 | #define KLCLASS_MASK 0xf0 | ||
132 | #define KLCLASS_NONE 0x00 | ||
133 | #define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */ | ||
134 | #define KLCLASS_CPU KLCLASS_NODE | ||
135 | #define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI | ||
136 | and the non-graphics widget boards */ | ||
137 | #define KLCLASS_ROUTER 0x30 /* Router board */ | ||
138 | #define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board | ||
139 | so that we can record error info */ | ||
140 | #define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */ | ||
141 | #define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */ | ||
142 | |||
143 | #define KLCLASS(_x) ((_x) & KLCLASS_MASK) | ||
144 | |||
145 | |||
146 | /* | ||
147 | * board types | ||
148 | */ | ||
149 | |||
150 | #define KLTYPE_MASK 0x0f | ||
151 | #define KLTYPE(_x) ((_x) & KLTYPE_MASK) | ||
152 | |||
153 | #define KLTYPE_SNIA (KLCLASS_CPU | 0x1) | ||
154 | #define KLTYPE_TIO (KLCLASS_CPU | 0x2) | ||
155 | |||
156 | #define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1) | ||
157 | #define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3) | ||
158 | #define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4) | ||
159 | |||
160 | #define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2) | ||
161 | |||
162 | #define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0) | ||
163 | #define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4) | ||
164 | #define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6) | ||
165 | #define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7) | ||
166 | #define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8) | ||
167 | #define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9) | ||
168 | #define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa) | ||
169 | #define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb) | ||
170 | #define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc) | ||
171 | #define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd) | ||
172 | |||
173 | |||
174 | /* | ||
175 | * board structures | ||
176 | */ | ||
177 | |||
178 | #define MAX_COMPTS_PER_BRD 24 | ||
179 | |||
180 | typedef struct lboard_s { | ||
181 | klconf_off_t brd_next_any; /* Next BOARD */ | ||
182 | unsigned char struct_type; /* type of structure, local or remote */ | ||
183 | unsigned char brd_type; /* type+class */ | ||
184 | unsigned char brd_sversion; /* version of this structure */ | ||
185 | unsigned char brd_brevision; /* board revision */ | ||
186 | unsigned char brd_promver; /* board prom version, if any */ | ||
187 | unsigned char brd_flags; /* Enabled, Disabled etc */ | ||
188 | unsigned char brd_slot; /* slot number */ | ||
189 | unsigned short brd_debugsw; /* Debug switches */ | ||
190 | geoid_t brd_geoid; /* geo id */ | ||
191 | partid_t brd_partition; /* Partition number */ | ||
192 | unsigned short brd_diagval; /* diagnostic value */ | ||
193 | unsigned short brd_diagparm; /* diagnostic parameter */ | ||
194 | unsigned char brd_inventory; /* inventory history */ | ||
195 | unsigned char brd_numcompts; /* Number of components */ | ||
196 | nic_t brd_nic; /* Number in CAN */ | ||
197 | nasid_t brd_nasid; /* passed parameter */ | ||
198 | klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */ | ||
199 | klconf_off_t brd_errinfo; /* Board's error information */ | ||
200 | struct lboard_s *brd_parent; /* Logical parent for this brd */ | ||
201 | char pad0[4]; | ||
202 | unsigned char brd_confidence; /* confidence that the board is bad */ | ||
203 | nasid_t brd_owner; /* who owns this board */ | ||
204 | unsigned char brd_nic_flags; /* To handle 8 more NICs */ | ||
205 | char pad1[24]; /* future expansion */ | ||
206 | char brd_name[32]; | ||
207 | nasid_t brd_next_same_host; /* host of next brd w/same nasid */ | ||
208 | klconf_off_t brd_next_same; /* Next BOARD with same nasid */ | ||
209 | } lboard_t; | ||
210 | |||
211 | /* | ||
212 | * Generic info structure. This stores common info about a | ||
213 | * component. | ||
214 | */ | ||
215 | |||
216 | typedef struct klinfo_s { /* Generic info */ | ||
217 | unsigned char struct_type; /* type of this structure */ | ||
218 | unsigned char struct_version; /* version of this structure */ | ||
219 | unsigned char flags; /* Enabled, disabled etc */ | ||
220 | unsigned char revision; /* component revision */ | ||
221 | unsigned short diagval; /* result of diagnostics */ | ||
222 | unsigned short diagparm; /* diagnostic parameter */ | ||
223 | unsigned char inventory; /* previous inventory status */ | ||
224 | unsigned short partid; /* widget part number */ | ||
225 | nic_t nic; /* Must be aligned properly */ | ||
226 | unsigned char physid; /* physical id of component */ | ||
227 | unsigned int virtid; /* virtual id as seen by system */ | ||
228 | unsigned char widid; /* Widget id - if applicable */ | ||
229 | nasid_t nasid; /* node number - from parent */ | ||
230 | char pad1; /* pad out structure. */ | ||
231 | char pad2; /* pad out structure. */ | ||
232 | void *data; | ||
233 | klconf_off_t errinfo; /* component specific errors */ | ||
234 | unsigned short pad3; /* pci fields have moved over to */ | ||
235 | unsigned short pad4; /* klbri_t */ | ||
236 | } klinfo_t ; | ||
237 | |||
238 | |||
239 | static inline lboard_t *find_lboard_next(lboard_t * brd) | ||
240 | { | ||
241 | if (brd && brd->brd_next_any) | ||
242 | return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any); | ||
243 | return NULL; | ||
244 | } | ||
245 | |||
246 | #endif /* _ASM_IA64_SN_KLCONFIG_H */ | ||
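The comment above describes KLCONFIG as a linked list of board records reached through node-relative offsets, with find_lboard_next() following the brd_next_any links. Below is a hedged sketch of walking that list and filtering by board class; where the starting lboard comes from (the node's kl_config_hdr) is assumed and not shown.

/* Illustrative sketch: count the I/O-class boards reachable from a given
 * starting lboard by following the brd_next_any chain with
 * find_lboard_next().
 */
static int count_io_boards(lboard_t *first)
{
	lboard_t *brd;
	int count = 0;

	for (brd = first; brd; brd = find_lboard_next(brd))
		if (KLCLASS(brd->brd_type) == KLCLASS_IO)
			count++;

	return count;
}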
diff --git a/arch/ia64/include/asm/sn/l1.h b/arch/ia64/include/asm/sn/l1.h deleted file mode 100644 index 344bf44bb356..000000000000 --- a/arch/ia64/include/asm/sn/l1.h +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_L1_H | ||
10 | #define _ASM_IA64_SN_L1_H | ||
11 | |||
12 | /* brick type response codes */ | ||
13 | #define L1_BRICKTYPE_PX 0x23 /* # */ | ||
14 | #define L1_BRICKTYPE_PE 0x25 /* % */ | ||
15 | #define L1_BRICKTYPE_N_p0 0x26 /* & */ | ||
16 | #define L1_BRICKTYPE_IP45 0x34 /* 4 */ | ||
17 | #define L1_BRICKTYPE_IP41 0x35 /* 5 */ | ||
18 | #define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */ | ||
19 | #define L1_BRICKTYPE_IX 0x3d /* = */ | ||
20 | #define L1_BRICKTYPE_IP34 0x61 /* a */ | ||
21 | #define L1_BRICKTYPE_GA 0x62 /* b */ | ||
22 | #define L1_BRICKTYPE_C 0x63 /* c */ | ||
23 | #define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */ | ||
24 | #define L1_BRICKTYPE_I 0x69 /* i */ | ||
25 | #define L1_BRICKTYPE_N 0x6e /* n */ | ||
26 | #define L1_BRICKTYPE_OPUS 0x6f /* o */ | ||
27 | #define L1_BRICKTYPE_P 0x70 /* p */ | ||
28 | #define L1_BRICKTYPE_R 0x72 /* r */ | ||
29 | #define L1_BRICKTYPE_CHI_CG 0x76 /* v */ | ||
30 | #define L1_BRICKTYPE_X 0x78 /* x */ | ||
31 | #define L1_BRICKTYPE_X2 0x79 /* y */ | ||
32 | #define L1_BRICKTYPE_SA 0x5e /* ^ */ | ||
33 | #define L1_BRICKTYPE_PA 0x6a /* j */ | ||
34 | #define L1_BRICKTYPE_IA 0x6b /* k */ | ||
35 | #define L1_BRICKTYPE_ATHENA 0x2b /* + */ | ||
36 | #define L1_BRICKTYPE_DAYTONA 0x7a /* z */ | ||
37 | #define L1_BRICKTYPE_1932 0x2c /* . */ | ||
38 | #define L1_BRICKTYPE_191010 0x2e /* , */ | ||
39 | |||
40 | /* board type response codes */ | ||
41 | #define L1_BOARDTYPE_IP69 0x0100 /* CA */ | ||
42 | #define L1_BOARDTYPE_IP63 0x0200 /* CB */ | ||
43 | #define L1_BOARDTYPE_BASEIO 0x0300 /* IB */ | ||
44 | #define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */ | ||
45 | #define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */ | ||
46 | #define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */ | ||
47 | #define L1_BOARDTYPE_ABACUS 0x0700 /* AB */ | ||
48 | #define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */ | ||
49 | #define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */ | ||
50 | |||
51 | #endif /* _ASM_IA64_SN_L1_H */ | ||
diff --git a/arch/ia64/include/asm/sn/leds.h b/arch/ia64/include/asm/sn/leds.h deleted file mode 100644 index 66cf8c4d92c9..000000000000 --- a/arch/ia64/include/asm/sn/leds.h +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
6 | */ | ||
7 | #ifndef _ASM_IA64_SN_LEDS_H | ||
8 | #define _ASM_IA64_SN_LEDS_H | ||
9 | |||
10 | #include <asm/sn/addrs.h> | ||
11 | #include <asm/sn/pda.h> | ||
12 | #include <asm/sn/shub_mmr.h> | ||
13 | |||
14 | #define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0)) | ||
15 | #define LED_CPU_SHIFT 16 | ||
16 | |||
17 | #define LED_CPU_HEARTBEAT 0x01 | ||
18 | #define LED_CPU_ACTIVITY 0x02 | ||
19 | #define LED_ALWAYS_SET 0x00 | ||
20 | |||
21 | /* | ||
22 | * Basic macros for flashing the LEDS on an SGI SN. | ||
23 | */ | ||
24 | |||
25 | static __inline__ void | ||
26 | set_led_bits(u8 value, u8 mask) | ||
27 | { | ||
28 | pda->led_state = (pda->led_state & ~mask) | (value & mask); | ||
29 | *pda->led_address = (short) pda->led_state; | ||
30 | } | ||
31 | |||
32 | #endif /* _ASM_IA64_SN_LEDS_H */ | ||
33 | |||
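set_led_bits() above performs a read-modify-write on the per-cpu cached LED state and then writes it out to the LED MMR. A hedged sketch of the heartbeat pattern such a helper supports; driving it from a timer tick is an assumption for illustration, not something defined in this header.

/* Illustrative sketch: blink the heartbeat LED by alternately setting and
 * clearing LED_CPU_HEARTBEAT via set_led_bits(). 'tick' is hypothetical
 * (e.g. a timer-interrupt count).
 */
static inline void led_heartbeat_tick(unsigned long tick)
{
	if (tick & 1)
		set_led_bits(LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
	else
		set_led_bits(LED_ALWAYS_SET, LED_CPU_HEARTBEAT);
}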
diff --git a/arch/ia64/include/asm/sn/module.h b/arch/ia64/include/asm/sn/module.h deleted file mode 100644 index 734e980ece2f..000000000000 --- a/arch/ia64/include/asm/sn/module.h +++ /dev/null | |||
@@ -1,127 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_MODULE_H | ||
9 | #define _ASM_IA64_SN_MODULE_H | ||
10 | |||
11 | /* parameter for format_module_id() */ | ||
12 | #define MODULE_FORMAT_BRIEF 1 | ||
13 | #define MODULE_FORMAT_LONG 2 | ||
14 | #define MODULE_FORMAT_LCD 3 | ||
15 | |||
16 | /* | ||
17 | * Module id format | ||
18 | * | ||
19 | * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int) | ||
20 | * 15-8 Brick type (8-bit ascii character) | ||
21 | * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int) | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * Macros for getting the brick type | ||
27 | */ | ||
28 | #define MODULE_BTYPE_MASK 0xff00 | ||
29 | #define MODULE_BTYPE_SHFT 8 | ||
30 | #define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT) | ||
31 | #define MODULE_BT_TO_CHAR(_b) ((char)(_b)) | ||
32 | #define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m))) | ||
33 | |||
34 | /* | ||
35 | * Macros for getting the rack ID. | ||
36 | */ | ||
37 | #define MODULE_RACK_MASK 0xffff0000 | ||
38 | #define MODULE_RACK_SHFT 16 | ||
39 | #define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT) | ||
40 | |||
41 | /* | ||
42 | * Macros for getting the brick position | ||
43 | */ | ||
44 | #define MODULE_BPOS_MASK 0x00ff | ||
45 | #define MODULE_BPOS_SHFT 0 | ||
46 | #define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT) | ||
47 | |||
48 | /* | ||
49 | * Macros for encoding and decoding rack IDs | ||
50 | * A rack number consists of three parts: | ||
51 | * class (0==CPU/mixed, 1==I/O), group, number | ||
52 | * | ||
53 | * Rack number is stored just as it is displayed on the screen: | ||
54 | * a 3-decimal-digit number. | ||
55 | */ | ||
56 | #define RACK_CLASS_DVDR 100 | ||
57 | #define RACK_GROUP_DVDR 10 | ||
58 | #define RACK_NUM_DVDR 1 | ||
59 | |||
60 | #define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \ | ||
61 | (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR) | ||
62 | |||
63 | #define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR) | ||
64 | #define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \ | ||
65 | RACK_CLASS_DVDR) / RACK_GROUP_DVDR) | ||
66 | #define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \ | ||
67 | RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \ | ||
68 | RACK_GROUP_DVDR) / RACK_NUM_DVDR) | ||
69 | |||
70 | /* | ||
71 | * Macros for encoding and decoding rack IDs | ||
72 | * A rack number consists of three parts: | ||
73 | * class 1 bit, 0==CPU/mixed, 1==I/O | ||
74 | * group 2 bits for CPU/mixed, 3 bits for I/O | ||
75 | * number 3 bits for CPU/mixed, 2 bits for I/O (1 based) | ||
76 | */ | ||
77 | #define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2) | ||
78 | #define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3) | ||
79 | |||
80 | #define RACK_CLASS_MASK(_r) 0x20 | ||
81 | #define RACK_CLASS_SHFT(_r) 5 | ||
82 | #define RACK_ADD_CLASS(_r, _c) \ | ||
83 | ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r)) | ||
84 | |||
85 | #define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r) | ||
86 | #define RACK_GROUP_MASK(_r) \ | ||
87 | ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) ) | ||
88 | #define RACK_ADD_GROUP(_r, _g) \ | ||
89 | ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r)) | ||
90 | |||
91 | #define RACK_NUM_SHFT(_r) 0 | ||
92 | #define RACK_NUM_MASK(_r) \ | ||
93 | ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) ) | ||
94 | #define RACK_ADD_NUM(_r, _n) \ | ||
95 | ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r)) | ||
96 | |||
97 | |||
98 | /* | ||
99 | * Brick type definitions | ||
100 | */ | ||
101 | #define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */ | ||
102 | |||
103 | extern char brick_types[]; | ||
104 | |||
105 | #define MODULE_CBRICK 0 | ||
106 | #define MODULE_RBRICK 1 | ||
107 | #define MODULE_IBRICK 2 | ||
108 | #define MODULE_KBRICK 3 | ||
109 | #define MODULE_XBRICK 4 | ||
110 | #define MODULE_DBRICK 5 | ||
111 | #define MODULE_PBRICK 6 | ||
112 | #define MODULE_NBRICK 7 | ||
113 | #define MODULE_PEBRICK 8 | ||
114 | #define MODULE_PXBRICK 9 | ||
115 | #define MODULE_IXBRICK 10 | ||
116 | #define MODULE_CGBRICK 11 | ||
117 | #define MODULE_OPUSBRICK 12 | ||
118 | #define MODULE_SABRICK 13 /* TIO BringUp Brick */ | ||
119 | #define MODULE_IABRICK 14 | ||
120 | #define MODULE_PABRICK 15 | ||
121 | #define MODULE_GABRICK 16 | ||
122 | #define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */ | ||
123 | |||
124 | extern char brick_types[]; | ||
125 | extern void format_module_id(char *, moduleid_t, int); | ||
126 | |||
127 | #endif /* _ASM_IA64_SN_MODULE_H */ | ||
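The module id layout described above packs the rack id into bits 31-16, an ASCII brick-type character into bits 15-8 and the bay position into bits 7-0. A hedged sketch of pulling those fields apart with the accessor macros follows; the helper and its output struct are illustrative only, and real code would use format_module_id() for display.

/* Illustrative sketch: decode a moduleid_t into its three fields using the
 * accessor macros above.
 */
struct sketch_module_fields {
	unsigned int rack;	/* bits 31-16, further split by RACK_GET_*() */
	char brick_type;	/* bits 15-8, ASCII brick character */
	unsigned int bay;	/* bits 7-0, position in rack */
};

static void sketch_decode_module_id(moduleid_t m,
				    struct sketch_module_fields *out)
{
	out->rack = MODULE_GET_RACK(m);
	out->brick_type = MODULE_GET_BTCHAR(m);
	out->bay = MODULE_GET_BPOS(m);
}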
diff --git a/arch/ia64/include/asm/sn/mspec.h b/arch/ia64/include/asm/sn/mspec.h deleted file mode 100644 index c1d3c50c3223..000000000000 --- a/arch/ia64/include/asm/sn/mspec.h +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * This file is subject to the terms and conditions of the GNU General Public | ||
4 | * License. See the file "COPYING" in the main directory of this archive | ||
5 | * for more details. | ||
6 | * | ||
7 | * Copyright (c) 2001-2008 Silicon Graphics, Inc. All rights reserved. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_IA64_SN_MSPEC_H | ||
11 | #define _ASM_IA64_SN_MSPEC_H | ||
12 | |||
13 | #define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */ | ||
14 | |||
15 | #define FETCHOP_LOAD 0 | ||
16 | #define FETCHOP_INCREMENT 8 | ||
17 | #define FETCHOP_DECREMENT 16 | ||
18 | #define FETCHOP_CLEAR 24 | ||
19 | |||
20 | #define FETCHOP_STORE 0 | ||
21 | #define FETCHOP_AND 24 | ||
22 | #define FETCHOP_OR 32 | ||
23 | |||
24 | #define FETCHOP_CLEAR_CACHE 56 | ||
25 | |||
26 | #define FETCHOP_LOAD_OP(addr, op) ( \ | ||
27 | *(volatile long *)((char*) (addr) + (op))) | ||
28 | |||
29 | #define FETCHOP_STORE_OP(addr, op, x) ( \ | ||
30 | *(volatile long *)((char*) (addr) + (op)) = (long) (x)) | ||
31 | |||
32 | #ifdef __KERNEL__ | ||
33 | |||
34 | /* | ||
35 | * Each Atomic Memory Operation (amo, formerly known as fetchop) | ||
36 | * variable is 64 bytes long. The first 8 bytes are used. The | ||
37 | * remaining 56 bytes are unaddressable due to the operation taking | ||
38 | * that portion of the address. | ||
39 | * | ||
40 | * NOTE: The amo structure _MUST_ be placed in either the first or second | ||
41 | * half of the cache line. The cache line _MUST NOT_ be used for anything | ||
42 | * other than additional amo entries. This is because there are two | ||
43 | * addresses which reference the same physical cache line. One will | ||
44 | * be a cached entry with the memory type bits all set. This address | ||
45 | * may be loaded into processor cache. The amo will be referenced | ||
46 | * uncached via the special memory type. If any portion of the | ||
47 | * cached cache line is modified, when that line is flushed, it will | ||
48 | * overwrite the uncached value in physical memory and lead to | ||
49 | * inconsistency. | ||
50 | */ | ||
51 | struct amo { | ||
52 | u64 variable; | ||
53 | u64 unused[7]; | ||
54 | }; | ||
55 | |||
56 | |||
57 | #endif /* __KERNEL__ */ | ||
58 | |||
59 | #endif /* _ASM_IA64_SN_MSPEC_H */ | ||
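The FETCHOP_*_OP macros above encode the desired operation in the low-order address bits of the 64-byte amo variable, so a plain load at the right offset performs the atomic operation in the memory controller. A hedged sketch follows, assuming an uncached mapping of a correctly placed amo variable has already been obtained elsewhere (that setup is not part of this header).

/* Illustrative sketch: use an uncached 'struct amo *' as a fetch-and-add
 * counter.
 */
static long sketch_amo_fetch_and_increment(struct amo *amo_va)
{
	/* A load at offset FETCHOP_INCREMENT returns the current value and
	 * atomically increments the variable in memory.
	 */
	return FETCHOP_LOAD_OP(&amo_va->variable, FETCHOP_INCREMENT);
}

static void sketch_amo_reset(struct amo *amo_va)
{
	/* A store at offset FETCHOP_STORE simply writes the new value. */
	FETCHOP_STORE_OP(&amo_va->variable, FETCHOP_STORE, 0);
}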
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h deleted file mode 100644 index 7c8b4710f071..000000000000 --- a/arch/ia64/include/asm/sn/nodepda.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_NODEPDA_H | ||
9 | #define _ASM_IA64_SN_NODEPDA_H | ||
10 | |||
11 | |||
12 | #include <asm/irq.h> | ||
13 | #include <asm/sn/arch.h> | ||
14 | #include <asm/sn/intr.h> | ||
15 | #include <asm/sn/bte.h> | ||
16 | |||
17 | /* | ||
18 | * NUMA Node-Specific Data structures are defined in this file. | ||
19 | * In particular, this is the location of the node PDA. | ||
20 | * A pointer to the right node PDA is saved in each CPU PDA. | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * Node-specific data structure. | ||
25 | * | ||
26 | * One of these structures is allocated on each node of a NUMA system. | ||
27 | * | ||
28 | * This structure provides a convenient way of keeping together | ||
29 | * all per-node data structures. | ||
30 | */ | ||
31 | struct phys_cpuid { | ||
32 | short nasid; | ||
33 | char subnode; | ||
34 | char slice; | ||
35 | }; | ||
36 | |||
37 | struct nodepda_s { | ||
38 | void *pdinfo; /* Platform-dependent per-node info */ | ||
39 | |||
40 | /* | ||
41 | * The BTEs on this node are shared by the local cpus | ||
42 | */ | ||
43 | struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */ | ||
44 | struct timer_list bte_recovery_timer; | ||
45 | spinlock_t bte_recovery_lock; | ||
46 | |||
47 | /* | ||
48 | * Array of pointers to the nodepdas for each node. | ||
49 | */ | ||
50 | struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES]; | ||
51 | |||
52 | /* | ||
53 | * Array of physical cpu identifiers. Indexed by cpuid. | ||
54 | */ | ||
55 | struct phys_cpuid phys_cpuid[NR_CPUS]; | ||
56 | spinlock_t ptc_lock ____cacheline_aligned_in_smp; | ||
57 | }; | ||
58 | |||
59 | typedef struct nodepda_s nodepda_t; | ||
60 | |||
61 | /* | ||
62 | * Access Functions for node PDA. | ||
63 | * Since there is one nodepda for each node, we need a convenient mechanism | ||
64 | * to access these nodepdas without cluttering code with #ifdefs. | ||
65 | * The next set of definitions provides this. | ||
66 | * Routines are expected to use | ||
67 | * | ||
68 | * sn_nodepda - to access node PDA for the node on which code is running | ||
69 | * NODEPDA(cnodeid) - to access node PDA for cnodeid | ||
70 | */ | ||
71 | |||
72 | DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); | ||
73 | #define sn_nodepda __this_cpu_read(__sn_nodepda) | ||
74 | #define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) | ||
75 | |||
76 | /* | ||
77 | * Check if given a compact node id the corresponding node has all the | ||
78 | * cpus disabled. | ||
79 | */ | ||
80 | #define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0) | ||
81 | |||
82 | #endif /* _ASM_IA64_SN_NODEPDA_H */ | ||
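The access conventions above (sn_nodepda for the local node, NODEPDA(cnodeid) for any node) are what callers use instead of open-coded per-node lookups. A hedged sketch of using those macros; for_each_online_node() comes from the generic NUMA headers, and treating online node ids as compact node ids is an assumption made only for illustration.

/* Illustrative sketch: walk each online node's nodepda via NODEPDA() and
 * count nodes with no enabled cpus.
 */
static int sketch_count_headless_nodes(void)
{
	int cnode, headless = 0;

	for_each_online_node(cnode) {
		struct nodepda_s *npda = NODEPDA(cnode);

		if (!npda)
			continue;	/* nodepda not set up for this node */
		if (is_headless_node(cnode))
			headless++;
	}
	return headless;
}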
diff --git a/arch/ia64/include/asm/sn/pcibr_provider.h b/arch/ia64/include/asm/sn/pcibr_provider.h deleted file mode 100644 index da205b7cdaac..000000000000 --- a/arch/ia64/include/asm/sn/pcibr_provider.h +++ /dev/null | |||
@@ -1,150 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | ||
9 | #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | ||
10 | |||
11 | #include <asm/sn/intr.h> | ||
12 | #include <asm/sn/pcibus_provider_defs.h> | ||
13 | |||
14 | /* Workarounds */ | ||
15 | #define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */ | ||
16 | |||
17 | #define BUSTYPE_MASK 0x1 | ||
18 | |||
19 | /* Macros given a pcibus structure */ | ||
20 | #define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK) | ||
21 | #define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \ | ||
22 | asic == PCIIO_ASIC_TYPE_TIOCP) | ||
23 | #define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC) | ||
24 | #define IS_TIOCP_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP) | ||
25 | |||
26 | |||
27 | /* | ||
28 | * The different PCI Bridge types supported on the SGI Altix platforms | ||
29 | */ | ||
30 | #define PCIBR_BRIDGETYPE_UNKNOWN -1 | ||
31 | #define PCIBR_BRIDGETYPE_PIC 2 | ||
32 | #define PCIBR_BRIDGETYPE_TIOCP 3 | ||
33 | |||
34 | /* | ||
35 | * Bridge 64bit Direct Map Attributes | ||
36 | */ | ||
37 | #define PCI64_ATTR_PREF (1ull << 59) | ||
38 | #define PCI64_ATTR_PREC (1ull << 58) | ||
39 | #define PCI64_ATTR_VIRTUAL (1ull << 57) | ||
40 | #define PCI64_ATTR_BAR (1ull << 56) | ||
41 | #define PCI64_ATTR_SWAP (1ull << 55) | ||
42 | #define PCI64_ATTR_VIRTUAL1 (1ull << 54) | ||
43 | |||
44 | #define PCI32_LOCAL_BASE 0 | ||
45 | #define PCI32_MAPPED_BASE 0x40000000 | ||
46 | #define PCI32_DIRECT_BASE 0x80000000 | ||
47 | |||
48 | #define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \ | ||
49 | (u64)(x) >= PCI32_MAPPED_BASE) | ||
50 | #define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE) | ||
51 | |||
52 | |||
53 | /* | ||
54 | * Bridge PMU Address Translation Entry Attributes | ||
55 | */ | ||
56 | #define PCI32_ATE_V (0x1 << 0) | ||
57 | #define PCI32_ATE_CO (0x1 << 1) /* PIC ASIC ONLY */ | ||
58 | #define PCI32_ATE_PIO (0x1 << 1) /* TIOCP ASIC ONLY */ | ||
59 | #define PCI32_ATE_MSI (0x1 << 2) | ||
60 | #define PCI32_ATE_PREF (0x1 << 3) | ||
61 | #define PCI32_ATE_BAR (0x1 << 4) | ||
62 | #define PCI32_ATE_ADDR_SHFT 12 | ||
63 | |||
64 | #define MINIMAL_ATES_REQUIRED(addr, size) \ | ||
65 | (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1)) | ||
66 | |||
67 | #define MINIMAL_ATE_FLAG(addr, size) \ | ||
68 | (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0) | ||
69 | |||
70 | /* bit 29 of the pci address is the SWAP bit */ | ||
71 | #define ATE_SWAPSHIFT 29 | ||
72 | #define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT)) | ||
73 | #define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT)) | ||
74 | |||
75 | /* | ||
76 | * I/O page size | ||
77 | */ | ||
78 | #if PAGE_SIZE < 16384 | ||
79 | #define IOPFNSHIFT 12 /* 4K per mapped page */ | ||
80 | #else | ||
81 | #define IOPFNSHIFT 14 /* 16K per mapped page */ | ||
82 | #endif | ||
83 | |||
84 | #define IOPGSIZE (1 << IOPFNSHIFT) | ||
85 | #define IOPG(x) ((x) >> IOPFNSHIFT) | ||
86 | #define IOPGOFF(x) ((x) & (IOPGSIZE-1)) | ||
87 | |||
88 | #define PCIBR_DEV_SWAP_DIR (1ull << 19) | ||
89 | #define PCIBR_CTRL_PAGE_SIZE (0x1 << 21) | ||
90 | |||
91 | /* | ||
92 | * PMU resources. | ||
93 | */ | ||
94 | struct ate_resource{ | ||
95 | u64 *ate; | ||
96 | u64 num_ate; | ||
97 | u64 lowest_free_index; | ||
98 | }; | ||
99 | |||
100 | struct pcibus_info { | ||
101 | struct pcibus_bussoft pbi_buscommon; /* common header */ | ||
102 | u32 pbi_moduleid; | ||
103 | short pbi_bridge_type; | ||
104 | short pbi_bridge_mode; | ||
105 | |||
106 | struct ate_resource pbi_int_ate_resource; | ||
107 | u64 pbi_int_ate_size; | ||
108 | |||
109 | u64 pbi_dir_xbase; | ||
110 | char pbi_hub_xid; | ||
111 | |||
112 | u64 pbi_devreg[8]; | ||
113 | |||
114 | u32 pbi_valid_devices; | ||
115 | u32 pbi_enabled_devices; | ||
116 | |||
117 | spinlock_t pbi_lock; | ||
118 | }; | ||
119 | |||
120 | extern int pcibr_init_provider(void); | ||
121 | extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); | ||
122 | extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type); | ||
123 | extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type); | ||
124 | extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); | ||
125 | |||
126 | /* | ||
127 | * prototypes for the bridge asic register access routines in pcibr_reg.c | ||
128 | */ | ||
129 | extern void pcireg_control_bit_clr(struct pcibus_info *, u64); | ||
130 | extern void pcireg_control_bit_set(struct pcibus_info *, u64); | ||
131 | extern u64 pcireg_tflush_get(struct pcibus_info *); | ||
132 | extern u64 pcireg_intr_status_get(struct pcibus_info *); | ||
133 | extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64); | ||
134 | extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64); | ||
135 | extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64); | ||
136 | extern void pcireg_force_intr_set(struct pcibus_info *, int); | ||
137 | extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int); | ||
138 | extern void pcireg_int_ate_set(struct pcibus_info *, int, u64); | ||
139 | extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int); | ||
140 | extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info); | ||
141 | extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info); | ||
142 | extern int pcibr_ate_alloc(struct pcibus_info *, int); | ||
143 | extern void pcibr_ate_free(struct pcibus_info *, int); | ||
144 | extern void ate_write(struct pcibus_info *, int, int, u64); | ||
145 | extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, | ||
146 | void *resp, char **ssdt); | ||
147 | extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, | ||
148 | int action, void *resp); | ||
149 | extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus); | ||
150 | #endif | ||
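The IOPG()/IOPGOFF() helpers and MINIMAL_ATES_REQUIRED() above determine how many map-table (ATE) entries a DMA buffer needs at the bridge's I/O page size. The following hedged sketch restates that arithmetic; the helper name is made up.

/* Illustrative sketch: number of ATEs needed to map 'size' bytes starting
 * at offset 'addr', at the IOPGSIZE granularity defined above. When
 * MINIMAL_ATES_REQUIRED() holds, the buffer does not spill into an extra
 * I/O page and this count is the minimum possible for that size.
 */
static u64 sketch_ates_needed(u64 addr, u64 size)
{
	u64 first = IOPG(addr);
	u64 last = IOPG(addr + size - 1);

	return last - first + 1;
}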
diff --git a/arch/ia64/include/asm/sn/pcibus_provider_defs.h b/arch/ia64/include/asm/sn/pcibus_provider_defs.h deleted file mode 100644 index 8f7c83d0f6d3..000000000000 --- a/arch/ia64/include/asm/sn/pcibus_provider_defs.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H | ||
9 | #define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H | ||
10 | |||
11 | /* | ||
12 | * SN pci asic types. Do not ever renumber these or reuse values. The | ||
13 | * values must agree with what prom thinks they are. | ||
14 | */ | ||
15 | |||
16 | #define PCIIO_ASIC_TYPE_UNKNOWN 0 | ||
17 | #define PCIIO_ASIC_TYPE_PPB 1 | ||
18 | #define PCIIO_ASIC_TYPE_PIC 2 | ||
19 | #define PCIIO_ASIC_TYPE_TIOCP 3 | ||
20 | #define PCIIO_ASIC_TYPE_TIOCA 4 | ||
21 | #define PCIIO_ASIC_TYPE_TIOCE 5 | ||
22 | |||
23 | #define PCIIO_ASIC_MAX_TYPES 6 | ||
24 | |||
25 | /* | ||
26 | * Common pciio bus provider data. There should be one of these as the | ||
27 | * first field in any pciio based provider soft structure (e.g. pcibr_soft, | ||
28 | * tioca_soft, etc.). | ||
29 | */ | ||
30 | |||
31 | struct pcibus_bussoft { | ||
32 | u32 bs_asic_type; /* chipset type */ | ||
33 | u32 bs_xid; /* xwidget id */ | ||
34 | u32 bs_persist_busnum; /* Persistent Bus Number */ | ||
35 | u32 bs_persist_segment; /* Segment Number */ | ||
36 | u64 bs_legacy_io; /* legacy io pio addr */ | ||
37 | u64 bs_legacy_mem; /* legacy mem pio addr */ | ||
38 | u64 bs_base; /* widget base */ | ||
39 | struct xwidget_info *bs_xwidget_info; | ||
40 | }; | ||
41 | |||
42 | struct pci_controller; | ||
43 | /* | ||
44 | * SN pci bus indirection | ||
45 | */ | ||
46 | |||
47 | struct sn_pcibus_provider { | ||
48 | dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags); | ||
49 | dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags); | ||
50 | void (*dma_unmap)(struct pci_dev *, dma_addr_t, int); | ||
51 | void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *); | ||
52 | void (*force_interrupt)(struct sn_irq_info *); | ||
53 | void (*target_interrupt)(struct sn_irq_info *); | ||
54 | }; | ||
55 | |||
56 | /* | ||
57 | * Flags used by the map interfaces | ||
58 | * bits 3:0 specifies format of passed in address | ||
59 | * bit 4 specifies that address is to be used for MSI | ||
60 | */ | ||
61 | |||
62 | #define SN_DMA_ADDRTYPE(x) ((x) & 0xf) | ||
63 | #define SN_DMA_ADDR_PHYS 1 /* address is phys memory */ | ||
64 | #define SN_DMA_ADDR_XIO 2 /* address is an xio address */ | ||
65 | #define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */ | ||
66 | |||
67 | extern struct sn_pcibus_provider *sn_pci_provider[]; | ||
68 | #endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ | ||
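The SN_DMA_* flags above pack an address-type code into bits 3:0 and an MSI indicator into bit 4 of the flags word handed to the provider dma_map hooks. A hedged sketch of composing such a flags value; the wrapper and its parameters are hypothetical.

/* Illustrative sketch: build a provider dma_map() flags word for an
 * MSI-capable mapping of a physical address.
 */
static dma_addr_t sketch_map_for_msi(struct sn_pcibus_provider *provider,
				     struct pci_dev *pdev,
				     unsigned long paddr, size_t size)
{
	int flags = SN_DMA_ADDR_PHYS | SN_DMA_MSI;

	/* SN_DMA_ADDRTYPE(flags) recovers SN_DMA_ADDR_PHYS here */
	return provider->dma_map(pdev, paddr, size, flags);
}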
diff --git a/arch/ia64/include/asm/sn/pcidev.h b/arch/ia64/include/asm/sn/pcidev.h deleted file mode 100644 index 1c2382cea807..000000000000 --- a/arch/ia64/include/asm/sn/pcidev.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PCIDEV_H | ||
9 | #define _ASM_IA64_SN_PCI_PCIDEV_H | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | |||
13 | /* | ||
14 | * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to | ||
15 | * the pcidev_info structs for all devices under a controller, we keep a | ||
16 | * list of pcidev_info under pci_controller->platform_data. | ||
17 | */ | ||
18 | struct sn_platform_data { | ||
19 | void *provider_soft; | ||
20 | struct list_head pcidev_info; | ||
21 | }; | ||
22 | |||
23 | #define SN_PLATFORM_DATA(busdev) \ | ||
24 | ((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data)) | ||
25 | |||
26 | #define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev) | ||
27 | |||
28 | /* | ||
29 | * Given a pci_bus, return the sn pcibus_bussoft struct. Note that | ||
30 | * this only works for root busses, not for busses represented by PPB's. | ||
31 | */ | ||
32 | |||
33 | #define SN_PCIBUS_BUSSOFT(pci_bus) \ | ||
34 | ((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft)) | ||
35 | |||
36 | #define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \ | ||
37 | ((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft)) | ||
38 | /* | ||
39 | * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note | ||
40 | * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due | ||
41 | * to possible PPB's in the path. | ||
42 | */ | ||
43 | |||
44 | #define SN_PCIDEV_BUSSOFT(pci_dev) \ | ||
45 | (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info) | ||
46 | |||
47 | #define SN_PCIDEV_BUSPROVIDER(pci_dev) \ | ||
48 | (SN_PCIDEV_INFO(pci_dev)->pdi_provider) | ||
49 | |||
50 | #define PCIIO_BUS_NONE 255 /* bus 255 reserved */ | ||
51 | #define PCIIO_SLOT_NONE 255 | ||
52 | #define PCIIO_FUNC_NONE 255 | ||
53 | #define PCIIO_VENDOR_ID_NONE (-1) | ||
54 | |||
55 | struct pcidev_info { | ||
56 | u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */ | ||
57 | u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */ | ||
58 | |||
59 | struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */ | ||
60 | struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */ | ||
61 | struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */ | ||
62 | |||
63 | struct sn_irq_info *pdi_sn_irq_info; | ||
64 | struct sn_pcibus_provider *pdi_provider; /* sn pci ops */ | ||
65 | struct pci_dev *host_pci_dev; /* host bus link */ | ||
66 | struct list_head pdi_list; /* List of pcidev_info */ | ||
67 | }; | ||
68 | |||
69 | extern void sn_irq_fixup(struct pci_dev *pci_dev, | ||
70 | struct sn_irq_info *sn_irq_info); | ||
71 | extern void sn_irq_unfixup(struct pci_dev *pci_dev); | ||
72 | extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *); | ||
73 | extern void sn_bus_fixup(struct pci_bus *); | ||
74 | extern void sn_acpi_bus_fixup(struct pci_bus *); | ||
75 | extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *); | ||
76 | extern void sn_bus_store_sysdata(struct pci_dev *dev); | ||
77 | extern void sn_bus_free_sysdata(void); | ||
78 | extern void sn_generate_path(struct pci_bus *pci_bus, char *address); | ||
79 | extern void sn_io_slot_fixup(struct pci_dev *); | ||
80 | extern void sn_acpi_slot_fixup(struct pci_dev *); | ||
81 | extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *, | ||
82 | struct sn_irq_info *); | ||
83 | extern void sn_pci_unfixup_slot(struct pci_dev *dev); | ||
84 | extern void sn_irq_lh_init(void); | ||
85 | #endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ | ||
diff --git a/arch/ia64/include/asm/sn/pda.h b/arch/ia64/include/asm/sn/pda.h deleted file mode 100644 index 22ae358c8d16..000000000000 --- a/arch/ia64/include/asm/sn/pda.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PDA_H | ||
9 | #define _ASM_IA64_SN_PDA_H | ||
10 | |||
11 | #include <linux/cache.h> | ||
12 | #include <asm/percpu.h> | ||
13 | |||
14 | |||
15 | /* | ||
16 | * CPU-specific data structure. | ||
17 | * | ||
18 | * One of these structures is allocated for each cpu of a NUMA system. | ||
19 | * | ||
20 | * This structure provides a convenient way of keeping together | ||
21 | * all SN per-cpu data structures. | ||
22 | */ | ||
23 | |||
24 | typedef struct pda_s { | ||
25 | |||
26 | /* | ||
27 | * Support for SN LEDs | ||
28 | */ | ||
29 | volatile short *led_address; | ||
30 | u8 led_state; | ||
31 | u8 hb_state; /* supports blinking heartbeat leds */ | ||
32 | unsigned int hb_count; | ||
33 | |||
34 | unsigned int idle_flag; | ||
35 | |||
36 | volatile unsigned long *bedrock_rev_id; | ||
37 | volatile unsigned long *pio_write_status_addr; | ||
38 | unsigned long pio_write_status_val; | ||
39 | volatile unsigned long *pio_shub_war_cam_addr; | ||
40 | |||
41 | unsigned long sn_in_service_ivecs[4]; | ||
42 | int sn_lb_int_war_ticks; | ||
43 | int sn_last_irq; | ||
44 | int sn_first_irq; | ||
45 | } pda_t; | ||
46 | |||
47 | |||
48 | #define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) | ||
49 | |||
50 | /* | ||
51 | * PDA | ||
52 | * Per-cpu private data area for each cpu. The PDA is located immediately after | ||
53 | * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each | ||
54 | * cpu but only a small amount of the page is actually used. We put the SNIA PDA | ||
55 | * in the same page as the cpu_data area. Note that there is a check in the setup | ||
56 | * code to verify that we don't overflow the page. | ||
57 | * | ||
58 | * It seems like we should cache-line align the pda so that any changes in the | ||
59 | * size of the cpu_data area don't change the cache layout. Should we align to a 32, 64, 128 | ||
60 | * or 512 byte boundary? Each has merits. For now, pick 128, but this should be revisited later. | ||
61 | */ | ||
62 | DECLARE_PER_CPU(struct pda_s, pda_percpu); | ||
63 | |||
64 | #define pda (&__ia64_per_cpu_var(pda_percpu)) | ||
65 | |||
66 | #define pdacpu(cpu) (&per_cpu(pda_percpu, cpu)) | ||
67 | |||
68 | #endif /* _ASM_IA64_SN_PDA_H */ | ||
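As a rough illustration of how the pieces above fit together (a sketch under the assumption that this header is present, not code recovered from the tree): CACHE_ALIGN() rounds a size up to a multiple of SMP_CACHE_BYTES, pda names the current CPU's pda_percpu instance, and pdacpu(cpu) reaches any other CPU's instance.

/* Illustrative sketch only. */
#include <linux/cache.h>
#include <asm/sn/pda.h>

static void sn_pda_example(int cpu)
{
	/* Round the PDA size up to a whole number of cache lines. */
	unsigned long aligned = CACHE_ALIGN(sizeof(struct pda_s));

	/* This CPU's private area ... */
	pda->hb_count++;

	/* ... and another CPU's, e.g. to mirror its LED state. */
	if (pdacpu(cpu)->led_address)
		*pdacpu(cpu)->led_address = pdacpu(cpu)->led_state;

	(void)aligned;
}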
diff --git a/arch/ia64/include/asm/sn/pic.h b/arch/ia64/include/asm/sn/pic.h deleted file mode 100644 index 5f9da5fd6e56..000000000000 --- a/arch/ia64/include/asm/sn/pic.h +++ /dev/null | |||
@@ -1,261 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PIC_H | ||
9 | #define _ASM_IA64_SN_PCI_PIC_H | ||
10 | |||
11 | /* | ||
12 | * PIC AS DEVICE ZERO | ||
13 | * ------------------ | ||
14 | * | ||
15 | * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) | ||
16 | * be designated as 'device 0'. That is a departure from earlier SGI | ||
17 | * PCI bridges. Because of that we use config space 1 to access the | ||
18 | * config space of the first actual PCI device on the bus. | ||
19 | * Here's what the PIC manual says: | ||
20 | * | ||
21 | * The current PCI-X bus specification now defines that the parent | ||
22 | * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC | ||
23 | * reduced the total number of devices from 8 to 4 and removed the | ||
24 | * device registers and windows, now only supporting devices 0,1,2, and | ||
25 | * 3. PIC did leave all 8 configuration space windows. The reason was | ||
26 | * there was nothing to gain by removing them. Herein lies the problem. | ||
27 | * The device numbering we do using 0 through 3 is unrelated to the device | ||
28 | * numbering which PCI-X requires in configuration space. In the past we | ||
29 | * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc. | ||
30 | * PCI-X requires we start at 1, not 0, and currently the PX brick | ||
31 | * does associate our: | ||
32 | * | ||
33 | * device 0 with configuration space window 1, | ||
34 | * device 1 with configuration space window 2, | ||
35 | * device 2 with configuration space window 3, | ||
36 | * device 3 with configuration space window 4. | ||
37 | * | ||
38 | * The net effect is that all config space accesses are off-by-one with | ||
39 | * relation to other per-slot accesses on the PIC. | ||
40 | * Here is a table that shows some of that: | ||
41 | * | ||
42 | * Internal Slot# | ||
43 | * | | ||
44 | * | 0 1 2 3 | ||
45 | * ----------|--------------------------------------- | ||
46 | * config | 0x21000 0x22000 0x23000 0x24000 | ||
47 | * | | ||
48 | * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd | ||
49 | * | | ||
50 | * odd rrb | n/a 0[1] n/a 1[1] | ||
51 | * | | ||
52 | * int dev | 00 01 10 11 | ||
53 | * | | ||
54 | * ext slot# | 1 2 3 4 | ||
55 | * ----------|--------------------------------------- | ||
56 | */ | ||
57 | |||
58 | #define PIC_ATE_TARGETID_SHFT 8 | ||
59 | #define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL | ||
60 | #define PIC_PCI64_ATTR_TARG_SHFT 60 | ||
61 | |||
62 | |||
63 | /***************************************************************************** | ||
64 | *********************** PIC MMR structure mapping *************************** | ||
65 | *****************************************************************************/ | ||
66 | |||
67 | /* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] | ||
68 | * of a 64-bit register. When writing PIC registers, always write the | ||
69 | * entire 64 bits. | ||
70 | */ | ||
71 | |||
72 | struct pic { | ||
73 | |||
74 | /* 0x000000-0x00FFFF -- Local Registers */ | ||
75 | |||
76 | /* 0x000000-0x000057 -- Standard Widget Configuration */ | ||
77 | u64 p_wid_id; /* 0x000000 */ | ||
78 | u64 p_wid_stat; /* 0x000008 */ | ||
79 | u64 p_wid_err_upper; /* 0x000010 */ | ||
80 | u64 p_wid_err_lower; /* 0x000018 */ | ||
81 | #define p_wid_err p_wid_err_lower | ||
82 | u64 p_wid_control; /* 0x000020 */ | ||
83 | u64 p_wid_req_timeout; /* 0x000028 */ | ||
84 | u64 p_wid_int_upper; /* 0x000030 */ | ||
85 | u64 p_wid_int_lower; /* 0x000038 */ | ||
86 | #define p_wid_int p_wid_int_lower | ||
87 | u64 p_wid_err_cmdword; /* 0x000040 */ | ||
88 | u64 p_wid_llp; /* 0x000048 */ | ||
89 | u64 p_wid_tflush; /* 0x000050 */ | ||
90 | |||
91 | /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */ | ||
92 | u64 p_wid_aux_err; /* 0x000058 */ | ||
93 | u64 p_wid_resp_upper; /* 0x000060 */ | ||
94 | u64 p_wid_resp_lower; /* 0x000068 */ | ||
95 | #define p_wid_resp p_wid_resp_lower | ||
96 | u64 p_wid_tst_pin_ctrl; /* 0x000070 */ | ||
97 | u64 p_wid_addr_lkerr; /* 0x000078 */ | ||
98 | |||
99 | /* 0x000080-0x00008F -- PMU & MAP */ | ||
100 | u64 p_dir_map; /* 0x000080 */ | ||
101 | u64 _pad_000088; /* 0x000088 */ | ||
102 | |||
103 | /* 0x000090-0x00009F -- SSRAM */ | ||
104 | u64 p_map_fault; /* 0x000090 */ | ||
105 | u64 _pad_000098; /* 0x000098 */ | ||
106 | |||
107 | /* 0x0000A0-0x0000AF -- Arbitration */ | ||
108 | u64 p_arb; /* 0x0000A0 */ | ||
109 | u64 _pad_0000A8; /* 0x0000A8 */ | ||
110 | |||
111 | /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ | ||
112 | u64 p_ate_parity_err; /* 0x0000B0 */ | ||
113 | u64 _pad_0000B8; /* 0x0000B8 */ | ||
114 | |||
115 | /* 0x0000C0-0x0000FF -- PCI/GIO */ | ||
116 | u64 p_bus_timeout; /* 0x0000C0 */ | ||
117 | u64 p_pci_cfg; /* 0x0000C8 */ | ||
118 | u64 p_pci_err_upper; /* 0x0000D0 */ | ||
119 | u64 p_pci_err_lower; /* 0x0000D8 */ | ||
120 | #define p_pci_err p_pci_err_lower | ||
121 | u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */ | ||
122 | |||
123 | /* 0x000100-0x0001FF -- Interrupt */ | ||
124 | u64 p_int_status; /* 0x000100 */ | ||
125 | u64 p_int_enable; /* 0x000108 */ | ||
126 | u64 p_int_rst_stat; /* 0x000110 */ | ||
127 | u64 p_int_mode; /* 0x000118 */ | ||
128 | u64 p_int_device; /* 0x000120 */ | ||
129 | u64 p_int_host_err; /* 0x000128 */ | ||
130 | u64 p_int_addr[8]; /* 0x0001{30,,,68} */ | ||
131 | u64 p_err_int_view; /* 0x000170 */ | ||
132 | u64 p_mult_int; /* 0x000178 */ | ||
133 | u64 p_force_always[8]; /* 0x0001{80,,,B8} */ | ||
134 | u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */ | ||
135 | |||
136 | /* 0x000200-0x000298 -- Device */ | ||
137 | u64 p_device[4]; /* 0x0002{00,,,18} */ | ||
138 | u64 _pad_000220[4]; /* 0x0002{20,,,38} */ | ||
139 | u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */ | ||
140 | u64 _pad_000260[4]; /* 0x0002{60,,,78} */ | ||
141 | u64 p_rrb_map[2]; /* 0x0002{80,,,88} */ | ||
142 | #define p_even_resp p_rrb_map[0] /* 0x000280 */ | ||
143 | #define p_odd_resp p_rrb_map[1] /* 0x000288 */ | ||
144 | u64 p_resp_status; /* 0x000290 */ | ||
145 | u64 p_resp_clear; /* 0x000298 */ | ||
146 | |||
147 | u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */ | ||
148 | |||
149 | /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ | ||
150 | struct { | ||
151 | u64 upper; /* 0x0003{00,,,F0} */ | ||
152 | u64 lower; /* 0x0003{08,,,F8} */ | ||
153 | } p_buf_addr_match[16]; | ||
154 | |||
155 | /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ | ||
156 | struct { | ||
157 | u64 flush_w_touch; /* 0x000{400,,,5C0} */ | ||
158 | u64 flush_wo_touch; /* 0x000{408,,,5C8} */ | ||
159 | u64 inflight; /* 0x000{410,,,5D0} */ | ||
160 | u64 prefetch; /* 0x000{418,,,5D8} */ | ||
161 | u64 total_pci_retry; /* 0x000{420,,,5E0} */ | ||
162 | u64 max_pci_retry; /* 0x000{428,,,5E8} */ | ||
163 | u64 max_latency; /* 0x000{430,,,5F0} */ | ||
164 | u64 clear_all; /* 0x000{438,,,5F8} */ | ||
165 | } p_buf_count[8]; | ||
166 | |||
167 | |||
168 | /* 0x000600-0x0009FF -- PCI/X registers */ | ||
169 | u64 p_pcix_bus_err_addr; /* 0x000600 */ | ||
170 | u64 p_pcix_bus_err_attr; /* 0x000608 */ | ||
171 | u64 p_pcix_bus_err_data; /* 0x000610 */ | ||
172 | u64 p_pcix_pio_split_addr; /* 0x000618 */ | ||
173 | u64 p_pcix_pio_split_attr; /* 0x000620 */ | ||
174 | u64 p_pcix_dma_req_err_attr; /* 0x000628 */ | ||
175 | u64 p_pcix_dma_req_err_addr; /* 0x000630 */ | ||
176 | u64 p_pcix_timeout; /* 0x000638 */ | ||
177 | |||
178 | u64 _pad_000640[120]; /* 0x000{640,,,9F8} */ | ||
179 | |||
180 | /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ | ||
181 | struct { | ||
182 | u64 p_buf_addr; /* 0x000{A00,,,AF0} */ | ||
183 | u64 p_buf_attr; /* 0X000{A08,,,AF8} */ | ||
184 | } p_pcix_read_buf_64[16]; | ||
185 | |||
186 | struct { | ||
187 | u64 p_buf_addr; /* 0x000{B00,,,BE0} */ | ||
188 | u64 p_buf_attr; /* 0x000{B08,,,BE8} */ | ||
189 | u64 p_buf_valid; /* 0x000{B10,,,BF0} */ | ||
190 | u64 __pad1; /* 0x000{B18,,,BF8} */ | ||
191 | } p_pcix_write_buf_64[8]; | ||
192 | |||
193 | /* End of Local Registers -- Start of Address Map space */ | ||
194 | |||
195 | char _pad_000c00[0x010000 - 0x000c00]; | ||
196 | |||
197 | /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */ | ||
198 | u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */ | ||
199 | |||
200 | /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */ | ||
201 | u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */ | ||
202 | |||
203 | char _pad_014000[0x18000 - 0x014000]; | ||
204 | |||
205 | /* 0x18000-0x197F8 -- PIC Write Request Ram */ | ||
206 | u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ | ||
207 | u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ | ||
208 | u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ | ||
209 | |||
210 | char _pad_019800[0x20000 - 0x019800]; | ||
211 | |||
212 | /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */ | ||
213 | union { | ||
214 | u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ | ||
215 | u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ | ||
216 | u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ | ||
217 | u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ | ||
218 | union { | ||
219 | u8 c[0x100 / 1]; | ||
220 | u16 s[0x100 / 2]; | ||
221 | u32 l[0x100 / 4]; | ||
222 | u64 d[0x100 / 8]; | ||
223 | } f[8]; | ||
224 | } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */ | ||
225 | |||
226 | /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ | ||
227 | union { | ||
228 | u8 c[0x1000 / 1]; /* 0x028000-0x029000 */ | ||
229 | u16 s[0x1000 / 2]; /* 0x028000-0x029000 */ | ||
230 | u32 l[0x1000 / 4]; /* 0x028000-0x029000 */ | ||
231 | u64 d[0x1000 / 8]; /* 0x028000-0x029000 */ | ||
232 | union { | ||
233 | u8 c[0x100 / 1]; | ||
234 | u16 s[0x100 / 2]; | ||
235 | u32 l[0x100 / 4]; | ||
236 | u64 d[0x100 / 8]; | ||
237 | } f[8]; | ||
238 | } p_type1_cfg; /* 0x028000-0x029000 */ | ||
239 | |||
240 | char _pad_029000[0x030000-0x029000]; | ||
241 | |||
242 | /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ | ||
243 | union { | ||
244 | u8 c[8 / 1]; | ||
245 | u16 s[8 / 2]; | ||
246 | u32 l[8 / 4]; | ||
247 | u64 d[8 / 8]; | ||
248 | } p_pci_iack; /* 0x030000-0x030007 */ | ||
249 | |||
250 | char _pad_030007[0x040000-0x030008]; | ||
251 | |||
252 | /* 0x040000-0x040007 -- PCIX Special Cycle */ | ||
253 | union { | ||
254 | u8 c[8 / 1]; | ||
255 | u16 s[8 / 2]; | ||
256 | u32 l[8 / 4]; | ||
257 | u64 d[8 / 8]; | ||
258 | } p_pcix_cycle; /* 0x040000-0x040007 */ | ||
259 | }; | ||
260 | |||
261 | #endif /* _ASM_IA64_SN_PCI_PIC_H */ | ||
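The "device zero" comment at the top of this header is easiest to see as arithmetic: internal device n (0..3) is reached through configuration space window n + 1, matching the 0x21000..0x24000 row in the table. A small sketch follows (an illustration, not recovered code).

/* Illustrative sketch only -- not part of the removed header. */
#include <linux/types.h>

/* Offset of internal slot n's config space inside the PIC MMR map. */
static inline u64 pic_type0_cfg_offset(unsigned int internal_slot)
{
	/* 0 -> 0x21000, 1 -> 0x22000, 2 -> 0x23000, 3 -> 0x24000 */
	return 0x20000 + (u64)(internal_slot + 1) * 0x1000;
}

/*
 * Equivalently, via the struct pic layout: p_type0_cfg_dev[n + 1].
 * Per the PV#854697 note above, any write to a PIC register must
 * write the full 64 bits.
 */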
diff --git a/arch/ia64/include/asm/sn/rw_mmr.h b/arch/ia64/include/asm/sn/rw_mmr.h deleted file mode 100644 index 2d78f4c5a45e..000000000000 --- a/arch/ia64/include/asm/sn/rw_mmr.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_RW_MMR_H | ||
9 | #define _ASM_IA64_SN_RW_MMR_H | ||
10 | |||
11 | |||
12 | /* | ||
13 | * This file declares routines that access MMRs via uncached physical addresses. | ||
14 | * pio_phys_read_mmr - read an MMR | ||
15 | * pio_phys_write_mmr - write an MMR | ||
16 | * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 | ||
17 | * Second MMR will be skipped if address is NULL | ||
18 | * | ||
19 | * Addresses passed to these routines should be uncached physical addresses | ||
20 | * i.e., 0x80000.... | ||
21 | */ | ||
22 | |||
23 | |||
24 | extern long pio_phys_read_mmr(volatile long *mmr); | ||
25 | extern void pio_phys_write_mmr(volatile long *mmr, long val); | ||
26 | extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2); | ||
27 | |||
28 | #endif /* _ASM_IA64_SN_RW_MMR_H */ | ||
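A minimal usage sketch for the three helpers above (illustrative only; "mmr" stands in for whatever uncached physical MMR address the caller already holds, per the 0x80000.... note in the comment):

/* Illustrative sketch only. */
#include <linux/stddef.h>
#include <asm/sn/rw_mmr.h>

static void rw_mmr_example(volatile long *mmr)
{
	long val = pio_phys_read_mmr(mmr);

	pio_phys_write_mmr(mmr, val | 1);

	/* The second MMR pointer may be NULL; it is then skipped. */
	pio_atomic_phys_write_mmrs(mmr, val | 1, NULL, 0);
}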
diff --git a/arch/ia64/include/asm/sn/shub_mmr.h b/arch/ia64/include/asm/sn/shub_mmr.h deleted file mode 100644 index a84d870f4294..000000000000 --- a/arch/ia64/include/asm/sn/shub_mmr.h +++ /dev/null | |||
@@ -1,502 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * This file is subject to the terms and conditions of the GNU General Public | ||
4 | * License. See the file "COPYING" in the main directory of this archive | ||
5 | * for more details. | ||
6 | * | ||
7 | * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_IA64_SN_SHUB_MMR_H | ||
11 | #define _ASM_IA64_SN_SHUB_MMR_H | ||
12 | |||
13 | /* ==================================================================== */ | ||
14 | /* Register "SH_IPI_INT" */ | ||
15 | /* SHub Inter-Processor Interrupt Registers */ | ||
16 | /* ==================================================================== */ | ||
17 | #define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380) | ||
18 | #define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380) | ||
19 | |||
20 | /* SH_IPI_INT_TYPE */ | ||
21 | /* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ | ||
22 | #define SH_IPI_INT_TYPE_SHFT 0 | ||
23 | #define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) | ||
24 | |||
25 | /* SH_IPI_INT_AGT */ | ||
26 | /* Description: Agent, must be 0 for SHub */ | ||
27 | #define SH_IPI_INT_AGT_SHFT 3 | ||
28 | #define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008) | ||
29 | |||
30 | /* SH_IPI_INT_PID */ | ||
31 | /* Description: Processor ID, same setting as on targeted McKinley */ | ||
32 | #define SH_IPI_INT_PID_SHFT 4 | ||
33 | #define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) | ||
34 | |||
35 | /* SH_IPI_INT_BASE */ | ||
36 | /* Description: Optional interrupt vector area, 2MB aligned */ | ||
37 | #define SH_IPI_INT_BASE_SHFT 21 | ||
38 | #define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) | ||
39 | |||
40 | /* SH_IPI_INT_IDX */ | ||
41 | /* Description: Targeted McKinley interrupt vector */ | ||
42 | #define SH_IPI_INT_IDX_SHFT 52 | ||
43 | #define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) | ||
44 | |||
45 | /* SH_IPI_INT_SEND */ | ||
46 | /* Description: Send Interrupt Message to PI. This generates a pulse. */ | ||
47 | #define SH_IPI_INT_SEND_SHFT 63 | ||
48 | #define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000) | ||
49 | |||
50 | /* ==================================================================== */ | ||
51 | /* Register "SH_EVENT_OCCURRED" */ | ||
52 | /* SHub Interrupt Event Occurred */ | ||
53 | /* ==================================================================== */ | ||
54 | #define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000) | ||
55 | #define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008) | ||
56 | #define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000) | ||
57 | #define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008) | ||
58 | |||
59 | /* ==================================================================== */ | ||
60 | /* Register "SH_PI_CAM_CONTROL" */ | ||
61 | /* CRB CAM MMR Access Control */ | ||
62 | /* ==================================================================== */ | ||
63 | #define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300) | ||
64 | |||
65 | /* ==================================================================== */ | ||
66 | /* Register "SH_SHUB_ID" */ | ||
67 | /* SHub ID Number */ | ||
68 | /* ==================================================================== */ | ||
69 | #define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580) | ||
70 | #define SH1_SHUB_ID_REVISION_SHFT 28 | ||
71 | #define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000) | ||
72 | |||
73 | /* ==================================================================== */ | ||
74 | /* Register "SH_RTC" */ | ||
75 | /* Real-time Clock */ | ||
76 | /* ==================================================================== */ | ||
77 | #define SH1_RTC __IA64_UL_CONST(0x00000001101c0000) | ||
78 | #define SH2_RTC __IA64_UL_CONST(0x00000002101c0000) | ||
79 | #define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
80 | |||
81 | /* ==================================================================== */ | ||
82 | /* Register "SH_PIO_WRITE_STATUS_0|1" */ | ||
83 | /* PIO Write Status for CPU 0 & 1 */ | ||
84 | /* ==================================================================== */ | ||
85 | #define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200) | ||
86 | #define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280) | ||
87 | #define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200) | ||
88 | #define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280) | ||
89 | #define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300) | ||
90 | #define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380) | ||
91 | |||
92 | /* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */ | ||
93 | /* Description: Deadlock response detected */ | ||
94 | #define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1 | ||
95 | #define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \ | ||
96 | __IA64_UL_CONST(0x0000000000000002) | ||
97 | |||
98 | /* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */ | ||
99 | /* Description: Count of currently pending PIO writes */ | ||
100 | #define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56 | ||
101 | #define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \ | ||
102 | __IA64_UL_CONST(0x3f00000000000000) | ||
103 | |||
104 | /* ==================================================================== */ | ||
105 | /* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */ | ||
106 | /* ==================================================================== */ | ||
107 | #define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208) | ||
108 | #define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208) | ||
109 | |||
110 | /* ==================================================================== */ | ||
111 | /* Register "SH_EVENT_OCCURRED" */ | ||
112 | /* SHub Interrupt Event Occurred */ | ||
113 | /* ==================================================================== */ | ||
114 | /* SH_EVENT_OCCURRED_UART_INT */ | ||
115 | /* Description: Pending Junk Bus UART Interrupt */ | ||
116 | #define SH_EVENT_OCCURRED_UART_INT_SHFT 20 | ||
117 | #define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000) | ||
118 | |||
119 | /* SH_EVENT_OCCURRED_IPI_INT */ | ||
120 | /* Description: Pending IPI Interrupt */ | ||
121 | #define SH_EVENT_OCCURRED_IPI_INT_SHFT 28 | ||
122 | #define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000) | ||
123 | |||
124 | /* SH_EVENT_OCCURRED_II_INT0 */ | ||
125 | /* Description: Pending II 0 Interrupt */ | ||
126 | #define SH_EVENT_OCCURRED_II_INT0_SHFT 29 | ||
127 | #define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000) | ||
128 | |||
129 | /* SH_EVENT_OCCURRED_II_INT1 */ | ||
130 | /* Description: Pending II 1 Interrupt */ | ||
131 | #define SH_EVENT_OCCURRED_II_INT1_SHFT 30 | ||
132 | #define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000) | ||
133 | |||
134 | /* SH2_EVENT_OCCURRED_EXTIO_INT2 */ | ||
135 | /* Description: Pending SHUB 2 EXT IO INT2 */ | ||
136 | #define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33 | ||
137 | #define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000) | ||
138 | |||
139 | /* SH2_EVENT_OCCURRED_EXTIO_INT3 */ | ||
140 | /* Description: Pending SHUB 2 EXT IO INT3 */ | ||
141 | #define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34 | ||
142 | #define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000) | ||
143 | |||
144 | #define SH_ALL_INT_MASK \ | ||
145 | (SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \ | ||
146 | SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \ | ||
147 | SH_EVENT_OCCURRED_II_INT1_MASK | SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \ | ||
148 | SH2_EVENT_OCCURRED_EXTIO_INT3_MASK) | ||
149 | |||
150 | |||
151 | /* ==================================================================== */ | ||
152 | /* LEDS */ | ||
153 | /* ==================================================================== */ | ||
154 | #define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL | ||
155 | #define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL | ||
156 | #define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL | ||
157 | #define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL | ||
158 | |||
159 | #define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL | ||
160 | #define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL | ||
161 | #define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL | ||
162 | #define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL | ||
163 | |||
164 | /* ==================================================================== */ | ||
165 | /* Register "SH1_PTC_0" */ | ||
166 | /* Purge Translation Cache Message Configuration Information */ | ||
167 | /* ==================================================================== */ | ||
168 | #define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000) | ||
169 | |||
170 | /* SH1_PTC_0_A */ | ||
171 | /* Description: Type */ | ||
172 | #define SH1_PTC_0_A_SHFT 0 | ||
173 | |||
174 | /* SH1_PTC_0_PS */ | ||
175 | /* Description: Page Size */ | ||
176 | #define SH1_PTC_0_PS_SHFT 2 | ||
177 | |||
178 | /* SH1_PTC_0_RID */ | ||
179 | /* Description: Region ID */ | ||
180 | #define SH1_PTC_0_RID_SHFT 8 | ||
181 | |||
182 | /* SH1_PTC_0_START */ | ||
183 | /* Description: Start */ | ||
184 | #define SH1_PTC_0_START_SHFT 63 | ||
185 | |||
186 | /* ==================================================================== */ | ||
187 | /* Register "SH1_PTC_1" */ | ||
188 | /* Purge Translation Cache Message Configuration Information */ | ||
189 | /* ==================================================================== */ | ||
190 | #define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080) | ||
191 | |||
192 | /* SH1_PTC_1_START */ | ||
193 | /* Description: PTC_1 Start */ | ||
194 | #define SH1_PTC_1_START_SHFT 63 | ||
195 | |||
196 | /* ==================================================================== */ | ||
197 | /* Register "SH2_PTC" */ | ||
198 | /* Purge Translation Cache Message Configuration Information */ | ||
199 | /* ==================================================================== */ | ||
200 | #define SH2_PTC __IA64_UL_CONST(0x0000000170000000) | ||
201 | |||
202 | /* SH2_PTC_A */ | ||
203 | /* Description: Type */ | ||
204 | #define SH2_PTC_A_SHFT 0 | ||
205 | |||
206 | /* SH2_PTC_PS */ | ||
207 | /* Description: Page Size */ | ||
208 | #define SH2_PTC_PS_SHFT 2 | ||
209 | |||
210 | /* SH2_PTC_RID */ | ||
211 | /* Description: Region ID */ | ||
212 | #define SH2_PTC_RID_SHFT 4 | ||
213 | |||
214 | /* SH2_PTC_START */ | ||
215 | /* Description: Start */ | ||
216 | #define SH2_PTC_START_SHFT 63 | ||
217 | |||
218 | /* SH2_PTC_ADDR_RID */ | ||
219 | /* Description: Region ID */ | ||
220 | #define SH2_PTC_ADDR_SHFT 4 | ||
221 | #define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000) | ||
222 | |||
223 | /* ==================================================================== */ | ||
224 | /* Register "SH_RTC1_INT_CONFIG" */ | ||
225 | /* SHub RTC 1 Interrupt Config Registers */ | ||
226 | /* ==================================================================== */ | ||
227 | |||
228 | #define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480) | ||
229 | #define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480) | ||
230 | #define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) | ||
231 | #define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) | ||
232 | |||
233 | /* SH_RTC1_INT_CONFIG_TYPE */ | ||
234 | /* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ | ||
235 | #define SH_RTC1_INT_CONFIG_TYPE_SHFT 0 | ||
236 | #define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) | ||
237 | |||
238 | /* SH_RTC1_INT_CONFIG_AGT */ | ||
239 | /* Description: Agent, must be 0 for SHub */ | ||
240 | #define SH_RTC1_INT_CONFIG_AGT_SHFT 3 | ||
241 | #define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) | ||
242 | |||
243 | /* SH_RTC1_INT_CONFIG_PID */ | ||
244 | /* Description: Processor ID, same setting as on targeted McKinley */ | ||
245 | #define SH_RTC1_INT_CONFIG_PID_SHFT 4 | ||
246 | #define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) | ||
247 | |||
248 | /* SH_RTC1_INT_CONFIG_BASE */ | ||
249 | /* Description: Optional interrupt vector area, 2MB aligned */ | ||
250 | #define SH_RTC1_INT_CONFIG_BASE_SHFT 21 | ||
251 | #define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) | ||
252 | |||
253 | /* SH_RTC1_INT_CONFIG_IDX */ | ||
254 | /* Description: Targeted McKinley interrupt vector */ | ||
255 | #define SH_RTC1_INT_CONFIG_IDX_SHFT 52 | ||
256 | #define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) | ||
257 | |||
258 | /* ==================================================================== */ | ||
259 | /* Register "SH_RTC1_INT_ENABLE" */ | ||
260 | /* SHub RTC 1 Interrupt Enable Registers */ | ||
261 | /* ==================================================================== */ | ||
262 | |||
263 | #define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500) | ||
264 | #define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500) | ||
265 | #define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) | ||
266 | #define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) | ||
267 | |||
268 | /* SH_RTC1_INT_ENABLE_RTC1_ENABLE */ | ||
269 | /* Description: Enable RTC 1 Interrupt */ | ||
270 | #define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0 | ||
271 | #define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \ | ||
272 | __IA64_UL_CONST(0x0000000000000001) | ||
273 | |||
274 | /* ==================================================================== */ | ||
275 | /* Register "SH_RTC2_INT_CONFIG" */ | ||
276 | /* SHub RTC 2 Interrupt Config Registers */ | ||
277 | /* ==================================================================== */ | ||
278 | |||
279 | #define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580) | ||
280 | #define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580) | ||
281 | #define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) | ||
282 | #define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) | ||
283 | |||
284 | /* SH_RTC2_INT_CONFIG_TYPE */ | ||
285 | /* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ | ||
286 | #define SH_RTC2_INT_CONFIG_TYPE_SHFT 0 | ||
287 | #define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) | ||
288 | |||
289 | /* SH_RTC2_INT_CONFIG_AGT */ | ||
290 | /* Description: Agent, must be 0 for SHub */ | ||
291 | #define SH_RTC2_INT_CONFIG_AGT_SHFT 3 | ||
292 | #define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) | ||
293 | |||
294 | /* SH_RTC2_INT_CONFIG_PID */ | ||
295 | /* Description: Processor ID, same setting as on targeted McKinley */ | ||
296 | #define SH_RTC2_INT_CONFIG_PID_SHFT 4 | ||
297 | #define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) | ||
298 | |||
299 | /* SH_RTC2_INT_CONFIG_BASE */ | ||
300 | /* Description: Optional interrupt vector area, 2MB aligned */ | ||
301 | #define SH_RTC2_INT_CONFIG_BASE_SHFT 21 | ||
302 | #define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) | ||
303 | |||
304 | /* SH_RTC2_INT_CONFIG_IDX */ | ||
305 | /* Description: Targeted McKinley interrupt vector */ | ||
306 | #define SH_RTC2_INT_CONFIG_IDX_SHFT 52 | ||
307 | #define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) | ||
308 | |||
309 | /* ==================================================================== */ | ||
310 | /* Register "SH_RTC2_INT_ENABLE" */ | ||
311 | /* SHub RTC 2 Interrupt Enable Registers */ | ||
312 | /* ==================================================================== */ | ||
313 | |||
314 | #define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600) | ||
315 | #define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600) | ||
316 | #define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) | ||
317 | #define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) | ||
318 | |||
319 | /* SH_RTC2_INT_ENABLE_RTC2_ENABLE */ | ||
320 | /* Description: Enable RTC 2 Interrupt */ | ||
321 | #define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0 | ||
322 | #define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \ | ||
323 | __IA64_UL_CONST(0x0000000000000001) | ||
324 | |||
325 | /* ==================================================================== */ | ||
326 | /* Register "SH_RTC3_INT_CONFIG" */ | ||
327 | /* SHub RTC 3 Interrupt Config Registers */ | ||
328 | /* ==================================================================== */ | ||
329 | |||
330 | #define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680) | ||
331 | #define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680) | ||
332 | #define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) | ||
333 | #define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) | ||
334 | |||
335 | /* SH_RTC3_INT_CONFIG_TYPE */ | ||
336 | /* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ | ||
337 | #define SH_RTC3_INT_CONFIG_TYPE_SHFT 0 | ||
338 | #define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) | ||
339 | |||
340 | /* SH_RTC3_INT_CONFIG_AGT */ | ||
341 | /* Description: Agent, must be 0 for SHub */ | ||
342 | #define SH_RTC3_INT_CONFIG_AGT_SHFT 3 | ||
343 | #define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) | ||
344 | |||
345 | /* SH_RTC3_INT_CONFIG_PID */ | ||
346 | /* Description: Processor ID, same setting as on targeted McKinley */ | ||
347 | #define SH_RTC3_INT_CONFIG_PID_SHFT 4 | ||
348 | #define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) | ||
349 | |||
350 | /* SH_RTC3_INT_CONFIG_BASE */ | ||
351 | /* Description: Optional interrupt vector area, 2MB aligned */ | ||
352 | #define SH_RTC3_INT_CONFIG_BASE_SHFT 21 | ||
353 | #define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) | ||
354 | |||
355 | /* SH_RTC3_INT_CONFIG_IDX */ | ||
356 | /* Description: Targeted McKinley interrupt vector */ | ||
357 | #define SH_RTC3_INT_CONFIG_IDX_SHFT 52 | ||
358 | #define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) | ||
359 | |||
360 | /* ==================================================================== */ | ||
361 | /* Register "SH_RTC3_INT_ENABLE" */ | ||
362 | /* SHub RTC 3 Interrupt Enable Registers */ | ||
363 | /* ==================================================================== */ | ||
364 | |||
365 | #define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700) | ||
366 | #define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700) | ||
367 | #define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) | ||
368 | #define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) | ||
369 | |||
370 | /* SH_RTC3_INT_ENABLE_RTC3_ENABLE */ | ||
371 | /* Description: Enable RTC 3 Interrupt */ | ||
372 | #define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0 | ||
373 | #define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \ | ||
374 | __IA64_UL_CONST(0x0000000000000001) | ||
375 | |||
376 | /* SH_EVENT_OCCURRED_RTC1_INT */ | ||
377 | /* Description: Pending RTC 1 Interrupt */ | ||
378 | #define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24 | ||
379 | #define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000) | ||
380 | |||
381 | /* SH_EVENT_OCCURRED_RTC2_INT */ | ||
382 | /* Description: Pending RTC 2 Interrupt */ | ||
383 | #define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25 | ||
384 | #define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000) | ||
385 | |||
386 | /* SH_EVENT_OCCURRED_RTC3_INT */ | ||
387 | /* Description: Pending RTC 3 Interrupt */ | ||
388 | #define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26 | ||
389 | #define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000) | ||
390 | |||
391 | /* ==================================================================== */ | ||
392 | /* Register "SH_IPI_ACCESS" */ | ||
393 | /* CPU interrupt Access Permission Bits */ | ||
394 | /* ==================================================================== */ | ||
395 | |||
396 | #define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480) | ||
397 | #define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00) | ||
398 | #define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80) | ||
399 | #define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00) | ||
400 | #define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80) | ||
401 | |||
402 | /* ==================================================================== */ | ||
403 | /* Register "SH_INT_CMPB" */ | ||
404 | /* RTC Compare Value for Processor B */ | ||
405 | /* ==================================================================== */ | ||
406 | |||
407 | #define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080) | ||
408 | #define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080) | ||
409 | #define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
410 | #define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000) | ||
411 | |||
412 | /* SH_INT_CMPB_REAL_TIME_CMPB */ | ||
413 | /* Description: Real Time Clock Compare */ | ||
414 | #define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 | ||
415 | #define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
416 | |||
417 | /* ==================================================================== */ | ||
418 | /* Register "SH_INT_CMPC" */ | ||
419 | /* RTC Compare Value for Processor C */ | ||
420 | /* ==================================================================== */ | ||
421 | |||
422 | #define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100) | ||
423 | #define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100) | ||
424 | #define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
425 | #define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000) | ||
426 | |||
427 | /* SH_INT_CMPC_REAL_TIME_CMPC */ | ||
428 | /* Description: Real Time Clock Compare */ | ||
429 | #define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0 | ||
430 | #define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
431 | |||
432 | /* ==================================================================== */ | ||
433 | /* Register "SH_INT_CMPD" */ | ||
434 | /* RTC Compare Value for Processor D */ | ||
435 | /* ==================================================================== */ | ||
436 | |||
437 | #define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180) | ||
438 | #define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180) | ||
439 | #define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
440 | #define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000) | ||
441 | |||
442 | /* SH_INT_CMPD_REAL_TIME_CMPD */ | ||
443 | /* Description: Real Time Clock Compare */ | ||
444 | #define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 | ||
445 | #define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff) | ||
446 | |||
447 | /* ==================================================================== */ | ||
448 | /* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */ | ||
449 | /* privilege vector for acc=0 */ | ||
450 | /* ==================================================================== */ | ||
451 | #define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300) | ||
452 | |||
453 | /* ==================================================================== */ | ||
454 | /* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */ | ||
455 | /* privilege vector for acc=0 */ | ||
456 | /* ==================================================================== */ | ||
457 | #define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300) | ||
458 | |||
459 | /* ==================================================================== */ | ||
460 | /* Some MMRs are functionally identical (or close enough) on both SHUB1 */ | ||
461 | /* and SHUB2 that it makes sense to define a generic name for the MMR. */ | ||
462 | /* It is acceptable to use (for example) SH_IPI_INT to reference */ | ||
463 | /* the IPI MMR. The value of SH_IPI_INT is determined at runtime based */ | ||
464 | /* on the type of the SHUB. Do not use these #defines in performance */ | ||
465 | /* critical code or loops - there is a small performance penalty. */ | ||
466 | /* ==================================================================== */ | ||
467 | #define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b) | ||
468 | |||
469 | #define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0) | ||
470 | #define SH_IPI_INT shubmmr(SH, IPI_INT) | ||
471 | #define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED) | ||
472 | #define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS) | ||
473 | #define SH_RTC shubmmr(SH, RTC) | ||
474 | #define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG) | ||
475 | #define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE) | ||
476 | #define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG) | ||
477 | #define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE) | ||
478 | #define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG) | ||
479 | #define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE) | ||
480 | #define SH_INT_CMPB shubmmr(SH, INT_CMPB) | ||
481 | #define SH_INT_CMPC shubmmr(SH, INT_CMPC) | ||
482 | #define SH_INT_CMPD shubmmr(SH, INT_CMPD) | ||
483 | |||
484 | /* ========================================================================== */ | ||
485 | /* Register "SH2_BT_ENG_CSR_0" */ | ||
486 | /* Engine 0 Control and Status Register */ | ||
487 | /* ========================================================================== */ | ||
488 | |||
489 | #define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000) | ||
490 | #define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080) | ||
491 | #define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100) | ||
492 | #define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180) | ||
493 | |||
494 | /* ========================================================================== */ | ||
495 | /* BTE interfaces 1-3 */ | ||
496 | /* ========================================================================== */ | ||
497 | |||
498 | #define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000) | ||
499 | #define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000) | ||
500 | #define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000) | ||
501 | |||
502 | #endif /* _ASM_IA64_SN_SHUB_MMR_H */ | ||
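Two small illustrations of how this header was meant to be consumed (sketches, not recovered code): the shubmmr() wrapper picks the SHUB1 or SHUB2 address at runtime, and the *_SHFT/*_MASK pairs compose a register value field by field.

/* Illustrative sketch only. */
#include <asm/sn/shub_mmr.h>

/* SH_IPI_INT expands to (is_shub2() ? SH2_IPI_INT : SH1_IPI_INT). */

static inline unsigned long sh_ipi_int_value(unsigned long pid,
					     unsigned long vector)
{
	/* TYPE field left at 0 (= INT), per the field description above. */
	return ((pid << SH_IPI_INT_PID_SHFT) & SH_IPI_INT_PID_MASK) |
	       ((vector << SH_IPI_INT_IDX_SHFT) & SH_IPI_INT_IDX_MASK) |
	       SH_IPI_INT_SEND_MASK;	/* bit 63: trigger the send */
}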
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h deleted file mode 100644 index 8a1ec139f977..000000000000 --- a/arch/ia64/include/asm/sn/shubio.h +++ /dev/null | |||
@@ -1,3358 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_SHUBIO_H | ||
10 | #define _ASM_IA64_SN_SHUBIO_H | ||
11 | |||
12 | #define HUB_WIDGET_ID_MAX 0xf | ||
13 | #define IIO_NUM_ITTES 7 | ||
14 | #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) | ||
15 | |||
16 | #define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ | ||
17 | /* This register is also accessible from | ||
18 | * Crosstalk at address 0x0. */ | ||
19 | #define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ | ||
20 | #define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ | ||
21 | #define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ | ||
22 | #define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ | ||
23 | #define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ | ||
24 | #define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ | ||
25 | #define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ | ||
26 | #define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ | ||
27 | #define IIO_ILLR 0x00400130 /* IO LLP Log Register */ | ||
28 | #define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ | ||
29 | |||
30 | #define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ | ||
31 | #define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ | ||
32 | |||
33 | #define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ | ||
34 | #define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ | ||
35 | |||
36 | #define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ | ||
37 | #define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ | ||
38 | #define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ | ||
39 | #define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ | ||
40 | #define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ | ||
41 | #define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ | ||
42 | #define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ | ||
43 | |||
44 | #define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ | ||
45 | #define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ | ||
46 | #define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ | ||
47 | #define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ | ||
48 | #define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ | ||
49 | #define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ | ||
50 | #define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ | ||
51 | #define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ | ||
52 | #define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ | ||
53 | |||
54 | #define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ | ||
55 | #define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ | ||
56 | #define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ | ||
57 | #define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ | ||
58 | #define IIO_IBCR 0x00400200 /* IO BTE Control Register */ | ||
59 | |||
60 | #define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ | ||
61 | #define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ | ||
62 | |||
63 | #define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ | ||
64 | |||
65 | #define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ | ||
66 | #define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ | ||
67 | |||
68 | #define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Register */ | ||
69 | #define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ | ||
70 | |||
71 | #define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ | ||
72 | #define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ | ||
73 | #define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ | ||
74 | #define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ | ||
75 | #define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ | ||
76 | |||
77 | #define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ | ||
78 | |||
79 | #define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ | ||
80 | #define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ | ||
81 | #define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ | ||
82 | #define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ | ||
83 | #define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ | ||
84 | #define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ | ||
85 | #define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */ | ||
86 | #define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ | ||
87 | |||
88 | #define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ | ||
89 | #define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ | ||
90 | #define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ | ||
91 | #define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ | ||
92 | #define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ | ||
93 | #define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ | ||
94 | #define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ | ||
95 | #define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ | ||
96 | |||
97 | #define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ | ||
98 | #define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ | ||
99 | #define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ | ||
100 | #define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ | ||
101 | #define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ | ||
102 | #define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ | ||
103 | #define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ | ||
104 | #define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescaler */ | ||
105 | |||
106 | #define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ | ||
107 | #define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ | ||
108 | #define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ | ||
109 | #define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ | ||
110 | #define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ | ||
111 | |||
112 | #define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ | ||
113 | #define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ | ||
114 | #define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ | ||
115 | #define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ | ||
116 | #define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ | ||
117 | |||
118 | #define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ | ||
119 | #define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ | ||
120 | #define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ | ||
121 | #define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ | ||
122 | #define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ | ||
123 | |||
124 | #define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ | ||
125 | #define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ | ||
126 | #define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ | ||
127 | #define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ | ||
128 | #define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ | ||
129 | |||
130 | #define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ | ||
131 | #define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ | ||
132 | #define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ | ||
133 | #define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ | ||
134 | #define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ | ||
135 | |||
136 | #define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ | ||
137 | #define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ | ||
138 | #define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ | ||
139 | #define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ | ||
140 | #define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ | ||
141 | |||
142 | #define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ | ||
143 | #define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ | ||
144 | #define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ | ||
145 | #define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ | ||
146 | #define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ | ||
147 | |||
148 | #define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ | ||
149 | #define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ | ||
150 | #define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ | ||
151 | #define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ | ||
152 | #define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ | ||
153 | |||
154 | #define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ | ||
155 | #define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ | ||
156 | #define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ | ||
157 | #define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ | ||
158 | #define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ | ||
159 | |||
160 | #define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ | ||
161 | #define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ | ||
162 | #define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ | ||
163 | #define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ | ||
164 | #define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ | ||
165 | |||
166 | #define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ | ||
167 | #define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ | ||
168 | #define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ | ||
169 | #define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ | ||
170 | #define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ | ||
171 | |||
172 | #define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ | ||
173 | #define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ | ||
174 | #define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ | ||
175 | #define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ | ||
176 | #define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ | ||
177 | |||
178 | #define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ | ||
179 | #define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ | ||
180 | #define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ | ||
181 | #define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ | ||
182 | #define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ | ||
183 | |||
184 | #define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ | ||
185 | #define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ | ||
186 | #define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ | ||
187 | #define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ | ||
188 | #define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ | ||
189 | |||
190 | #define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ | ||
191 | #define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ | ||
192 | #define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ | ||
193 | #define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ | ||
194 | #define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ | ||
195 | |||
196 | #define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ | ||
197 | #define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ | ||
198 | #define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ | ||
199 | |||
200 | #define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ | ||
201 | |||
202 | #define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ | ||
203 | #define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ | ||
204 | #define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ | ||
205 | #define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ | ||
206 | #define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ | ||
207 | #define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ | ||
208 | #define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ | ||
209 | #define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ | ||
210 | #define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ | ||
211 | #define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ | ||
212 | #define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ | ||
213 | #define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ | ||
214 | |||
215 | #define IIO_IPCR 0x00430000 /* IO Performance Control */ | ||
216 | #define IIO_IPPR 0x00430008 /* IO Performance Profiling */ | ||
217 | |||
218 | /************************************************************************ | ||
219 | * * | ||
220 | * Description: This register echoes some information from the * | ||
221 | * LB_REV_ID register. It is available through Crosstalk as described * | ||
222 | * above. The REV_NUM and MFG_NUM fields receive their values from * | ||
223 | * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * | ||
224 | * The PART_NUM field's value is the Crosstalk device ID number that * | ||
225 | * Steve Miller assigned to the SHub chip. * | ||
226 | * * | ||
227 | ************************************************************************/ | ||
228 | |||
229 | typedef union ii_wid_u { | ||
230 | u64 ii_wid_regval; | ||
231 | struct { | ||
232 | u64 w_rsvd_1:1; | ||
233 | u64 w_mfg_num:11; | ||
234 | u64 w_part_num:16; | ||
235 | u64 w_rev_num:4; | ||
236 | u64 w_rsvd:32; | ||
237 | } ii_wid_fld_s; | ||
238 | } ii_wid_u_t; | ||
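Since the description above explains that IIO_WID echoes the REVISION, MANUFACTURER and part-number information from LB_REV_ID, a one-line sketch (illustrative only) of how the union's bitfield view decodes the raw register value:

/* Illustrative sketch only. */
static inline unsigned int ii_wid_part_num(ii_wid_u_t wid)
{
	/* Bitfield view of the raw 64-bit IIO_WID value. */
	return wid.ii_wid_fld_s.w_part_num;
}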
239 | |||
240 | /************************************************************************ | ||
241 | * * | ||
242 | * The fields in this register are set upon detection of an error * | ||
243 | * and cleared by various mechanisms, as explained in the * | ||
244 | * description. * | ||
245 | * * | ||
246 | ************************************************************************/ | ||
247 | |||
248 | typedef union ii_wstat_u { | ||
249 | u64 ii_wstat_regval; | ||
250 | struct { | ||
251 | u64 w_pending:4; | ||
252 | u64 w_xt_crd_to:1; | ||
253 | u64 w_xt_tail_to:1; | ||
254 | u64 w_rsvd_3:3; | ||
255 | u64 w_tx_mx_rty:1; | ||
256 | u64 w_rsvd_2:6; | ||
257 | u64 w_llp_tx_cnt:8; | ||
258 | u64 w_rsvd_1:8; | ||
259 | u64 w_crazy:1; | ||
260 | u64 w_rsvd:31; | ||
261 | } ii_wstat_fld_s; | ||
262 | } ii_wstat_u_t; | ||
263 | |||
264 | /************************************************************************ | ||
265 | * * | ||
266 | * Description: This is a read-write enabled register. It controls * | ||
267 | * various aspects of the Crosstalk flow control. * | ||
268 | * * | ||
269 | ************************************************************************/ | ||
270 | |||
271 | typedef union ii_wcr_u { | ||
272 | u64 ii_wcr_regval; | ||
273 | struct { | ||
274 | u64 w_wid:4; | ||
275 | u64 w_tag:1; | ||
276 | u64 w_rsvd_1:8; | ||
277 | u64 w_dst_crd:3; | ||
278 | u64 w_f_bad_pkt:1; | ||
279 | u64 w_dir_con:1; | ||
280 | u64 w_e_thresh:5; | ||
281 | u64 w_rsvd:41; | ||
282 | } ii_wcr_fld_s; | ||
283 | } ii_wcr_u_t; | ||
284 | |||
285 | /************************************************************************ | ||
286 | * * | ||
287 | * Description: This register's value is a bit vector that guards * | ||
288 | * access to local registers within the II as well as to external * | ||
289 | * Crosstalk widgets. Each bit in the register corresponds to a * | ||
290 | * particular region in the system; a region consists of one, two or * | ||
291 | * four nodes (depending on the value of the REGION_SIZE field in the * | ||
292 | * LB_REV_ID register, which is documented in Section 8.3.1.1). The * | ||
293 | * protection provided by this register applies to PIO read * | ||
294 | * operations as well as PIO write operations. The II will perform a * | ||
295 | * PIO read or write request only if the bit for the requestor's * | ||
296 | * region is set; otherwise, the II will not perform the requested * | ||
297 | * operation and will return an error response. When a PIO read or * | ||
298 | * write request targets an external Crosstalk widget, then not only * | ||
299 | * must the bit for the requestor's region be set in the ILAPR, but * | ||
300 | * also the target widget's bit in the IOWA register must be set in * | ||
301 | * order for the II to perform the requested operation; otherwise, * | ||
302 | * the II will return an error response. Hence, the protection * | ||
303 | * provided by the IOWA register supplements the protection provided * | ||
304 | * by the ILAPR for requests that target external Crosstalk widgets. * | ||
305 | * This register itself can be accessed only by the nodes whose * | ||
306 | * region ID bits are enabled in this same register. It can also be * | ||
307 | * accessed through the IAlias space by the local processors. * | ||
308 | * The reset value of this register allows access by all nodes. * | ||
309 | * * | ||
310 | ************************************************************************/ | ||
311 | |||
312 | typedef union ii_ilapr_u { | ||
313 | u64 ii_ilapr_regval; | ||
314 | struct { | ||
315 | u64 i_region:64; | ||
316 | } ii_ilapr_fld_s; | ||
317 | } ii_ilapr_u_t; | ||
318 | |||
319 | /************************************************************************ | ||
320 | * * | ||
321 | * Description: A write to this register of the 64-bit value * | ||
322 | * "SGIrules" in ASCII, will cause the bit in the ILAPR register * | ||
323 | * corresponding to the region of the requestor to be set (allow * | ||
324 | * access). A write of any other value will be ignored. Access * | ||
325 | * protection for this register is "SGIrules". * | ||
326 | * This register can also be accessed through the IAlias space. * | ||
327 | * However, this access will not change the access permissions in the * | ||
328 | * ILAPR. * | ||
329 | * * | ||
330 | ************************************************************************/ | ||
331 | |||
332 | typedef union ii_ilapo_u { | ||
333 | u64 ii_ilapo_regval; | ||
334 | struct { | ||
335 | u64 i_io_ovrride:64; | ||
336 | } ii_ilapo_fld_s; | ||
337 | } ii_ilapo_u_t; | ||
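Tying the ILAPR and ILAPO descriptions together, here is a minimal sketch (not code from this file) of how software might perform the "SGIrules" unlock. It assumes the typedefs above, a hypothetical shub_write_reg() MMIO helper standing in for the real SN2 accessors, and an ILAPO register offset supplied by the caller; the byte ordering expected by the hardware is likewise an assumption.

#include <stdint.h>
#include <string.h>

/* Hypothetical MMIO write helper; the real SN2 accessors are not shown
 * in this hunk. */
extern void shub_write_reg(uint64_t offset, uint64_t value);

/* Pack the eight ASCII characters of "SGIrules" into one 64-bit value and
 * write it to ILAPO, which (per the description above) sets the requesting
 * region's bit in ILAPR. */
static void ilapo_allow_this_region(uint64_t iio_ilapo)
{
        uint64_t key;

        /* How the hardware orders these bytes is an assumption of this
         * sketch. */
        memcpy(&key, "SGIrules", sizeof(key));
        shub_write_reg(iio_ilapo, key);
}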
338 | |||
339 | /************************************************************************ | ||
340 | * * | ||
341 | * This register qualifies all the PIO and Graphics writes launched * | ||
342 | * from the SHUB towards a widget. * | ||
343 | * * | ||
344 | ************************************************************************/ | ||
345 | |||
346 | typedef union ii_iowa_u { | ||
347 | u64 ii_iowa_regval; | ||
348 | struct { | ||
349 | u64 i_w0_oac:1; | ||
350 | u64 i_rsvd_1:7; | ||
351 | u64 i_wx_oac:8; | ||
352 | u64 i_rsvd:48; | ||
353 | } ii_iowa_fld_s; | ||
354 | } ii_iowa_u_t; | ||
355 | |||
356 | /************************************************************************ | ||
357 | * * | ||
358 | * Description: This register qualifies all the requests launched * | ||
359 | * from a widget towards the Shub. This register is intended to be * | ||
360 | * used by software in case of misbehaving widgets. * | ||
361 | * * | ||
362 | * * | ||
363 | ************************************************************************/ | ||
364 | |||
365 | typedef union ii_iiwa_u { | ||
366 | u64 ii_iiwa_regval; | ||
367 | struct { | ||
368 | u64 i_w0_iac:1; | ||
369 | u64 i_rsvd_1:7; | ||
370 | u64 i_wx_iac:8; | ||
371 | u64 i_rsvd:48; | ||
372 | } ii_iiwa_fld_s; | ||
373 | } ii_iiwa_u_t; | ||
374 | |||
375 | /************************************************************************ | ||
376 | * * | ||
377 | * Description: This register qualifies all the operations launched * | ||
378 | * from a widget towards the SHub. It allows individual access * | ||
379 | * control for up to 8 devices per widget. A device refers to an * | ||
380 | * individual DMA master hosted by a widget. * | ||
381 | * The bits in each field of this register are cleared by the Shub * | ||
382 | * upon detection of an error which requires the device to be * | ||
383 | * disabled. These fields assume that 0 <= TNUM <= 7 (i.e., Bridge-centric * | ||
384 | * Crosstalk). Whether or not a device has access rights to this * | ||
385 | * Shub is determined by an AND of the device enable bit in the * | ||
386 | * appropriate field of this register and the corresponding bit in * | ||
387 | * the Wx_IAC field (for the widget which this device belongs to). * | ||
388 | * The bits in this field are set by writing a 1 to them. Incoming * | ||
389 | * replies from Crosstalk are not subject to this access control * | ||
390 | * mechanism. * | ||
391 | * * | ||
392 | ************************************************************************/ | ||
393 | |||
394 | typedef union ii_iidem_u { | ||
395 | u64 ii_iidem_regval; | ||
396 | struct { | ||
397 | u64 i_w8_dxs:8; | ||
398 | u64 i_w9_dxs:8; | ||
399 | u64 i_wa_dxs:8; | ||
400 | u64 i_wb_dxs:8; | ||
401 | u64 i_wc_dxs:8; | ||
402 | u64 i_wd_dxs:8; | ||
403 | u64 i_we_dxs:8; | ||
404 | u64 i_wf_dxs:8; | ||
405 | } ii_iidem_fld_s; | ||
406 | } ii_iidem_u_t; | ||
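The access-rights AND described above can be illustrated with a short sketch. It follows the bit layouts of the IIDEM and IIWA unions shown in this hunk; which widget maps to which bit inside Wx_IAC, and the usual low-to-high bitfield allocation, are assumptions of the sketch rather than facts from this file.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: does device 'tnum' (0..7) behind widget 'widget' (0x8..0xf)
 * currently have inbound access?  Per the description above, this is the
 * AND of the IIDEM device-enable bit and the widget's Wx_IAC bit in IIWA. */
static bool sn2_device_has_access(uint64_t iidem, uint64_t iiwa,
                                  unsigned int widget, unsigned int tnum)
{
        /* IIDEM packs eight 8-bit fields, one per widget 8..f, lowest
         * field for widget 8. */
        uint64_t dev_bits = (iidem >> ((widget - 8) * 8)) & 0xff;
        /* IIWA: bit 0 is W0_IAC, bits 8..15 are Wx_IAC; assume W8_IAC is
         * bit 8 through WF_IAC at bit 15. */
        uint64_t wx_iac = (iiwa >> widget) & 0x1;

        return ((dev_bits >> tnum) & 0x1) && wx_iac;
}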
407 | |||
408 | /************************************************************************ | ||
409 | * * | ||
410 | * This register contains the various programmable fields necessary * | ||
411 | * for controlling and observing the LLP signals. * | ||
412 | * * | ||
413 | ************************************************************************/ | ||
414 | |||
415 | typedef union ii_ilcsr_u { | ||
416 | u64 ii_ilcsr_regval; | ||
417 | struct { | ||
418 | u64 i_nullto:6; | ||
419 | u64 i_rsvd_4:2; | ||
420 | u64 i_wrmrst:1; | ||
421 | u64 i_rsvd_3:1; | ||
422 | u64 i_llp_en:1; | ||
423 | u64 i_bm8:1; | ||
424 | u64 i_llp_stat:2; | ||
425 | u64 i_remote_power:1; | ||
426 | u64 i_rsvd_2:1; | ||
427 | u64 i_maxrtry:10; | ||
428 | u64 i_d_avail_sel:2; | ||
429 | u64 i_rsvd_1:4; | ||
430 | u64 i_maxbrst:10; | ||
431 | u64 i_rsvd:22; | ||
432 | |||
433 | } ii_ilcsr_fld_s; | ||
434 | } ii_ilcsr_u_t; | ||
435 | |||
436 | /************************************************************************ | ||
437 | * * | ||
438 | * This is simply a status register that monitors the LLP error * | ||
439 | * rate. * | ||
440 | * * | ||
441 | ************************************************************************/ | ||
442 | |||
443 | typedef union ii_illr_u { | ||
444 | u64 ii_illr_regval; | ||
445 | struct { | ||
446 | u64 i_sn_cnt:16; | ||
447 | u64 i_cb_cnt:16; | ||
448 | u64 i_rsvd:32; | ||
449 | } ii_illr_fld_s; | ||
450 | } ii_illr_u_t; | ||
451 | |||
452 | /************************************************************************ | ||
453 | * * | ||
454 | * Description: All II-detected non-BTE error interrupts are * | ||
455 | * specified via this register. * | ||
456 | * NOTE: The PI interrupt register address is hardcoded in the II. If * | ||
457 | * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI * | ||
458 | * packet) to address offset 0x0180_0090 within the local register * | ||
459 | * address space of PI0 on the node specified by the NODE field. If * | ||
460 | * PI_ID==1, then the II sends the interrupt request to address * | ||
461 | * offset 0x01A0_0090 within the local register address space of PI1 * | ||
462 | * on the node specified by the NODE field. * | ||
463 | * * | ||
464 | ************************************************************************/ | ||
465 | |||
466 | typedef union ii_iidsr_u { | ||
467 | u64 ii_iidsr_regval; | ||
468 | struct { | ||
469 | u64 i_level:8; | ||
470 | u64 i_pi_id:1; | ||
471 | u64 i_node:11; | ||
472 | u64 i_rsvd_3:4; | ||
473 | u64 i_enable:1; | ||
474 | u64 i_rsvd_2:3; | ||
475 | u64 i_int_sent:2; | ||
476 | u64 i_rsvd_1:2; | ||
477 | u64 i_pi0_forward_int:1; | ||
478 | u64 i_pi1_forward_int:1; | ||
479 | u64 i_rsvd:30; | ||
480 | } ii_iidsr_fld_s; | ||
481 | } ii_iidsr_u_t; | ||
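For reference, a sketch of how the PI_ID field selects the hard-wired interrupt-register offset quoted above. It assumes the ii_iidsr_u_t typedef just defined (and the kernel's u64 type) and deliberately leaves out the node/NASID part of the address, which is defined elsewhere in the SN2 headers.

#include <stdint.h>

/* Pick the PI interrupt-register offset the II will target, per the
 * hardcoded values in the description above. */
static uint64_t iidsr_pi_intr_offset(const ii_iidsr_u_t *iidsr)
{
        return iidsr->ii_iidsr_fld_s.i_pi_id ? 0x01a00090ULL  /* PI1 */
                                             : 0x01800090ULL; /* PI0 */
}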
482 | |||
483 | /************************************************************************ | ||
484 | * * | ||
485 | * There are two instances of this register. This register is used * | ||
486 | * for matching up the incoming responses from the graphics widget to * | ||
487 | * the processor that initiated the graphics operation. The * | ||
488 | * write-responses are converted to graphics credits and returned to * | ||
489 | * the processor so that the processor interface can manage the flow * | ||
490 | * control. * | ||
491 | * * | ||
492 | ************************************************************************/ | ||
493 | |||
494 | typedef union ii_igfx0_u { | ||
495 | u64 ii_igfx0_regval; | ||
496 | struct { | ||
497 | u64 i_w_num:4; | ||
498 | u64 i_pi_id:1; | ||
499 | u64 i_n_num:12; | ||
500 | u64 i_p_num:1; | ||
501 | u64 i_rsvd:46; | ||
502 | } ii_igfx0_fld_s; | ||
503 | } ii_igfx0_u_t; | ||
504 | |||
505 | /************************************************************************ | ||
506 | * * | ||
507 | * There are two instances of this register. This register is used * | ||
508 | * for matching up the incoming responses from the graphics widget to * | ||
509 | * the processor that initiated the graphics operation. The * | ||
510 | * write-responses are converted to graphics credits and returned to * | ||
511 | * the processor so that the processor interface can manage the flow * | ||
512 | * control. * | ||
513 | * * | ||
514 | ************************************************************************/ | ||
515 | |||
516 | typedef union ii_igfx1_u { | ||
517 | u64 ii_igfx1_regval; | ||
518 | struct { | ||
519 | u64 i_w_num:4; | ||
520 | u64 i_pi_id:1; | ||
521 | u64 i_n_num:12; | ||
522 | u64 i_p_num:1; | ||
523 | u64 i_rsvd:46; | ||
524 | } ii_igfx1_fld_s; | ||
525 | } ii_igfx1_u_t; | ||
526 | |||
527 | /************************************************************************ | ||
528 | * * | ||
529 | * There are two instances of this register. These registers are * | ||
530 | * used as scratch registers for software use. * | ||
531 | * * | ||
532 | ************************************************************************/ | ||
533 | |||
534 | typedef union ii_iscr0_u { | ||
535 | u64 ii_iscr0_regval; | ||
536 | struct { | ||
537 | u64 i_scratch:64; | ||
538 | } ii_iscr0_fld_s; | ||
539 | } ii_iscr0_u_t; | ||
540 | |||
541 | /************************************************************************ | ||
542 | * * | ||
543 | * There are two instances of this register. These registers are * | ||
544 | * used as scratch registers for software use. * | ||
545 | * * | ||
546 | ************************************************************************/ | ||
547 | |||
548 | typedef union ii_iscr1_u { | ||
549 | u64 ii_iscr1_regval; | ||
550 | struct { | ||
551 | u64 i_scratch:64; | ||
552 | } ii_iscr1_fld_s; | ||
553 | } ii_iscr1_u_t; | ||
554 | |||
555 | /************************************************************************ | ||
556 | * * | ||
557 | * Description: There are seven instances of translation table entry * | ||
558 | * registers. Each register maps a Shub Big Window to a 48-bit * | ||
559 | * address on Crosstalk. * | ||
560 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
561 | * number) are used to select one of these 7 registers. The Widget * | ||
562 | * number field is then derived from the W_NUM field for synthesizing * | ||
563 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
564 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
565 | * are padded with zeros. Although the maximum Crosstalk space * | ||
566 | * addressable by the SHub is thus the lower 16 GBytes per widget * | ||
567 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
568 | * * | ||
569 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
570 | * Window number) are used to select one of these 7 registers. The * | ||
571 | * Widget number field is then derived from the W_NUM field for * | ||
572 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
573 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
574 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
575 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
576 | * Crosstalk space addressable by the Shub is thus the lower * | ||
577 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
578 | * be accessed. * | ||
579 | * * | ||
580 | ************************************************************************/ | ||
581 | |||
582 | typedef union ii_itte1_u { | ||
583 | u64 ii_itte1_regval; | ||
584 | struct { | ||
585 | u64 i_offset:5; | ||
586 | u64 i_rsvd_1:3; | ||
587 | u64 i_w_num:4; | ||
588 | u64 i_iosp:1; | ||
589 | u64 i_rsvd:51; | ||
590 | } ii_itte1_fld_s; | ||
591 | } ii_itte1_u_t; | ||
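The M-mode synthesis spelled out above can be written as a short sketch (not code from this file), assuming the ii_itte1_u_t typedef just defined: Crosstalk[33:0] = {OFFSET[4:0], SysAD[28:0]}, upper address bits zero, and the target widget taken from W_NUM. The N-mode variant described above differs only in taking SysAD[27:0] and sourcing Crosstalk[47] from the IOSP bit.

#include <stdint.h>

/* Synthesize the M-mode Crosstalk address and widget number from one
 * Big Window translation table entry. */
static uint64_t itte_mmode_xtalk_addr(const ii_itte1_u_t *itte,
                                      uint64_t sysad, unsigned int *widget)
{
        uint64_t offset = itte->ii_itte1_fld_s.i_offset;  /* 5 bits */

        *widget = itte->ii_itte1_fld_s.i_w_num;           /* 4 bits */
        return (offset << 29) | (sysad & 0x1fffffffULL);  /* Crosstalk[33:0] */
}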
592 | |||
593 | /************************************************************************ | ||
594 | * * | ||
595 | * Description: There are seven instances of translation table entry * | ||
596 | * registers. Each register maps a Shub Big Window to a 48-bit * | ||
597 | * address on Crosstalk. * | ||
598 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
599 | * number) are used to select one of these 7 registers. The Widget * | ||
600 | * number field is then derived from the W_NUM field for synthesizing * | ||
601 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
602 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
603 | * are padded with zeros. Although the maximum Crosstalk space * | ||
604 | * addressable by the Shub is thus the lower 16 GBytes per widget * | ||
605 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
606 | * * | ||
607 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
608 | * Window number) are used to select one of these 7 registers. The * | ||
609 | * Widget number field is then derived from the W_NUM field for * | ||
610 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
611 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
612 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
613 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
614 | * Crosstalk space addressable by the Shub is thus the lower * | ||
615 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
616 | * be accessed. * | ||
617 | * * | ||
618 | ************************************************************************/ | ||
619 | |||
620 | typedef union ii_itte2_u { | ||
621 | u64 ii_itte2_regval; | ||
622 | struct { | ||
623 | u64 i_offset:5; | ||
624 | u64 i_rsvd_1:3; | ||
625 | u64 i_w_num:4; | ||
626 | u64 i_iosp:1; | ||
627 | u64 i_rsvd:51; | ||
628 | } ii_itte2_fld_s; | ||
629 | } ii_itte2_u_t; | ||
630 | |||
631 | /************************************************************************ | ||
632 | * * | ||
633 | * Description: There are seven instances of translation table entry * | ||
634 | * registers. Each register maps a Shub Big Window to a 48-bit * | ||
635 | * address on Crosstalk. * | ||
636 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
637 | * number) are used to select one of these 7 registers. The Widget * | ||
638 | * number field is then derived from the W_NUM field for synthesizing * | ||
639 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
640 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
641 | * are padded with zeros. Although the maximum Crosstalk space * | ||
642 | * addressable by the Shub is thus the lower 16 GBytes per widget * | ||
643 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
644 | * * | ||
645 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
646 | * Window number) are used to select one of these 7 registers. The * | ||
647 | * Widget number field is then derived from the W_NUM field for * | ||
648 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
649 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
650 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
651 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
652 | * Crosstalk space addressable by the SHub is thus the lower * | ||
653 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
654 | * be accessed. * | ||
655 | * * | ||
656 | ************************************************************************/ | ||
657 | |||
658 | typedef union ii_itte3_u { | ||
659 | u64 ii_itte3_regval; | ||
660 | struct { | ||
661 | u64 i_offset:5; | ||
662 | u64 i_rsvd_1:3; | ||
663 | u64 i_w_num:4; | ||
664 | u64 i_iosp:1; | ||
665 | u64 i_rsvd:51; | ||
666 | } ii_itte3_fld_s; | ||
667 | } ii_itte3_u_t; | ||
668 | |||
669 | /************************************************************************ | ||
670 | * * | ||
671 | * Description: There are seven instances of translation table entry * | ||
672 | * registers. Each register maps a SHub Big Window to a 48-bit * | ||
673 | * address on Crosstalk. * | ||
674 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
675 | * number) are used to select one of these 7 registers. The Widget * | ||
676 | * number field is then derived from the W_NUM field for synthesizing * | ||
677 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
678 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
679 | * are padded with zeros. Although the maximum Crosstalk space * | ||
680 | * addressable by the SHub is thus the lower 16 GBytes per widget * | ||
681 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
682 | * * | ||
683 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
684 | * Window number) are used to select one of these 7 registers. The * | ||
685 | * Widget number field is then derived from the W_NUM field for * | ||
686 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
687 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
688 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
689 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
690 | * Crosstalk space addressable by the SHub is thus the lower * | ||
691 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
692 | * be accessed. * | ||
693 | * * | ||
694 | ************************************************************************/ | ||
695 | |||
696 | typedef union ii_itte4_u { | ||
697 | u64 ii_itte4_regval; | ||
698 | struct { | ||
699 | u64 i_offset:5; | ||
700 | u64 i_rsvd_1:3; | ||
701 | u64 i_w_num:4; | ||
702 | u64 i_iosp:1; | ||
703 | u64 i_rsvd:51; | ||
704 | } ii_itte4_fld_s; | ||
705 | } ii_itte4_u_t; | ||
706 | |||
707 | /************************************************************************ | ||
708 | * * | ||
709 | * Description: There are seven instances of translation table entry * | ||
710 | * registers. Each register maps a SHub Big Window to a 48-bit * | ||
711 | * address on Crosstalk. * | ||
712 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
713 | * number) are used to select one of these 7 registers. The Widget * | ||
714 | * number field is then derived from the W_NUM field for synthesizing * | ||
715 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
716 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
717 | * are padded with zeros. Although the maximum Crosstalk space * | ||
718 | * addressable by the Shub is thus the lower 16 GBytes per widget * | ||
719 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
720 | * * | ||
721 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
722 | * Window number) are used to select one of these 7 registers. The * | ||
723 | * Widget number field is then derived from the W_NUM field for * | ||
724 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
725 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
726 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
727 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
728 | * Crosstalk space addressable by the Shub is thus the lower * | ||
729 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
730 | * be accessed. * | ||
731 | * * | ||
732 | ************************************************************************/ | ||
733 | |||
734 | typedef union ii_itte5_u { | ||
735 | u64 ii_itte5_regval; | ||
736 | struct { | ||
737 | u64 i_offset:5; | ||
738 | u64 i_rsvd_1:3; | ||
739 | u64 i_w_num:4; | ||
740 | u64 i_iosp:1; | ||
741 | u64 i_rsvd:51; | ||
742 | } ii_itte5_fld_s; | ||
743 | } ii_itte5_u_t; | ||
744 | |||
745 | /************************************************************************ | ||
746 | * * | ||
747 | * Description: There are seven instances of translation table entry * | ||
748 | * registers. Each register maps a Shub Big Window to a 48-bit * | ||
749 | * address on Crosstalk. * | ||
750 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
751 | * number) are used to select one of these 7 registers. The Widget * | ||
752 | * number field is then derived from the W_NUM field for synthesizing * | ||
753 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
754 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
755 | * are padded with zeros. Although the maximum Crosstalk space * | ||
756 | * addressable by the Shub is thus the lower 16 GBytes per widget * | ||
757 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
758 | * * | ||
759 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
760 | * Window number) are used to select one of these 7 registers. The * | ||
761 | * Widget number field is then derived from the W_NUM field for * | ||
762 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
763 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
764 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
765 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
766 | * Crosstalk space addressable by the Shub is thus the lower * | ||
767 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
768 | * be accessed. * | ||
769 | * * | ||
770 | ************************************************************************/ | ||
771 | |||
772 | typedef union ii_itte6_u { | ||
773 | u64 ii_itte6_regval; | ||
774 | struct { | ||
775 | u64 i_offset:5; | ||
776 | u64 i_rsvd_1:3; | ||
777 | u64 i_w_num:4; | ||
778 | u64 i_iosp:1; | ||
779 | u64 i_rsvd:51; | ||
780 | } ii_itte6_fld_s; | ||
781 | } ii_itte6_u_t; | ||
782 | |||
783 | /************************************************************************ | ||
784 | * * | ||
785 | * Description: There are seven instances of translation table entry * | ||
786 | * registers. Each register maps a Shub Big Window to a 48-bit * | ||
787 | * address on Crosstalk. * | ||
788 | * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * | ||
789 | * number) are used to select one of these 7 registers. The Widget * | ||
790 | * number field is then derived from the W_NUM field for synthesizing * | ||
791 | * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * | ||
792 | * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * | ||
793 | * are padded with zeros. Although the maximum Crosstalk space * | ||
794 | * addressable by the Shub is thus the lower 16 GBytes per widget * | ||
795 | * (M-mode), only 7/32nds of this space can be accessed. * | ||
796 | * * | ||
797 | * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * | ||
798 | * Window number) are used to select one of these 7 registers. The * | ||
799 | * Widget number field is then derived from the W_NUM field for * | ||
800 | * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * | ||
801 | * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * | ||
802 | * field is used as Crosstalk[47], and the remainder of the Crosstalk * | ||
803 | * address bits (Crosstalk[46:34]) are always zero. While the maximum * | ||
804 | * Crosstalk space addressable by the SHub is thus the lower * | ||
805 | * 8 GBytes per widget (N-mode), only 7/32nds of this space can * | ||
806 | * be accessed. * | ||
807 | * * | ||
808 | ************************************************************************/ | ||
809 | |||
810 | typedef union ii_itte7_u { | ||
811 | u64 ii_itte7_regval; | ||
812 | struct { | ||
813 | u64 i_offset:5; | ||
814 | u64 i_rsvd_1:3; | ||
815 | u64 i_w_num:4; | ||
816 | u64 i_iosp:1; | ||
817 | u64 i_rsvd:51; | ||
818 | } ii_itte7_fld_s; | ||
819 | } ii_itte7_u_t; | ||
820 | |||
821 | /************************************************************************ | ||
822 | * * | ||
823 | * Description: There are 9 instances of this register, one per * | ||
824 | * actual widget in this implementation of SHub and Crossbow. * | ||
825 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
826 | * refers to Crossbow's internal space. * | ||
827 | * This register contains the state elements per widget that are * | ||
828 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
829 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
830 | * description of this register * | ||
831 | * The SPUR_WR bit requires some explanation. When this register is * | ||
832 | * written, the new value of the C field is captured in an internal * | ||
833 | * register so the hardware can remember what the programmer wrote * | ||
834 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
835 | * increments above this stored value, which indicates that there * | ||
836 | * have been more responses received than requests sent. The SPUR_WR * | ||
837 | * bit cannot be cleared until a value is written to the IPRBx * | ||
838 | * register; the write will correct the C field and capture its new * | ||
839 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
840 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
841 | * * | ||
842 | * * | ||
843 | ************************************************************************/ | ||
844 | |||
845 | typedef union ii_iprb0_u { | ||
846 | u64 ii_iprb0_regval; | ||
847 | struct { | ||
848 | u64 i_c:8; | ||
849 | u64 i_na:14; | ||
850 | u64 i_rsvd_2:2; | ||
851 | u64 i_nb:14; | ||
852 | u64 i_rsvd_1:2; | ||
853 | u64 i_m:2; | ||
854 | u64 i_f:1; | ||
855 | u64 i_of_cnt:5; | ||
856 | u64 i_error:1; | ||
857 | u64 i_rd_to:1; | ||
858 | u64 i_spur_wr:1; | ||
859 | u64 i_spur_rd:1; | ||
860 | u64 i_rsvd:11; | ||
861 | u64 i_mult_err:1; | ||
862 | } ii_iprb0_fld_s; | ||
863 | } ii_iprb0_u_t; | ||
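A hedged sketch of the SPUR_WR recovery step described above, using hypothetical shub_read_reg()/shub_write_reg() helpers in place of the real SN2 accessors and with the register offset and corrected credit count supplied by the caller:

#include <stdint.h>

extern uint64_t shub_read_reg(uint64_t offset);
extern void shub_write_reg(uint64_t offset, uint64_t value);

/* Rewrite IPRB0 with a corrected credit count.  Per the description above,
 * the write makes the hardware recapture the C field, which is what finally
 * allows SPUR_WR to clear (together with the matching E_PRB bit in IECLR,
 * defined further below in this header). */
static void iprb0_rewrite_credits(uint64_t iio_iprb0, unsigned int credits)
{
        ii_iprb0_u_t iprb;

        iprb.ii_iprb0_regval = shub_read_reg(iio_iprb0);
        iprb.ii_iprb0_fld_s.i_c = credits;
        shub_write_reg(iio_iprb0, iprb.ii_iprb0_regval);
}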
864 | |||
865 | /************************************************************************ | ||
866 | * * | ||
867 | * Description: There are 9 instances of this register, one per * | ||
868 | * actual widget in this implementation of SHub and Crossbow. * | ||
869 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
870 | * refers to Crossbow's internal space. * | ||
871 | * This register contains the state elements per widget that are * | ||
872 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
873 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
874 | * description of this register * | ||
875 | * The SPUR_WR bit requires some explanation. When this register is * | ||
876 | * written, the new value of the C field is captured in an internal * | ||
877 | * register so the hardware can remember what the programmer wrote * | ||
878 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
879 | * increments above this stored value, which indicates that there * | ||
880 | * have been more responses received than requests sent. The SPUR_WR * | ||
881 | * bit cannot be cleared until a value is written to the IPRBx * | ||
882 | * register; the write will correct the C field and capture its new * | ||
883 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
884 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
885 | * * | ||
886 | * * | ||
887 | ************************************************************************/ | ||
888 | |||
889 | typedef union ii_iprb8_u { | ||
890 | u64 ii_iprb8_regval; | ||
891 | struct { | ||
892 | u64 i_c:8; | ||
893 | u64 i_na:14; | ||
894 | u64 i_rsvd_2:2; | ||
895 | u64 i_nb:14; | ||
896 | u64 i_rsvd_1:2; | ||
897 | u64 i_m:2; | ||
898 | u64 i_f:1; | ||
899 | u64 i_of_cnt:5; | ||
900 | u64 i_error:1; | ||
901 | u64 i_rd_to:1; | ||
902 | u64 i_spur_wr:1; | ||
903 | u64 i_spur_rd:1; | ||
904 | u64 i_rsvd:11; | ||
905 | u64 i_mult_err:1; | ||
906 | } ii_iprb8_fld_s; | ||
907 | } ii_iprb8_u_t; | ||
908 | |||
909 | /************************************************************************ | ||
910 | * * | ||
911 | * Description: There are 9 instances of this register, one per * | ||
912 | * actual widget in this implementation of SHub and Crossbow. * | ||
913 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
914 | * refers to Crossbow's internal space. * | ||
915 | * This register contains the state elements per widget that are * | ||
916 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
917 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
918 | * description of this register * | ||
919 | * The SPUR_WR bit requires some explanation. When this register is * | ||
920 | * written, the new value of the C field is captured in an internal * | ||
921 | * register so the hardware can remember what the programmer wrote * | ||
922 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
923 | * increments above this stored value, which indicates that there * | ||
924 | * have been more responses received than requests sent. The SPUR_WR * | ||
925 | * bit cannot be cleared until a value is written to the IPRBx * | ||
926 | * register; the write will correct the C field and capture its new * | ||
927 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
928 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
929 | * * | ||
930 | * * | ||
931 | ************************************************************************/ | ||
932 | |||
933 | typedef union ii_iprb9_u { | ||
934 | u64 ii_iprb9_regval; | ||
935 | struct { | ||
936 | u64 i_c:8; | ||
937 | u64 i_na:14; | ||
938 | u64 i_rsvd_2:2; | ||
939 | u64 i_nb:14; | ||
940 | u64 i_rsvd_1:2; | ||
941 | u64 i_m:2; | ||
942 | u64 i_f:1; | ||
943 | u64 i_of_cnt:5; | ||
944 | u64 i_error:1; | ||
945 | u64 i_rd_to:1; | ||
946 | u64 i_spur_wr:1; | ||
947 | u64 i_spur_rd:1; | ||
948 | u64 i_rsvd:11; | ||
949 | u64 i_mult_err:1; | ||
950 | } ii_iprb9_fld_s; | ||
951 | } ii_iprb9_u_t; | ||
952 | |||
953 | /************************************************************************ | ||
954 | * * | ||
955 | * Description: There are 9 instances of this register, one per * | ||
956 | * actual widget in this implementation of SHub and Crossbow. * | ||
957 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
958 | * refers to Crossbow's internal space. * | ||
959 | * This register contains the state elements per widget that are * | ||
960 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
961 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
962 | * description of this register * | ||
963 | * The SPUR_WR bit requires some explanation. When this register is * | ||
964 | * written, the new value of the C field is captured in an internal * | ||
965 | * register so the hardware can remember what the programmer wrote * | ||
966 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
967 | * increments above this stored value, which indicates that there * | ||
968 | * have been more responses received than requests sent. The SPUR_WR * | ||
969 | * bit cannot be cleared until a value is written to the IPRBx * | ||
970 | * register; the write will correct the C field and capture its new * | ||
971 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
972 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
973 | * * | ||
974 | * * | ||
975 | ************************************************************************/ | ||
976 | |||
977 | typedef union ii_iprba_u { | ||
978 | u64 ii_iprba_regval; | ||
979 | struct { | ||
980 | u64 i_c:8; | ||
981 | u64 i_na:14; | ||
982 | u64 i_rsvd_2:2; | ||
983 | u64 i_nb:14; | ||
984 | u64 i_rsvd_1:2; | ||
985 | u64 i_m:2; | ||
986 | u64 i_f:1; | ||
987 | u64 i_of_cnt:5; | ||
988 | u64 i_error:1; | ||
989 | u64 i_rd_to:1; | ||
990 | u64 i_spur_wr:1; | ||
991 | u64 i_spur_rd:1; | ||
992 | u64 i_rsvd:11; | ||
993 | u64 i_mult_err:1; | ||
994 | } ii_iprba_fld_s; | ||
995 | } ii_iprba_u_t; | ||
996 | |||
997 | /************************************************************************ | ||
998 | * * | ||
999 | * Description: There are 9 instances of this register, one per * | ||
1000 | * actual widget in this implementation of SHub and Crossbow. * | ||
1001 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
1002 | * refers to Crossbow's internal space. * | ||
1003 | * This register contains the state elements per widget that are * | ||
1004 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
1005 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
1006 | * description of this register * | ||
1007 | * The SPUR_WR bit requires some explanation. When this register is * | ||
1008 | * written, the new value of the C field is captured in an internal * | ||
1009 | * register so the hardware can remember what the programmer wrote * | ||
1010 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
1011 | * increments above this stored value, which indicates that there * | ||
1012 | * have been more responses received than requests sent. The SPUR_WR * | ||
1013 | * bit cannot be cleared until a value is written to the IPRBx * | ||
1014 | * register; the write will correct the C field and capture its new * | ||
1015 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
1016 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
1017 | * * | ||
1018 | * * | ||
1019 | ************************************************************************/ | ||
1020 | |||
1021 | typedef union ii_iprbb_u { | ||
1022 | u64 ii_iprbb_regval; | ||
1023 | struct { | ||
1024 | u64 i_c:8; | ||
1025 | u64 i_na:14; | ||
1026 | u64 i_rsvd_2:2; | ||
1027 | u64 i_nb:14; | ||
1028 | u64 i_rsvd_1:2; | ||
1029 | u64 i_m:2; | ||
1030 | u64 i_f:1; | ||
1031 | u64 i_of_cnt:5; | ||
1032 | u64 i_error:1; | ||
1033 | u64 i_rd_to:1; | ||
1034 | u64 i_spur_wr:1; | ||
1035 | u64 i_spur_rd:1; | ||
1036 | u64 i_rsvd:11; | ||
1037 | u64 i_mult_err:1; | ||
1038 | } ii_iprbb_fld_s; | ||
1039 | } ii_iprbb_u_t; | ||
1040 | |||
1041 | /************************************************************************ | ||
1042 | * * | ||
1043 | * Description: There are 9 instances of this register, one per * | ||
1044 | * actual widget in this implementation of SHub and Crossbow. * | ||
1045 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
1046 | * refers to Crossbow's internal space. * | ||
1047 | * This register contains the state elements per widget that are * | ||
1048 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
1049 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
1050 | * description of this register * | ||
1051 | * The SPUR_WR bit requires some explanation. When this register is * | ||
1052 | * written, the new value of the C field is captured in an internal * | ||
1053 | * register so the hardware can remember what the programmer wrote * | ||
1054 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
1055 | * increments above this stored value, which indicates that there * | ||
1056 | * have been more responses received than requests sent. The SPUR_WR * | ||
1057 | * bit cannot be cleared until a value is written to the IPRBx * | ||
1058 | * register; the write will correct the C field and capture its new * | ||
1059 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
1060 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
1061 | * * | ||
1062 | * * | ||
1063 | ************************************************************************/ | ||
1064 | |||
1065 | typedef union ii_iprbc_u { | ||
1066 | u64 ii_iprbc_regval; | ||
1067 | struct { | ||
1068 | u64 i_c:8; | ||
1069 | u64 i_na:14; | ||
1070 | u64 i_rsvd_2:2; | ||
1071 | u64 i_nb:14; | ||
1072 | u64 i_rsvd_1:2; | ||
1073 | u64 i_m:2; | ||
1074 | u64 i_f:1; | ||
1075 | u64 i_of_cnt:5; | ||
1076 | u64 i_error:1; | ||
1077 | u64 i_rd_to:1; | ||
1078 | u64 i_spur_wr:1; | ||
1079 | u64 i_spur_rd:1; | ||
1080 | u64 i_rsvd:11; | ||
1081 | u64 i_mult_err:1; | ||
1082 | } ii_iprbc_fld_s; | ||
1083 | } ii_iprbc_u_t; | ||
1084 | |||
1085 | /************************************************************************ | ||
1086 | * * | ||
1087 | * Description: There are 9 instances of this register, one per * | ||
1088 | * actual widget in this implementation of SHub and Crossbow. * | ||
1089 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
1090 | * refers to Crossbow's internal space. * | ||
1091 | * This register contains the state elements per widget that are * | ||
1092 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
1093 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
1094 | * description of this register * | ||
1095 | * The SPUR_WR bit requires some explanation. When this register is * | ||
1096 | * written, the new value of the C field is captured in an internal * | ||
1097 | * register so the hardware can remember what the programmer wrote * | ||
1098 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
1099 | * increments above this stored value, which indicates that there * | ||
1100 | * have been more responses received than requests sent. The SPUR_WR * | ||
1101 | * bit cannot be cleared until a value is written to the IPRBx * | ||
1102 | * register; the write will correct the C field and capture its new * | ||
1103 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
1104 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
1105 | * * | ||
1106 | * * | ||
1107 | ************************************************************************/ | ||
1108 | |||
1109 | typedef union ii_iprbd_u { | ||
1110 | u64 ii_iprbd_regval; | ||
1111 | struct { | ||
1112 | u64 i_c:8; | ||
1113 | u64 i_na:14; | ||
1114 | u64 i_rsvd_2:2; | ||
1115 | u64 i_nb:14; | ||
1116 | u64 i_rsvd_1:2; | ||
1117 | u64 i_m:2; | ||
1118 | u64 i_f:1; | ||
1119 | u64 i_of_cnt:5; | ||
1120 | u64 i_error:1; | ||
1121 | u64 i_rd_to:1; | ||
1122 | u64 i_spur_wr:1; | ||
1123 | u64 i_spur_rd:1; | ||
1124 | u64 i_rsvd:11; | ||
1125 | u64 i_mult_err:1; | ||
1126 | } ii_iprbd_fld_s; | ||
1127 | } ii_iprbd_u_t; | ||
1128 | |||
1129 | /************************************************************************ | ||
1130 | * * | ||
1131 | * Description: There are 9 instances of this register, one per * | ||
1132 | * actual widget in this implementation of SHub and Crossbow. * | ||
1133 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
1134 | * refers to Crossbow's internal space. * | ||
1135 | * This register contains the state elements per widget that are * | ||
1136 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
1137 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
1138 | * description of this register * | ||
1139 | * The SPUR_WR bit requires some explanation. When this register is * | ||
1140 | * written, the new value of the C field is captured in an internal * | ||
1141 | * register so the hardware can remember what the programmer wrote * | ||
1142 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
1143 | * increments above this stored value, which indicates that there * | ||
1144 | * have been more responses received than requests sent. The SPUR_WR * | ||
1145 | * bit cannot be cleared until a value is written to the IPRBx * | ||
1146 | * register; the write will correct the C field and capture its new * | ||
1147 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
1148 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
1149 | * * | ||
1150 | * * | ||
1151 | ************************************************************************/ | ||
1152 | |||
1153 | typedef union ii_iprbe_u { | ||
1154 | u64 ii_iprbe_regval; | ||
1155 | struct { | ||
1156 | u64 i_c:8; | ||
1157 | u64 i_na:14; | ||
1158 | u64 i_rsvd_2:2; | ||
1159 | u64 i_nb:14; | ||
1160 | u64 i_rsvd_1:2; | ||
1161 | u64 i_m:2; | ||
1162 | u64 i_f:1; | ||
1163 | u64 i_of_cnt:5; | ||
1164 | u64 i_error:1; | ||
1165 | u64 i_rd_to:1; | ||
1166 | u64 i_spur_wr:1; | ||
1167 | u64 i_spur_rd:1; | ||
1168 | u64 i_rsvd:11; | ||
1169 | u64 i_mult_err:1; | ||
1170 | } ii_iprbe_fld_s; | ||
1171 | } ii_iprbe_u_t; | ||
1172 | |||
1173 | /************************************************************************ | ||
1174 | * * | ||
1175 | * Description: There are 9 instances of this register, one per * | ||
1176 | * actual widget in this implementation of Shub and Crossbow. * | ||
1177 | * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * | ||
1178 | * refers to Crossbow's internal space. * | ||
1179 | * This register contains the state elements per widget that are * | ||
1180 | * necessary to manage the PIO flow control on Crosstalk and on the * | ||
1181 | * Router Network. See the PIO Flow Control chapter for a complete * | ||
1182 | * description of this register * | ||
1183 | * The SPUR_WR bit requires some explanation. When this register is * | ||
1184 | * written, the new value of the C field is captured in an internal * | ||
1185 | * register so the hardware can remember what the programmer wrote * | ||
1186 | * into the credit counter. The SPUR_WR bit sets whenever the C field * | ||
1187 | * increments above this stored value, which indicates that there * | ||
1188 | * have been more responses received than requests sent. The SPUR_WR * | ||
1189 | * bit cannot be cleared until a value is written to the IPRBx * | ||
1190 | * register; the write will correct the C field and capture its new * | ||
1191 | * value in the internal register. Even if IECLR[E_PRB_x] is set, the * | ||
1192 | * SPUR_WR bit will persist if IPRBx hasn't yet been written. * | ||
1193 | * * | ||
1194 | * * | ||
1195 | ************************************************************************/ | ||
1196 | |||
1197 | typedef union ii_iprbf_u { | ||
1198 | u64 ii_iprbf_regval; | ||
1199 | struct { | ||
1200 | u64 i_c:8; | ||
1201 | u64 i_na:14; | ||
1202 | u64 i_rsvd_2:2; | ||
1203 | u64 i_nb:14; | ||
1204 | u64 i_rsvd_1:2; | ||
1205 | u64 i_m:2; | ||
1206 | u64 i_f:1; | ||
1207 | u64 i_of_cnt:5; | ||
1208 | u64 i_error:1; | ||
1209 | u64 i_rd_to:1; | ||
1210 | u64 i_spur_wr:1; | ||
1211 | u64 i_spur_rd:1; | ||
1212 | u64 i_rsvd:11; | ||
1213 | u64 i_mult_err:1; | ||
1214 | } ii_iprbf_fld_s; | ||
1215 | } ii_iprbf_u_t; | ||
1216 | |||
1217 | /************************************************************************ | ||
1218 | * * | ||
1219 | * This register specifies the timeout value to use for monitoring * | ||
1220 | * Crosstalk credits which are used outbound to Crosstalk. An * | ||
1221 | * internal counter called the Crosstalk Credit Timeout Counter * | ||
1222 | * increments every 128 II clocks. The counter starts counting * | ||
1223 | * anytime the credit count drops below a threshold, and resets to * | ||
1224 | * zero (stops counting) anytime the credit count is at or above the * | ||
1225 | * threshold. The threshold is 1 credit in direct connect mode and 2 * | ||
1226 | * in Crossbow connect mode. When the internal Crosstalk Credit * | ||
1227 | * Timeout Counter reaches the value programmed in this register, a * | ||
1228 | * Crosstalk Credit Timeout has occurred. The internal counter is not * | ||
1229 | * readable from software, and stops counting at its maximum value, * | ||
1230 | * so it cannot cause more than one interrupt. * | ||
1231 | * * | ||
1232 | ************************************************************************/ | ||
1233 | |||
1234 | typedef union ii_ixcc_u { | ||
1235 | u64 ii_ixcc_regval; | ||
1236 | struct { | ||
1237 | u64 i_time_out:26; | ||
1238 | u64 i_rsvd:38; | ||
1239 | } ii_ixcc_fld_s; | ||
1240 | } ii_ixcc_u_t; | ||
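As a quick aid when programming TIME_OUT, a back-of-the-envelope conversion based on the 128-clock tick described above (a sketch, not original code), assuming the ii_ixcc_u_t typedef just defined:

#include <stdint.h>

/* The credit timeout counter ticks once per 128 II clocks, so the
 * programmed TIME_OUT value corresponds to roughly this many II clocks. */
static uint64_t ixcc_timeout_in_ii_clocks(const ii_ixcc_u_t *ixcc)
{
        return (uint64_t)ixcc->ii_ixcc_fld_s.i_time_out * 128;
}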
1241 | |||
1242 | /************************************************************************ | ||
1243 | * * | ||
1244 | * Description: This register qualifies all the PIO and DMA * | ||
1245 | * operations launched from widget 0 towards the SHub. In * | ||
1246 | * addition, it also qualifies accesses by the BTE streams. * | ||
1247 | * The bits in each field of this register are cleared by the SHub * | ||
1248 | * upon detection of an error which requires widget 0 or the BTE * | ||
1249 | * streams to be terminated. Whether or not widget x has access * | ||
1250 | * rights to this SHub is determined by an AND of the device * | ||
1251 | * enable bit in the appropriate field of this register and bit 0 in * | ||
1252 | * the Wx_IAC field. The bits in this field are set by writing a 1 to * | ||
1253 | * them. Incoming replies from Crosstalk are not subject to this * | ||
1254 | * access control mechanism. * | ||
1255 | * * | ||
1256 | ************************************************************************/ | ||
1257 | |||
1258 | typedef union ii_imem_u { | ||
1259 | u64 ii_imem_regval; | ||
1260 | struct { | ||
1261 | u64 i_w0_esd:1; | ||
1262 | u64 i_rsvd_3:3; | ||
1263 | u64 i_b0_esd:1; | ||
1264 | u64 i_rsvd_2:3; | ||
1265 | u64 i_b1_esd:1; | ||
1266 | u64 i_rsvd_1:3; | ||
1267 | u64 i_clr_precise:1; | ||
1268 | u64 i_rsvd:51; | ||
1269 | } ii_imem_fld_s; | ||
1270 | } ii_imem_u_t; | ||
1271 | |||
1272 | /************************************************************************ | ||
1273 | * * | ||
1274 | * Description: This register specifies the timeout value to use for * | ||
1275 | * monitoring Crosstalk tail flits coming into the Shub in the * | ||
1276 | * TAIL_TO field. An internal counter associated with this register * | ||
1277 | * is incremented every 128 II internal clocks (7 bits). The counter * | ||
1278 | * starts counting anytime a header micropacket is received and stops * | ||
1279 | * counting (and resets to zero) any time a micropacket with a Tail * | ||
1280 | * bit is received. Once the counter reaches the threshold value * | ||
1281 | * programmed in this register, it generates an interrupt to the * | ||
1282 | * processor that is programmed into the IIDSR. The counter saturates * | ||
1283 | * (does not roll over) at its maximum value, so it cannot cause * | ||
1284 | * another interrupt until after it is cleared. * | ||
1285 | * The register also contains the Read Response Timeout values. The * | ||
1286 | * Prescalar is 23 bits, and counts II clocks. An internal counter * | ||
1287 | * increments on every II clock and when it reaches the value in the * | ||
1288 | * Prescalar field, all IPRTE registers with their valid bits set * | ||
1289 | * have their Read Response timers bumped. Whenever any of them match * | ||
1290 | * the value in the RRSP_TO field, a Read Response Timeout has * | ||
1291 | * occurred, and error handling occurs as described in the Error * | ||
1292 | * Handling section of this document. * | ||
1293 | * * | ||
1294 | ************************************************************************/ | ||
1295 | |||
1296 | typedef union ii_ixtt_u { | ||
1297 | u64 ii_ixtt_regval; | ||
1298 | struct { | ||
1299 | u64 i_tail_to:26; | ||
1300 | u64 i_rsvd_1:6; | ||
1301 | u64 i_rrsp_ps:23; | ||
1302 | u64 i_rrsp_to:5; | ||
1303 | u64 i_rsvd:4; | ||
1304 | } ii_ixtt_fld_s; | ||
1305 | } ii_ixtt_u_t; | ||
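A rough way to reason about the read-response timeout described above (an approximation inferred from the text, not taken from the original sources): each prescaler rollover, i.e. RRSP_PS II clocks, bumps the per-IPRTE timers, and a timeout fires when a timer reaches RRSP_TO, so the effective timeout is on the order of RRSP_PS times RRSP_TO II clocks.

#include <stdint.h>

/* Approximate read-response timeout in II clocks, per the reasoning above. */
static uint64_t ixtt_read_response_timeout_clocks(const ii_ixtt_u_t *ixtt)
{
        return (uint64_t)ixtt->ii_ixtt_fld_s.i_rrsp_ps *
               ixtt->ii_ixtt_fld_s.i_rrsp_to;
}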
1306 | |||
1307 | /************************************************************************ | ||
1308 | * * | ||
1309 | * Writing a 1 to the fields of this register clears the appropriate * | ||
1310 | * error bits in other areas of SHub. Note that when the * | ||
1311 | * E_PRB_x bits are used to clear error bits in PRB registers, * | ||
1312 | * SPUR_RD and SPUR_WR may persist, because they require additional * | ||
1313 | * action to clear them. See the IPRBx and IXSS Register * | ||
1314 | * specifications. * | ||
1315 | * * | ||
1316 | ************************************************************************/ | ||
1317 | |||
1318 | typedef union ii_ieclr_u { | ||
1319 | u64 ii_ieclr_regval; | ||
1320 | struct { | ||
1321 | u64 i_e_prb_0:1; | ||
1322 | u64 i_rsvd:7; | ||
1323 | u64 i_e_prb_8:1; | ||
1324 | u64 i_e_prb_9:1; | ||
1325 | u64 i_e_prb_a:1; | ||
1326 | u64 i_e_prb_b:1; | ||
1327 | u64 i_e_prb_c:1; | ||
1328 | u64 i_e_prb_d:1; | ||
1329 | u64 i_e_prb_e:1; | ||
1330 | u64 i_e_prb_f:1; | ||
1331 | u64 i_e_crazy:1; | ||
1332 | u64 i_e_bte_0:1; | ||
1333 | u64 i_e_bte_1:1; | ||
1334 | u64 i_reserved_1:10; | ||
1335 | u64 i_spur_rd_hdr:1; | ||
1336 | u64 i_cam_intr_to:1; | ||
1337 | u64 i_cam_overflow:1; | ||
1338 | u64 i_cam_read_miss:1; | ||
1339 | u64 i_ioq_rep_underflow:1; | ||
1340 | u64 i_ioq_req_underflow:1; | ||
1341 | u64 i_ioq_rep_overflow:1; | ||
1342 | u64 i_ioq_req_overflow:1; | ||
1343 | u64 i_iiq_rep_overflow:1; | ||
1344 | u64 i_iiq_req_overflow:1; | ||
1345 | u64 i_ii_xn_rep_cred_overflow:1; | ||
1346 | u64 i_ii_xn_req_cred_overflow:1; | ||
1347 | u64 i_ii_xn_invalid_cmd:1; | ||
1348 | u64 i_xn_ii_invalid_cmd:1; | ||
1349 | u64 i_reserved_2:21; | ||
1350 | } ii_ieclr_fld_s; | ||
1351 | } ii_ieclr_u_t; | ||
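Because IECLR is write-1-to-clear, software composes a value containing only the bits it wants to acknowledge. A minimal sketch, with a hypothetical shub_write_reg() standing in for the real accessor and the register offset supplied by the caller:

#include <stdint.h>

extern void shub_write_reg(uint64_t offset, uint64_t value);

/* Acknowledge (clear) the error state of both BTEs in one write. */
static void ieclr_ack_bte_errors(uint64_t iio_ieclr)
{
        ii_ieclr_u_t ieclr = { .ii_ieclr_regval = 0 };

        ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;     /* clear BTE0 error state */
        ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;     /* clear BTE1 error state */
        shub_write_reg(iio_ieclr, ieclr.ii_ieclr_regval);
}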
1352 | |||
1353 | /************************************************************************ | ||
1354 | * * | ||
1355 | * This register controls both BTEs. SOFT_RESET is intended for * | ||
1356 | * recovery after an error. COUNT controls the total number of CRBs * | ||
1357 | * that both BTEs (combined) can use, which affects total BTE * | ||
1358 | * bandwidth. * | ||
1359 | * * | ||
1360 | ************************************************************************/ | ||
1361 | |||
1362 | typedef union ii_ibcr_u { | ||
1363 | u64 ii_ibcr_regval; | ||
1364 | struct { | ||
1365 | u64 i_count:4; | ||
1366 | u64 i_rsvd_1:4; | ||
1367 | u64 i_soft_reset:1; | ||
1368 | u64 i_rsvd:55; | ||
1369 | } ii_ibcr_fld_s; | ||
1370 | } ii_ibcr_u_t; | ||
1371 | |||
1372 | /************************************************************************ | ||
1373 | * * | ||
1374 | * This register contains the header of a spurious read response * | ||
1375 | * received from Crosstalk. A spurious read response is defined as a * | ||
1376 | * read response received by II from a widget for which (1) the SIDN * | ||
1377 | * has a value between 1 and 7, inclusive (II never sends requests to * | ||
1378 | * these widgets), (2) there is no valid IPRTE register which * | ||
1379 | * corresponds to the TNUM, or (3) the widget indicated in SIDN is * | ||
1380 | * not the same as the widget recorded in the IPRTE register * | ||
1381 | * referenced by the TNUM. If this condition is true, and if the * | ||
1382 | * IXSS[VALID] bit is clear, then the header of the spurious read * | ||
1383 | * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The * | ||
1384 | * errant header is thereby captured, and no further spurious read * | ||
1385 | * responses are captured until IXSS[VALID] is cleared by setting the * | ||
1386 | * appropriate bit in IECLR. Every time a spurious read response is * | ||
1387 | * detected, the SPUR_RD bit of the PRB corresponding to the incoming * | ||
1388 | * message's SIDN field is set. This always happens, regardless of * | ||
1389 | * whether a header is captured. The programmer should check * | ||
1390 | * IXSM[SIDN] to determine which widget sent the spurious response, * | ||
1391 | * because there may be more than one SPUR_RD bit set in the PRB * | ||
1392 | * registers. The widget indicated by IXSM[SIDN] was the first * | ||
1393 | * spurious read response to be received since the last time * | ||
1394 | * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB * | ||
1395 | * will be set. Any SPUR_RD bits in any other PRB registers indicate * | ||
1396 | * spurious messages from other widgets which were detected after the * | ||
1397 | * header was captured. * | ||
1398 | * * | ||
1399 | ************************************************************************/ | ||
1400 | |||
1401 | typedef union ii_ixsm_u { | ||
1402 | u64 ii_ixsm_regval; | ||
1403 | struct { | ||
1404 | u64 i_byte_en:32; | ||
1405 | u64 i_reserved:1; | ||
1406 | u64 i_tag:3; | ||
1407 | u64 i_alt_pactyp:4; | ||
1408 | u64 i_bo:1; | ||
1409 | u64 i_error:1; | ||
1410 | u64 i_vbpm:1; | ||
1411 | u64 i_gbr:1; | ||
1412 | u64 i_ds:2; | ||
1413 | u64 i_ct:1; | ||
1414 | u64 i_tnum:5; | ||
1415 | u64 i_pactyp:4; | ||
1416 | u64 i_sidn:4; | ||
1417 | u64 i_didn:4; | ||
1418 | } ii_ixsm_fld_s; | ||
1419 | } ii_ixsm_u_t; | ||
1420 | |||
1421 | /************************************************************************ | ||
1422 | * * | ||
1423 | * This register contains the sideband bits of a spurious read * | ||
1424 | * response received from Crosstalk. * | ||
1425 | * * | ||
1426 | ************************************************************************/ | ||
1427 | |||
1428 | typedef union ii_ixss_u { | ||
1429 | u64 ii_ixss_regval; | ||
1430 | struct { | ||
1431 | u64 i_sideband:8; | ||
1432 | u64 i_rsvd:55; | ||
1433 | u64 i_valid:1; | ||
1434 | } ii_ixss_fld_s; | ||
1435 | } ii_ixss_u_t; | ||
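Putting the IXSM, IXSS and IECLR descriptions together, a sketch of the recovery flow (hypothetical shub_read_reg()/shub_write_reg() helpers; using the SPUR_RD_HDR bit of IECLR as the clearing bit follows the IECLR layout above but is otherwise an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

extern uint64_t shub_read_reg(uint64_t offset);
extern void shub_write_reg(uint64_t offset, uint64_t value);

/* If a spurious read response header has been captured, report the sending
 * widget from IXSM[SIDN] and clear IXSS[VALID] so the next event can be
 * latched. */
static void handle_spurious_read_response(uint64_t iio_ixsm, uint64_t iio_ixss,
                                          uint64_t iio_ieclr)
{
        ii_ixss_u_t ixss;
        ii_ixsm_u_t ixsm;
        ii_ieclr_u_t ieclr = { .ii_ieclr_regval = 0 };

        ixss.ii_ixss_regval = shub_read_reg(iio_ixss);
        if (!ixss.ii_ixss_fld_s.i_valid)
                return;

        ixsm.ii_ixsm_regval = shub_read_reg(iio_ixsm);
        printf("spurious read response from widget 0x%x (TNUM %u)\n",
               (unsigned int)ixsm.ii_ixsm_fld_s.i_sidn,
               (unsigned int)ixsm.ii_ixsm_fld_s.i_tnum);

        ieclr.ii_ieclr_fld_s.i_spur_rd_hdr = 1;
        shub_write_reg(iio_ieclr, ieclr.ii_ieclr_regval);
}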
1436 | |||
1437 | /************************************************************************ | ||
1438 | * * | ||
1439 | * This register enables software to access the II LLP's test port. * | ||
1440 | * Refer to the LLP 2.5 documentation for an explanation of the test * | ||
1441 | * port. Software can write to this register to program the values * | ||
1442 | * for the control fields (TestErrCapture, TestClear, TestFlit, * | ||
1443 | * TestMask and TestSeed). Similarly, software can read from this * | ||
1444 | * register to obtain the values of the test port's status outputs * | ||
1445 | * (TestCBerr, TestValid and TestData). * | ||
1446 | * * | ||
1447 | ************************************************************************/ | ||
1448 | |||
1449 | typedef union ii_ilct_u { | ||
1450 | u64 ii_ilct_regval; | ||
1451 | struct { | ||
1452 | u64 i_test_seed:20; | ||
1453 | u64 i_test_mask:8; | ||
1454 | u64 i_test_data:20; | ||
1455 | u64 i_test_valid:1; | ||
1456 | u64 i_test_cberr:1; | ||
1457 | u64 i_test_flit:3; | ||
1458 | u64 i_test_clear:1; | ||
1459 | u64 i_test_err_capture:1; | ||
1460 | u64 i_rsvd:9; | ||
1461 | } ii_ilct_fld_s; | ||
1462 | } ii_ilct_u_t; | ||
1463 | |||
1464 | /************************************************************************ | ||
1465 | * * | ||
1466 | * If the II detects an illegal incoming Duplonet packet (request or * | ||
1467 | * reply) when VALID==0 in the IIEPH1 register, then it saves the * | ||
1468 | * contents of the packet's header flit in the IIEPH1 and IIEPH2 * | ||
1469 | * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, * | ||
1470 | * and assigns a value to the ERR_TYPE field which indicates the * | ||
1471 | * specific nature of the error. The II recognizes four different * | ||
1472 | * types of errors: short request packets (ERR_TYPE==2), short reply * | ||
1473 | * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long * | ||
1474 | * reply packets (ERR_TYPE==5). The encodings for these types of * | ||
1475 | * errors were chosen to be consistent with the same types of errors * | ||
1476 | * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in * | ||
1477 | * the LB unit). If the II detects an illegal incoming Duplonet * | ||
1478 | * packet when VALID==1 in the IIEPH1 register, then it merely sets * | ||
1479 | * the OVERRUN bit to indicate that a subsequent error has happened, * | ||
1480 | * and does nothing further. * | ||
1481 | * * | ||
1482 | ************************************************************************/ | ||
1483 | |||
1484 | typedef union ii_iieph1_u { | ||
1485 | u64 ii_iieph1_regval; | ||
1486 | struct { | ||
1487 | u64 i_command:7; | ||
1488 | u64 i_rsvd_5:1; | ||
1489 | u64 i_suppl:14; | ||
1490 | u64 i_rsvd_4:1; | ||
1491 | u64 i_source:14; | ||
1492 | u64 i_rsvd_3:1; | ||
1493 | u64 i_err_type:4; | ||
1494 | u64 i_rsvd_2:4; | ||
1495 | u64 i_overrun:1; | ||
1496 | u64 i_rsvd_1:3; | ||
1497 | u64 i_valid:1; | ||
1498 | u64 i_rsvd:13; | ||
1499 | } ii_iieph1_fld_s; | ||
1500 | } ii_iieph1_u_t; | ||
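A small worked example of the ERR_TYPE encoding documented above: the sketch below maps a raw IIEPH1 value to a printable error name. It assumes nothing beyond the bit layout and encodings just given; the helper itself is illustrative and not part of the original header.

/*
 * Sketch: classify an IIEPH1 error capture.  The ERR_TYPE encodings
 * (2..5) come straight from the comment block above.
 */
static const char *iieph1_err_name(u64 regval)
{
	ii_iieph1_u_t eph1;

	eph1.ii_iieph1_regval = regval;
	if (!eph1.ii_iieph1_fld_s.i_valid)
		return "no error captured";

	switch (eph1.ii_iieph1_fld_s.i_err_type) {
	case 2: return "short request packet";
	case 3: return "short reply packet";
	case 4: return "long request packet";
	case 5: return "long reply packet";
	default: return "unrecognized ERR_TYPE";
	}
}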
1501 | |||
1502 | /************************************************************************ | ||
1503 | * * | ||
1504 | * This register holds the Address field from the header flit of an * | ||
1505 | * incoming erroneous Duplonet packet, along with the tail bit which * | ||
1506 | * accompanied this header flit. This register is essentially an * | ||
1507 | * extension of IIEPH1. Two registers were necessary because the 64 * | ||
1508 | * bits available in only a single register were insufficient to * | ||
1509 | * capture the entire header flit of an erroneous packet. * | ||
1510 | * * | ||
1511 | ************************************************************************/ | ||
1512 | |||
1513 | typedef union ii_iieph2_u { | ||
1514 | u64 ii_iieph2_regval; | ||
1515 | struct { | ||
1516 | u64 i_rsvd_0:3; | ||
1517 | u64 i_address:47; | ||
1518 | u64 i_rsvd_1:10; | ||
1519 | u64 i_tail:1; | ||
1520 | u64 i_rsvd:3; | ||
1521 | } ii_iieph2_fld_s; | ||
1522 | } ii_iieph2_u_t; | ||
1523 | |||
1524 | /******************************/ | ||
1525 | |||
1526 | /************************************************************************ | ||
1527 | * * | ||
1528 | * This register's value is a bit vector that guards access from SXBs * | ||
1529 | * to local registers within the II as well as to external Crosstalk * | ||
1530 | * widgets. * | ||
1531 | * * | ||
1532 | ************************************************************************/ | ||
1533 | |||
1534 | typedef union ii_islapr_u { | ||
1535 | u64 ii_islapr_regval; | ||
1536 | struct { | ||
1537 | u64 i_region:64; | ||
1538 | } ii_islapr_fld_s; | ||
1539 | } ii_islapr_u_t; | ||
1540 | |||
1541 | /************************************************************************ | ||
1542 | * * | ||
1543 | * A write to this register of the 56-bit value "Pup+Bun" will cause * | ||
1544 | * the bit in the ISLAPR register corresponding to the region of the * | ||
1545 | * requestor to be set (access allowed). * | ||
1546 | * * | ||
1547 | ************************************************************************/ | ||
1548 | |||
1549 | typedef union ii_islapo_u { | ||
1550 | u64 ii_islapo_regval; | ||
1551 | struct { | ||
1552 | u64 i_io_sbx_ovrride:56; | ||
1553 | u64 i_rsvd:8; | ||
1554 | } ii_islapo_fld_s; | ||
1555 | } ii_islapo_u_t; | ||
1556 | |||
1557 | /************************************************************************ | ||
1558 | * * | ||
1559 | * Determines how long the wrapper will wait after an interrupt is * | ||
1560 | * initially issued from the II before it times out the outstanding * | ||
1561 | * interrupt and drops it from the interrupt queue. * | ||
1562 | * * | ||
1563 | ************************************************************************/ | ||
1564 | |||
1565 | typedef union ii_iwi_u { | ||
1566 | u64 ii_iwi_regval; | ||
1567 | struct { | ||
1568 | u64 i_prescale:24; | ||
1569 | u64 i_rsvd:8; | ||
1570 | u64 i_timeout:8; | ||
1571 | u64 i_rsvd1:8; | ||
1572 | u64 i_intrpt_retry_period:8; | ||
1573 | u64 i_rsvd2:8; | ||
1574 | } ii_iwi_fld_s; | ||
1575 | } ii_iwi_u_t; | ||
1576 | |||
1577 | /************************************************************************ | ||
1578 | * * | ||
1579 | * Log errors which have occurred in the II wrapper. The errors are * | ||
1580 | * cleared by writing to the IECLR register. * | ||
1581 | * * | ||
1582 | ************************************************************************/ | ||
1583 | |||
1584 | typedef union ii_iwel_u { | ||
1585 | u64 ii_iwel_regval; | ||
1586 | struct { | ||
1587 | u64 i_intr_timed_out:1; | ||
1588 | u64 i_rsvd:7; | ||
1589 | u64 i_cam_overflow:1; | ||
1590 | u64 i_cam_read_miss:1; | ||
1591 | u64 i_rsvd1:2; | ||
1592 | u64 i_ioq_rep_underflow:1; | ||
1593 | u64 i_ioq_req_underflow:1; | ||
1594 | u64 i_ioq_rep_overflow:1; | ||
1595 | u64 i_ioq_req_overflow:1; | ||
1596 | u64 i_iiq_rep_overflow:1; | ||
1597 | u64 i_iiq_req_overflow:1; | ||
1598 | u64 i_rsvd2:6; | ||
1599 | u64 i_ii_xn_rep_cred_over_under:1; | ||
1600 | u64 i_ii_xn_req_cred_over_under:1; | ||
1601 | u64 i_rsvd3:6; | ||
1602 | u64 i_ii_xn_invalid_cmd:1; | ||
1603 | u64 i_xn_ii_invalid_cmd:1; | ||
1604 | u64 i_rsvd4:30; | ||
1605 | } ii_iwel_fld_s; | ||
1606 | } ii_iwel_u_t; | ||
1607 | |||
1608 | /************************************************************************ | ||
1609 | * * | ||
1610 | * Controls the II wrapper. * | ||
1611 | * * | ||
1612 | ************************************************************************/ | ||
1613 | |||
1614 | typedef union ii_iwc_u { | ||
1615 | u64 ii_iwc_regval; | ||
1616 | struct { | ||
1617 | u64 i_dma_byte_swap:1; | ||
1618 | u64 i_rsvd:3; | ||
1619 | u64 i_cam_read_lines_reset:1; | ||
1620 | u64 i_rsvd1:3; | ||
1621 | u64 i_ii_xn_cred_over_under_log:1; | ||
1622 | u64 i_rsvd2:19; | ||
1623 | u64 i_xn_rep_iq_depth:5; | ||
1624 | u64 i_rsvd3:3; | ||
1625 | u64 i_xn_req_iq_depth:5; | ||
1626 | u64 i_rsvd4:3; | ||
1627 | u64 i_iiq_depth:6; | ||
1628 | u64 i_rsvd5:12; | ||
1629 | u64 i_force_rep_cred:1; | ||
1630 | u64 i_force_req_cred:1; | ||
1631 | } ii_iwc_fld_s; | ||
1632 | } ii_iwc_u_t; | ||
1633 | |||
1634 | /************************************************************************ | ||
1635 | * * | ||
1636 | * Status in the II wrapper. * | ||
1637 | * * | ||
1638 | ************************************************************************/ | ||
1639 | |||
1640 | typedef union ii_iws_u { | ||
1641 | u64 ii_iws_regval; | ||
1642 | struct { | ||
1643 | u64 i_xn_rep_iq_credits:5; | ||
1644 | u64 i_rsvd:3; | ||
1645 | u64 i_xn_req_iq_credits:5; | ||
1646 | u64 i_rsvd1:51; | ||
1647 | } ii_iws_fld_s; | ||
1648 | } ii_iws_u_t; | ||
1649 | |||
1650 | /************************************************************************ | ||
1651 | * * | ||
1652 | * Masks errors in the IWEL register. * | ||
1653 | * * | ||
1654 | ************************************************************************/ | ||
1655 | |||
1656 | typedef union ii_iweim_u { | ||
1657 | u64 ii_iweim_regval; | ||
1658 | struct { | ||
1659 | u64 i_intr_timed_out:1; | ||
1660 | u64 i_rsvd:7; | ||
1661 | u64 i_cam_overflow:1; | ||
1662 | u64 i_cam_read_miss:1; | ||
1663 | u64 i_rsvd1:2; | ||
1664 | u64 i_ioq_rep_underflow:1; | ||
1665 | u64 i_ioq_req_underflow:1; | ||
1666 | u64 i_ioq_rep_overflow:1; | ||
1667 | u64 i_ioq_req_overflow:1; | ||
1668 | u64 i_iiq_rep_overflow:1; | ||
1669 | u64 i_iiq_req_overflow:1; | ||
1670 | u64 i_rsvd2:6; | ||
1671 | u64 i_ii_xn_rep_cred_overflow:1; | ||
1672 | u64 i_ii_xn_req_cred_overflow:1; | ||
1673 | u64 i_rsvd3:6; | ||
1674 | u64 i_ii_xn_invalid_cmd:1; | ||
1675 | u64 i_xn_ii_invalid_cmd:1; | ||
1676 | u64 i_rsvd4:30; | ||
1677 | } ii_iweim_fld_s; | ||
1678 | } ii_iweim_u_t; | ||
1679 | |||
1680 | /************************************************************************ | ||
1681 | * * | ||
1682 | * A write to this register causes a particular field in the * | ||
1683 | * corresponding widget's PRB entry to be adjusted up or down by 1. * | ||
1684 | * This counter should be used when recovering from error and reset * | ||
1685 | * conditions. Note that software would be capable of causing * | ||
1686 | * inadvertent overflow or underflow of these counters. * | ||
1687 | * * | ||
1688 | ************************************************************************/ | ||
1689 | |||
1690 | typedef union ii_ipca_u { | ||
1691 | u64 ii_ipca_regval; | ||
1692 | struct { | ||
1693 | u64 i_wid:4; | ||
1694 | u64 i_adjust:1; | ||
1695 | u64 i_rsvd_1:3; | ||
1696 | u64 i_field:2; | ||
1697 | u64 i_rsvd:54; | ||
1698 | } ii_ipca_fld_s; | ||
1699 | } ii_ipca_u_t; | ||
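A hedged sketch of composing such a write follows. Only the field layout comes from the union above; the polarity of i_adjust (assumed here to be 1 = increment, 0 = decrement) and the i_field select codes are assumptions, and the actual MMIO write is left out.

/*
 * Illustrative only: build an IPCA write value that nudges one counter
 * field in the PRB entry of a given widget.
 */
static u64 ipca_adjust_value(unsigned int widget, unsigned int field,
			     unsigned int increment)
{
	ii_ipca_u_t ipca;

	ipca.ii_ipca_regval = 0;
	ipca.ii_ipca_fld_s.i_wid = widget;
	ipca.ii_ipca_fld_s.i_adjust = increment;	/* assumed polarity */
	ipca.ii_ipca_fld_s.i_field = field;		/* assumed select code */
	return ipca.ii_ipca_regval;
}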
1700 | |||
1701 | /************************************************************************ | ||
1702 | * * | ||
1703 | * There are 8 instances of this register. This register contains * | ||
1704 | * the information that the II has to remember once it has launched a * | ||
1705 | * PIO Read operation. The contents are used to form the correct * | ||
1706 | * Router Network packet and direct the Crosstalk reply to the * | ||
1707 | * appropriate processor. * | ||
1708 | * * | ||
1709 | ************************************************************************/ | ||
1710 | |||
1711 | typedef union ii_iprte0a_u { | ||
1712 | u64 ii_iprte0a_regval; | ||
1713 | struct { | ||
1714 | u64 i_rsvd_1:54; | ||
1715 | u64 i_widget:4; | ||
1716 | u64 i_to_cnt:5; | ||
1717 | u64 i_vld:1; | ||
1718 | } ii_iprte0a_fld_s; | ||
1719 | } ii_iprte0a_u_t; | ||
1720 | |||
1721 | /************************************************************************ | ||
1722 | * * | ||
1723 | * There are 8 instances of this register. This register contains * | ||
1724 | * the information that the II has to remember once it has launched a * | ||
1725 | * PIO Read operation. The contents are used to form the correct * | ||
1726 | * Router Network packet and direct the Crosstalk reply to the * | ||
1727 | * appropriate processor. * | ||
1728 | * * | ||
1729 | ************************************************************************/ | ||
1730 | |||
1731 | typedef union ii_iprte1a_u { | ||
1732 | u64 ii_iprte1a_regval; | ||
1733 | struct { | ||
1734 | u64 i_rsvd_1:54; | ||
1735 | u64 i_widget:4; | ||
1736 | u64 i_to_cnt:5; | ||
1737 | u64 i_vld:1; | ||
1738 | } ii_iprte1a_fld_s; | ||
1739 | } ii_iprte1a_u_t; | ||
1740 | |||
1741 | /************************************************************************ | ||
1742 | * * | ||
1743 | * There are 8 instances of this register. This register contains * | ||
1744 | * the information that the II has to remember once it has launched a * | ||
1745 | * PIO Read operation. The contents are used to form the correct * | ||
1746 | * Router Network packet and direct the Crosstalk reply to the * | ||
1747 | * appropriate processor. * | ||
1748 | * * | ||
1749 | ************************************************************************/ | ||
1750 | |||
1751 | typedef union ii_iprte2a_u { | ||
1752 | u64 ii_iprte2a_regval; | ||
1753 | struct { | ||
1754 | u64 i_rsvd_1:54; | ||
1755 | u64 i_widget:4; | ||
1756 | u64 i_to_cnt:5; | ||
1757 | u64 i_vld:1; | ||
1758 | } ii_iprte2a_fld_s; | ||
1759 | } ii_iprte2a_u_t; | ||
1760 | |||
1761 | /************************************************************************ | ||
1762 | * * | ||
1763 | * There are 8 instances of this register. This register contains * | ||
1764 | * the information that the II has to remember once it has launched a * | ||
1765 | * PIO Read operation. The contents are used to form the correct * | ||
1766 | * Router Network packet and direct the Crosstalk reply to the * | ||
1767 | * appropriate processor. * | ||
1768 | * * | ||
1769 | ************************************************************************/ | ||
1770 | |||
1771 | typedef union ii_iprte3a_u { | ||
1772 | u64 ii_iprte3a_regval; | ||
1773 | struct { | ||
1774 | u64 i_rsvd_1:54; | ||
1775 | u64 i_widget:4; | ||
1776 | u64 i_to_cnt:5; | ||
1777 | u64 i_vld:1; | ||
1778 | } ii_iprte3a_fld_s; | ||
1779 | } ii_iprte3a_u_t; | ||
1780 | |||
1781 | /************************************************************************ | ||
1782 | * * | ||
1783 | * There are 8 instances of this register. This register contains * | ||
1784 | * the information that the II has to remember once it has launched a * | ||
1785 | * PIO Read operation. The contents are used to form the correct * | ||
1786 | * Router Network packet and direct the Crosstalk reply to the * | ||
1787 | * appropriate processor. * | ||
1788 | * * | ||
1789 | ************************************************************************/ | ||
1790 | |||
1791 | typedef union ii_iprte4a_u { | ||
1792 | u64 ii_iprte4a_regval; | ||
1793 | struct { | ||
1794 | u64 i_rsvd_1:54; | ||
1795 | u64 i_widget:4; | ||
1796 | u64 i_to_cnt:5; | ||
1797 | u64 i_vld:1; | ||
1798 | } ii_iprte4a_fld_s; | ||
1799 | } ii_iprte4a_u_t; | ||
1800 | |||
1801 | /************************************************************************ | ||
1802 | * * | ||
1803 | * There are 8 instances of this register. This register contains * | ||
1804 | * the information that the II has to remember once it has launched a * | ||
1805 | * PIO Read operation. The contents are used to form the correct * | ||
1806 | * Router Network packet and direct the Crosstalk reply to the * | ||
1807 | * appropriate processor. * | ||
1808 | * * | ||
1809 | ************************************************************************/ | ||
1810 | |||
1811 | typedef union ii_iprte5a_u { | ||
1812 | u64 ii_iprte5a_regval; | ||
1813 | struct { | ||
1814 | u64 i_rsvd_1:54; | ||
1815 | u64 i_widget:4; | ||
1816 | u64 i_to_cnt:5; | ||
1817 | u64 i_vld:1; | ||
1818 | } ii_iprte5a_fld_s; | ||
1819 | } ii_iprte5a_u_t; | ||
1820 | |||
1821 | /************************************************************************ | ||
1822 | * * | ||
1823 | * There are 8 instances of this register. This register contains * | ||
1824 | * the information that the II has to remember once it has launched a * | ||
1825 | * PIO Read operation. The contents are used to form the correct * | ||
1826 | * Router Network packet and direct the Crosstalk reply to the * | ||
1827 | * appropriate processor. * | ||
1828 | * * | ||
1829 | ************************************************************************/ | ||
1830 | |||
1831 | typedef union ii_iprte6a_u { | ||
1832 | u64 ii_iprte6a_regval; | ||
1833 | struct { | ||
1834 | u64 i_rsvd_1:54; | ||
1835 | u64 i_widget:4; | ||
1836 | u64 i_to_cnt:5; | ||
1837 | u64 i_vld:1; | ||
1838 | } ii_iprte6a_fld_s; | ||
1839 | } ii_iprte6a_u_t; | ||
1840 | |||
1841 | /************************************************************************ | ||
1842 | * * | ||
1843 | * There are 8 instances of this register. This register contains * | ||
1844 | * the information that the II has to remember once it has launched a * | ||
1845 | * PIO Read operation. The contents are used to form the correct * | ||
1846 | * Router Network packet and direct the Crosstalk reply to the * | ||
1847 | * appropriate processor. * | ||
1848 | * * | ||
1849 | ************************************************************************/ | ||
1850 | |||
1851 | typedef union ii_iprte7a_u { | ||
1852 | u64 ii_iprte7a_regval; | ||
1853 | struct { | ||
1854 | u64 i_rsvd_1:54; | ||
1855 | u64 i_widget:4; | ||
1856 | u64 i_to_cnt:5; | ||
1857 | u64 i_vld:1; | ||
1858 | } ii_iprte7a_fld_s; | ||
1859 | } ii_iprte7a_u_t; | ||
1860 | |||
1861 | /************************************************************************ | ||
1862 | * * | ||
1863 | * There are 8 instances of this register. This register contains * | ||
1864 | * the information that the II has to remember once it has launched a * | ||
1865 | * PIO Read operation. The contents are used to form the correct * | ||
1866 | * Router Network packet and direct the Crosstalk reply to the * | ||
1867 | * appropriate processor. * | ||
1868 | * * | ||
1869 | ************************************************************************/ | ||
1870 | |||
1871 | typedef union ii_iprte0b_u { | ||
1872 | u64 ii_iprte0b_regval; | ||
1873 | struct { | ||
1874 | u64 i_rsvd_1:3; | ||
1875 | u64 i_address:47; | ||
1876 | u64 i_init:3; | ||
1877 | u64 i_source:11; | ||
1878 | } ii_iprte0b_fld_s; | ||
1879 | } ii_iprte0b_u_t; | ||
1880 | |||
1881 | /************************************************************************ | ||
1882 | * * | ||
1883 | * There are 8 instances of this register. This register contains * | ||
1884 | * the information that the II has to remember once it has launched a * | ||
1885 | * PIO Read operation. The contents are used to form the correct * | ||
1886 | * Router Network packet and direct the Crosstalk reply to the * | ||
1887 | * appropriate processor. * | ||
1888 | * * | ||
1889 | ************************************************************************/ | ||
1890 | |||
1891 | typedef union ii_iprte1b_u { | ||
1892 | u64 ii_iprte1b_regval; | ||
1893 | struct { | ||
1894 | u64 i_rsvd_1:3; | ||
1895 | u64 i_address:47; | ||
1896 | u64 i_init:3; | ||
1897 | u64 i_source:11; | ||
1898 | } ii_iprte1b_fld_s; | ||
1899 | } ii_iprte1b_u_t; | ||
1900 | |||
1901 | /************************************************************************ | ||
1902 | * * | ||
1903 | * There are 8 instances of this register. This register contains * | ||
1904 | * the information that the II has to remember once it has launched a * | ||
1905 | * PIO Read operation. The contents are used to form the correct * | ||
1906 | * Router Network packet and direct the Crosstalk reply to the * | ||
1907 | * appropriate processor. * | ||
1908 | * * | ||
1909 | ************************************************************************/ | ||
1910 | |||
1911 | typedef union ii_iprte2b_u { | ||
1912 | u64 ii_iprte2b_regval; | ||
1913 | struct { | ||
1914 | u64 i_rsvd_1:3; | ||
1915 | u64 i_address:47; | ||
1916 | u64 i_init:3; | ||
1917 | u64 i_source:11; | ||
1918 | } ii_iprte2b_fld_s; | ||
1919 | } ii_iprte2b_u_t; | ||
1920 | |||
1921 | /************************************************************************ | ||
1922 | * * | ||
1923 | * There are 8 instances of this register. This register contains * | ||
1924 | * the information that the II has to remember once it has launched a * | ||
1925 | * PIO Read operation. The contents are used to form the correct * | ||
1926 | * Router Network packet and direct the Crosstalk reply to the * | ||
1927 | * appropriate processor. * | ||
1928 | * * | ||
1929 | ************************************************************************/ | ||
1930 | |||
1931 | typedef union ii_iprte3b_u { | ||
1932 | u64 ii_iprte3b_regval; | ||
1933 | struct { | ||
1934 | u64 i_rsvd_1:3; | ||
1935 | u64 i_address:47; | ||
1936 | u64 i_init:3; | ||
1937 | u64 i_source:11; | ||
1938 | } ii_iprte3b_fld_s; | ||
1939 | } ii_iprte3b_u_t; | ||
1940 | |||
1941 | /************************************************************************ | ||
1942 | * * | ||
1943 | * There are 8 instances of this register. This register contains * | ||
1944 | * the information that the II has to remember once it has launched a * | ||
1945 | * PIO Read operation. The contents are used to form the correct * | ||
1946 | * Router Network packet and direct the Crosstalk reply to the * | ||
1947 | * appropriate processor. * | ||
1948 | * * | ||
1949 | ************************************************************************/ | ||
1950 | |||
1951 | typedef union ii_iprte4b_u { | ||
1952 | u64 ii_iprte4b_regval; | ||
1953 | struct { | ||
1954 | u64 i_rsvd_1:3; | ||
1955 | u64 i_address:47; | ||
1956 | u64 i_init:3; | ||
1957 | u64 i_source:11; | ||
1958 | } ii_iprte4b_fld_s; | ||
1959 | } ii_iprte4b_u_t; | ||
1960 | |||
1961 | /************************************************************************ | ||
1962 | * * | ||
1963 | * There are 8 instances of this register. This register contains * | ||
1964 | * the information that the II has to remember once it has launched a * | ||
1965 | * PIO Read operation. The contents are used to form the correct * | ||
1966 | * Router Network packet and direct the Crosstalk reply to the * | ||
1967 | * appropriate processor. * | ||
1968 | * * | ||
1969 | ************************************************************************/ | ||
1970 | |||
1971 | typedef union ii_iprte5b_u { | ||
1972 | u64 ii_iprte5b_regval; | ||
1973 | struct { | ||
1974 | u64 i_rsvd_1:3; | ||
1975 | u64 i_address:47; | ||
1976 | u64 i_init:3; | ||
1977 | u64 i_source:11; | ||
1978 | } ii_iprte5b_fld_s; | ||
1979 | } ii_iprte5b_u_t; | ||
1980 | |||
1981 | /************************************************************************ | ||
1982 | * * | ||
1983 | * There are 8 instances of this register. This register contains * | ||
1984 | * the information that the II has to remember once it has launched a * | ||
1985 | * PIO Read operation. The contents are used to form the correct * | ||
1986 | * Router Network packet and direct the Crosstalk reply to the * | ||
1987 | * appropriate processor. * | ||
1988 | * * | ||
1989 | ************************************************************************/ | ||
1990 | |||
1991 | typedef union ii_iprte6b_u { | ||
1992 | u64 ii_iprte6b_regval; | ||
1993 | struct { | ||
1994 | u64 i_rsvd_1:3; | ||
1995 | u64 i_address:47; | ||
1996 | u64 i_init:3; | ||
1997 | u64 i_source:11; | ||
1998 | |||
1999 | } ii_iprte6b_fld_s; | ||
2000 | } ii_iprte6b_u_t; | ||
2001 | |||
2002 | /************************************************************************ | ||
2003 | * * | ||
2004 | * There are 8 instances of this register. This register contains * | ||
2005 | * the information that the II has to remember once it has launched a * | ||
2006 | * PIO Read operation. The contents are used to form the correct * | ||
2007 | * Router Network packet and direct the Crosstalk reply to the * | ||
2008 | * appropriate processor. * | ||
2009 | * * | ||
2010 | ************************************************************************/ | ||
2011 | |||
2012 | typedef union ii_iprte7b_u { | ||
2013 | u64 ii_iprte7b_regval; | ||
2014 | struct { | ||
2015 | u64 i_rsvd_1:3; | ||
2016 | u64 i_address:47; | ||
2017 | u64 i_init:3; | ||
2018 | u64 i_source:11; | ||
2019 | } ii_iprte7b_fld_s; | ||
2020 | } ii_iprte7b_u_t; | ||
2021 | |||
2022 | /************************************************************************ | ||
2023 | * * | ||
2024 | * Description: SHub II contains a feature which did not exist in * | ||
2025 | * the Hub which automatically cleans up after a Read Response * | ||
2026 | * timeout, including deallocation of the IPRTE and recovery of IBuf * | ||
2027 | * space. The inclusion of this register in SHub is for backward * | ||
2028 | * compatibility. * | ||
2029 | * A write to this register causes an entry from the table of * | ||
2030 | * outstanding PIO Read Requests to be freed and returned to the * | ||
2031 | * stack of free entries. This register is used in handling the * | ||
2032 | * timeout errors that result in a PIO Reply never returning from * | ||
2033 | * Crosstalk. * | ||
2034 | * Note that this register does not affect the contents of the IPRTE * | ||
2035 | * registers. The Valid bits in those registers have to be * | ||
2036 | * specifically turned off by software. * | ||
2037 | * * | ||
2038 | ************************************************************************/ | ||
2039 | |||
2040 | typedef union ii_ipdr_u { | ||
2041 | u64 ii_ipdr_regval; | ||
2042 | struct { | ||
2043 | u64 i_te:3; | ||
2044 | u64 i_rsvd_1:1; | ||
2045 | u64 i_pnd:1; | ||
2046 | u64 i_init_rpcnt:1; | ||
2047 | u64 i_rsvd:58; | ||
2048 | } ii_ipdr_fld_s; | ||
2049 | } ii_ipdr_u_t; | ||
2050 | |||
2051 | /************************************************************************ | ||
2052 | * * | ||
2053 | * A write to this register causes a CRB entry to be returned to the * | ||
2054 | * queue of free CRBs. The entry should have previously been cleared * | ||
2055 | * (mark bit) via backdoor access to the pertinent CRB entry. This * | ||
2056 | * register is used in the last step of handling the errors that are * | ||
2057 | * captured and marked in CRB entries. Briefly: 1) first error for * | ||
2058 | * DMA write from a particular device, and first error for a * | ||
2059 | * particular BTE stream, lead to a marked CRB entry, and processor * | ||
2060 | * interrupt, 2) software reads the error information captured in the * | ||
2061 | * CRB entry, and presumably takes some corrective action, 3) * | ||
2062 | * software clears the mark bit, and finally 4) software writes to * | ||
2063 | * the ICDR register to return the CRB entry to the list of free CRB * | ||
2064 | * entries. * | ||
2065 | * * | ||
2066 | ************************************************************************/ | ||
2067 | |||
2068 | typedef union ii_icdr_u { | ||
2069 | u64 ii_icdr_regval; | ||
2070 | struct { | ||
2071 | u64 i_crb_num:4; | ||
2072 | u64 i_pnd:1; | ||
2073 | u64 i_rsvd:59; | ||
2074 | } ii_icdr_fld_s; | ||
2075 | } ii_icdr_u_t; | ||
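Step 4 of the error-handling sequence described above, as a minimal sketch: once the mark bit has been cleared via backdoor access, the CRB number is written to ICDR to return the entry to the free queue. Only the bit layout is taken from the definition; how i_pnd is polled, and the MMIO write itself, are left as assumptions.

/*
 * Sketch: build the ICDR value that releases CRB entry "crb_num"
 * back to the free queue (step 4 of the sequence above).
 */
static u64 icdr_release_value(unsigned int crb_num)
{
	ii_icdr_u_t icdr;

	icdr.ii_icdr_regval = 0;
	icdr.ii_icdr_fld_s.i_crb_num = crb_num;
	return icdr.ii_icdr_regval;
}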
2076 | |||
2077 | /************************************************************************ | ||
2078 | * * | ||
2079 | * This register provides debug access to two FIFOs inside of II. * | ||
2080 | * Both IOQ_MAX* fields of this register contain the instantaneous * | ||
2081 | * depth (in units of the number of available entries) of the * | ||
2082 | * associated IOQ FIFO. A read of this register will return the * | ||
2083 | * number of free entries on each FIFO at the time of the read. So * | ||
2084 | * when a FIFO is idle, the associated field contains the maximum * | ||
2085 | * depth of the FIFO. This register is writable for debug reasons * | ||
2086 | * and is intended to be written with the maximum desired FIFO depth * | ||
2087 | * while the FIFO is idle. Software must assure that II is idle when * | ||
2088 | * this register is written. If there are any active entries in any * | ||
2089 | * of these FIFOs when this register is written, the results are * | ||
2090 | * undefined. * | ||
2091 | * * | ||
2092 | ************************************************************************/ | ||
2093 | |||
2094 | typedef union ii_ifdr_u { | ||
2095 | u64 ii_ifdr_regval; | ||
2096 | struct { | ||
2097 | u64 i_ioq_max_rq:7; | ||
2098 | u64 i_set_ioq_rq:1; | ||
2099 | u64 i_ioq_max_rp:7; | ||
2100 | u64 i_set_ioq_rp:1; | ||
2101 | u64 i_rsvd:48; | ||
2102 | } ii_ifdr_fld_s; | ||
2103 | } ii_ifdr_u_t; | ||
2104 | |||
2105 | /************************************************************************ | ||
2106 | * * | ||
2107 | * This register allows the II to become sluggish in removing * | ||
2108 | * messages from its inbound queue (IIQ). This will cause messages to * | ||
2109 | * back up in either virtual channel. Disabling the "molasses" mode * | ||
2110 | * subsequently allows the II to be tested under stress. In the * | ||
2111 | * sluggish ("Molasses") mode, the localized effects of congestion * | ||
2112 | * can be observed. * | ||
2113 | * * | ||
2114 | ************************************************************************/ | ||
2115 | |||
2116 | typedef union ii_iiap_u { | ||
2117 | u64 ii_iiap_regval; | ||
2118 | struct { | ||
2119 | u64 i_rq_mls:6; | ||
2120 | u64 i_rsvd_1:2; | ||
2121 | u64 i_rp_mls:6; | ||
2122 | u64 i_rsvd:50; | ||
2123 | } ii_iiap_fld_s; | ||
2124 | } ii_iiap_u_t; | ||
2125 | |||
2126 | /************************************************************************ | ||
2127 | * * | ||
2128 | * This register allows several parameters of CRB operation to be * | ||
2129 | * set. Note that writing to this register can have catastrophic side * | ||
2130 | * effects, if the CRB is not quiescent, i.e. if the CRB is * | ||
2131 | * processing protocol messages when the write occurs. * | ||
2132 | * * | ||
2133 | ************************************************************************/ | ||
2134 | |||
2135 | typedef union ii_icmr_u { | ||
2136 | u64 ii_icmr_regval; | ||
2137 | struct { | ||
2138 | u64 i_sp_msg:1; | ||
2139 | u64 i_rd_hdr:1; | ||
2140 | u64 i_rsvd_4:2; | ||
2141 | u64 i_c_cnt:4; | ||
2142 | u64 i_rsvd_3:4; | ||
2143 | u64 i_clr_rqpd:1; | ||
2144 | u64 i_clr_rppd:1; | ||
2145 | u64 i_rsvd_2:2; | ||
2146 | u64 i_fc_cnt:4; | ||
2147 | u64 i_crb_vld:15; | ||
2148 | u64 i_crb_mark:15; | ||
2149 | u64 i_rsvd_1:2; | ||
2150 | u64 i_precise:1; | ||
2151 | u64 i_rsvd:11; | ||
2152 | } ii_icmr_fld_s; | ||
2153 | } ii_icmr_u_t; | ||
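As an illustration of how the mark vector might be consumed, the sketch below scans i_crb_mark for an entry that captured an error, which would then feed the ICDR release sequence described earlier. Treating i_crb_mark as one bit per CRB entry is an inference from the field width (15 bits, 15 CRB entries), not a documented fact.

/* Sketch: find the first marked CRB entry in a raw ICMR value. */
static int icmr_first_marked_crb(u64 regval)
{
	ii_icmr_u_t icmr;
	int i;

	icmr.ii_icmr_regval = regval;
	for (i = 0; i < 15; i++)
		if (icmr.ii_icmr_fld_s.i_crb_mark & (1ULL << i))
			return i;
	return -1;	/* no entry marked */
}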
2154 | |||
2155 | /************************************************************************ | ||
2156 | * * | ||
2157 | * This register allows control of the table portion of the CRB * | ||
2158 | * logic via software. Control operations from this register have * | ||
2159 | * priority over all incoming Crosstalk or BTE requests. * | ||
2160 | * * | ||
2161 | ************************************************************************/ | ||
2162 | |||
2163 | typedef union ii_iccr_u { | ||
2164 | u64 ii_iccr_regval; | ||
2165 | struct { | ||
2166 | u64 i_crb_num:4; | ||
2167 | u64 i_rsvd_1:4; | ||
2168 | u64 i_cmd:8; | ||
2169 | u64 i_pending:1; | ||
2170 | u64 i_rsvd:47; | ||
2171 | } ii_iccr_fld_s; | ||
2172 | } ii_iccr_u_t; | ||
2173 | |||
2174 | /************************************************************************ | ||
2175 | * * | ||
2176 | * This register allows the maximum timeout value to be programmed. * | ||
2177 | * * | ||
2178 | ************************************************************************/ | ||
2179 | |||
2180 | typedef union ii_icto_u { | ||
2181 | u64 ii_icto_regval; | ||
2182 | struct { | ||
2183 | u64 i_timeout:8; | ||
2184 | u64 i_rsvd:56; | ||
2185 | } ii_icto_fld_s; | ||
2186 | } ii_icto_u_t; | ||
2187 | |||
2188 | /************************************************************************ | ||
2189 | * * | ||
2190 | * This register allows the timeout prescalar to be programmed. An * | ||
2191 | * internal counter is associated with this register. When the * | ||
2192 | * internal counter reaches the value of the PRESCALE field, the * | ||
2193 | * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] * | ||
2194 | * field). The internal counter resets to zero, and then continues * | ||
2195 | * counting. * | ||
2196 | * * | ||
2197 | ************************************************************************/ | ||
2198 | |||
2199 | typedef union ii_ictp_u { | ||
2200 | u64 ii_ictp_regval; | ||
2201 | struct { | ||
2202 | u64 i_prescale:24; | ||
2203 | u64 i_rsvd:40; | ||
2204 | } ii_ictp_fld_s; | ||
2205 | } ii_ictp_u_t; | ||
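Reading the two timeout registers together: a valid CRB's timer advances once per PRESCALE counts of the internal counter, and an entry is presumably treated as timed out when that timer reaches the ICTO timeout value. The sketch below states this inferred relationship; it is an assumption drawn from the two descriptions, not a documented formula.

/* Inferred effective CRB timeout, in internal-counter ticks. */
static u64 crb_timeout_ticks(unsigned int prescale, unsigned int timeout)
{
	return (u64)prescale * timeout;
}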
2206 | |||
2207 | /************************************************************************ | ||
2208 | * * | ||
2209 | * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * | ||
2210 | * used for Crosstalk operations (both cacheline and partial * | ||
2211 | * operations) or BTE/IO. Because the CRB entries are very wide, five * | ||
2212 | * registers (_A to _E) are required to read and write each entry. * | ||
2213 | * The CRB Entry registers can be conceptualized as rows and columns * | ||
2214 | * (illustrated in the table above). Each row contains the 5 * | ||
2215 | * registers required for a single CRB Entry. The first doubleword * | ||
2216 | * (column) for each entry is labeled A, and the second doubleword * | ||
2217 | * (higher address) is labeled B, the third doubleword is labeled C, * | ||
2218 | * the fourth doubleword is labeled D and the fifth doubleword is * | ||
2219 | * labeled E. All CRB entries have their addresses on a quarter * | ||
2220 | * cacheline aligned boundary. * | ||
2221 | * Upon reset, only the following fields are initialized: valid * | ||
2222 | * (VLD), priority count, timeout, timeout valid, and context valid. * | ||
2223 | * All other bits should be cleared by software before use (after * | ||
2224 | * recovering any potential error state from before the reset). * | ||
2225 | * The following five tables summarize the format for the five * | ||
2226 | * registers that are used for each ICRB# Entry. * | ||
2227 | * * | ||
2228 | ************************************************************************/ | ||
2229 | |||
2230 | typedef union ii_icrb0_a_u { | ||
2231 | u64 ii_icrb0_a_regval; | ||
2232 | struct { | ||
2233 | u64 ia_iow:1; | ||
2234 | u64 ia_vld:1; | ||
2235 | u64 ia_addr:47; | ||
2236 | u64 ia_tnum:5; | ||
2237 | u64 ia_sidn:4; | ||
2238 | u64 ia_rsvd:6; | ||
2239 | } ii_icrb0_a_fld_s; | ||
2240 | } ii_icrb0_a_u_t; | ||
2241 | |||
2242 | /************************************************************************ | ||
2243 | * * | ||
2244 | * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * | ||
2245 | * used for Crosstalk operations (both cacheline and partial * | ||
2246 | * operations) or BTE/IO. Because the CRB entries are very wide, five * | ||
2247 | * registers (_A to _E) are required to read and write each entry. * | ||
2248 | * * | ||
2249 | ************************************************************************/ | ||
2250 | |||
2251 | typedef union ii_icrb0_b_u { | ||
2252 | u64 ii_icrb0_b_regval; | ||
2253 | struct { | ||
2254 | u64 ib_xt_err:1; | ||
2255 | u64 ib_mark:1; | ||
2256 | u64 ib_ln_uce:1; | ||
2257 | u64 ib_errcode:3; | ||
2258 | u64 ib_error:1; | ||
2259 | u64 ib_stall__bte_1:1; | ||
2260 | u64 ib_stall__bte_0:1; | ||
2261 | u64 ib_stall__intr:1; | ||
2262 | u64 ib_stall_ib:1; | ||
2263 | u64 ib_intvn:1; | ||
2264 | u64 ib_wb:1; | ||
2265 | u64 ib_hold:1; | ||
2266 | u64 ib_ack:1; | ||
2267 | u64 ib_resp:1; | ||
2268 | u64 ib_ack_cnt:11; | ||
2269 | u64 ib_rsvd:7; | ||
2270 | u64 ib_exc:5; | ||
2271 | u64 ib_init:3; | ||
2272 | u64 ib_imsg:8; | ||
2273 | u64 ib_imsgtype:2; | ||
2274 | u64 ib_use_old:1; | ||
2275 | u64 ib_rsvd_1:11; | ||
2276 | } ii_icrb0_b_fld_s; | ||
2277 | } ii_icrb0_b_u_t; | ||
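For illustration, a decode of the error-related fields in a CRB "B" doubleword is sketched below. Treating ib_error, ib_xt_err and ib_ln_uce as the interesting error indicators is an assumption based only on the field names above.

/* Sketch: does this CRB "B" doubleword indicate an error condition? */
static int icrb_b_has_error(u64 regval)
{
	ii_icrb0_b_u_t crb_b;

	crb_b.ii_icrb0_b_regval = regval;
	return crb_b.ii_icrb0_b_fld_s.ib_error ||
	       crb_b.ii_icrb0_b_fld_s.ib_xt_err ||
	       crb_b.ii_icrb0_b_fld_s.ib_ln_uce;
}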
2278 | |||
2279 | /************************************************************************ | ||
2280 | * * | ||
2281 | * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * | ||
2282 | * used for Crosstalk operations (both cacheline and partial * | ||
2283 | * operations) or BTE/IO. Because the CRB entries are very wide, five * | ||
2284 | * registers (_A to _E) are required to read and write each entry. * | ||
2285 | * * | ||
2286 | ************************************************************************/ | ||
2287 | |||
2288 | typedef union ii_icrb0_c_u { | ||
2289 | u64 ii_icrb0_c_regval; | ||
2290 | struct { | ||
2291 | u64 ic_source:15; | ||
2292 | u64 ic_size:2; | ||
2293 | u64 ic_ct:1; | ||
2294 | u64 ic_bte_num:1; | ||
2295 | u64 ic_gbr:1; | ||
2296 | u64 ic_resprqd:1; | ||
2297 | u64 ic_bo:1; | ||
2298 | u64 ic_suppl:15; | ||
2299 | u64 ic_rsvd:27; | ||
2300 | } ii_icrb0_c_fld_s; | ||
2301 | } ii_icrb0_c_u_t; | ||
2302 | |||
2303 | /************************************************************************ | ||
2304 | * * | ||
2305 | * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * | ||
2306 | * used for Crosstalk operations (both cacheline and partial * | ||
2307 | * operations) or BTE/IO. Because the CRB entries are very wide, five * | ||
2308 | * registers (_A to _E) are required to read and write each entry. * | ||
2309 | * * | ||
2310 | ************************************************************************/ | ||
2311 | |||
2312 | typedef union ii_icrb0_d_u { | ||
2313 | u64 ii_icrb0_d_regval; | ||
2314 | struct { | ||
2315 | u64 id_pa_be:43; | ||
2316 | u64 id_bte_op:1; | ||
2317 | u64 id_pr_psc:4; | ||
2318 | u64 id_pr_cnt:4; | ||
2319 | u64 id_sleep:1; | ||
2320 | u64 id_rsvd:11; | ||
2321 | } ii_icrb0_d_fld_s; | ||
2322 | } ii_icrb0_d_u_t; | ||
2323 | |||
2324 | /************************************************************************ | ||
2325 | * * | ||
2326 | * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * | ||
2327 | * used for Crosstalk operations (both cacheline and partial * | ||
2328 | * operations) or BTE/IO. Because the CRB entries are very wide, five * | ||
2329 | * registers (_A to _E) are required to read and write each entry. * | ||
2330 | * * | ||
2331 | ************************************************************************/ | ||
2332 | |||
2333 | typedef union ii_icrb0_e_u { | ||
2334 | u64 ii_icrb0_e_regval; | ||
2335 | struct { | ||
2336 | u64 ie_timeout:8; | ||
2337 | u64 ie_context:15; | ||
2338 | u64 ie_rsvd:1; | ||
2339 | u64 ie_tvld:1; | ||
2340 | u64 ie_cvld:1; | ||
2341 | u64 ie_rsvd_0:38; | ||
2342 | } ii_icrb0_e_fld_s; | ||
2343 | } ii_icrb0_e_u_t; | ||
2344 | |||
2345 | /************************************************************************ | ||
2346 | * * | ||
2347 | * This register contains the lower 64 bits of the header of the * | ||
2348 | * spurious message captured by II. Valid when the SP_MSG bit in ICMR * | ||
2349 | * register is set. * | ||
2350 | * * | ||
2351 | ************************************************************************/ | ||
2352 | |||
2353 | typedef union ii_icsml_u { | ||
2354 | u64 ii_icsml_regval; | ||
2355 | struct { | ||
2356 | u64 i_tt_addr:47; | ||
2357 | u64 i_newsuppl_ex:14; | ||
2358 | u64 i_reserved:2; | ||
2359 | u64 i_overflow:1; | ||
2360 | } ii_icsml_fld_s; | ||
2361 | } ii_icsml_u_t; | ||
2362 | |||
2363 | /************************************************************************ | ||
2364 | * * | ||
2365 | * This register contains the middle 64 bits of the header of the * | ||
2366 | * spurious message captured by II. Valid when the SP_MSG bit in ICMR * | ||
2367 | * register is set. * | ||
2368 | * * | ||
2369 | ************************************************************************/ | ||
2370 | |||
2371 | typedef union ii_icsmm_u { | ||
2372 | u64 ii_icsmm_regval; | ||
2373 | struct { | ||
2374 | u64 i_tt_ack_cnt:11; | ||
2375 | u64 i_reserved:53; | ||
2376 | } ii_icsmm_fld_s; | ||
2377 | } ii_icsmm_u_t; | ||
2378 | |||
2379 | /************************************************************************ | ||
2380 | * * | ||
2381 | * This register contains the microscopic state, all the inputs to * | ||
2382 | * the protocol table, captured with the spurious message. Valid when * | ||
2383 | * the SP_MSG bit in the ICMR register is set. * | ||
2384 | * * | ||
2385 | ************************************************************************/ | ||
2386 | |||
2387 | typedef union ii_icsmh_u { | ||
2388 | u64 ii_icsmh_regval; | ||
2389 | struct { | ||
2390 | u64 i_tt_vld:1; | ||
2391 | u64 i_xerr:1; | ||
2392 | u64 i_ft_cwact_o:1; | ||
2393 | u64 i_ft_wact_o:1; | ||
2394 | u64 i_ft_active_o:1; | ||
2395 | u64 i_sync:1; | ||
2396 | u64 i_mnusg:1; | ||
2397 | u64 i_mnusz:1; | ||
2398 | u64 i_plusz:1; | ||
2399 | u64 i_plusg:1; | ||
2400 | u64 i_tt_exc:5; | ||
2401 | u64 i_tt_wb:1; | ||
2402 | u64 i_tt_hold:1; | ||
2403 | u64 i_tt_ack:1; | ||
2404 | u64 i_tt_resp:1; | ||
2405 | u64 i_tt_intvn:1; | ||
2406 | u64 i_g_stall_bte1:1; | ||
2407 | u64 i_g_stall_bte0:1; | ||
2408 | u64 i_g_stall_il:1; | ||
2409 | u64 i_g_stall_ib:1; | ||
2410 | u64 i_tt_imsg:8; | ||
2411 | u64 i_tt_imsgtype:2; | ||
2412 | u64 i_tt_use_old:1; | ||
2413 | u64 i_tt_respreqd:1; | ||
2414 | u64 i_tt_bte_num:1; | ||
2415 | u64 i_cbn:1; | ||
2416 | u64 i_match:1; | ||
2417 | u64 i_rpcnt_lt_34:1; | ||
2418 | u64 i_rpcnt_ge_34:1; | ||
2419 | u64 i_rpcnt_lt_18:1; | ||
2420 | u64 i_rpcnt_ge_18:1; | ||
2421 | u64 i_rpcnt_lt_2:1; | ||
2422 | u64 i_rpcnt_ge_2:1; | ||
2423 | u64 i_rqcnt_lt_18:1; | ||
2424 | u64 i_rqcnt_ge_18:1; | ||
2425 | u64 i_rqcnt_lt_2:1; | ||
2426 | u64 i_rqcnt_ge_2:1; | ||
2427 | u64 i_tt_device:7; | ||
2428 | u64 i_tt_init:3; | ||
2429 | u64 i_reserved:5; | ||
2430 | } ii_icsmh_fld_s; | ||
2431 | } ii_icsmh_u_t; | ||
2432 | |||
2433 | /************************************************************************ | ||
2434 | * * | ||
2435 | * The Shub DEBUG unit provides a 3-bit selection signal to the * | ||
2436 | * II core and a 3-bit selection signal to the fsbclk domain in the II * | ||
2437 | * wrapper. * | ||
2438 | * * | ||
2439 | ************************************************************************/ | ||
2440 | |||
2441 | typedef union ii_idbss_u { | ||
2442 | u64 ii_idbss_regval; | ||
2443 | struct { | ||
2444 | u64 i_iioclk_core_submenu:3; | ||
2445 | u64 i_rsvd:5; | ||
2446 | u64 i_fsbclk_wrapper_submenu:3; | ||
2447 | u64 i_rsvd_1:5; | ||
2448 | u64 i_iioclk_menu:5; | ||
2449 | u64 i_rsvd_2:43; | ||
2450 | } ii_idbss_fld_s; | ||
2451 | } ii_idbss_u_t; | ||
2452 | |||
2453 | /************************************************************************ | ||
2454 | * * | ||
2455 | * Description: This register is used to set up the length for a * | ||
2456 | * transfer and then to monitor the progress of that transfer. This * | ||
2457 | * register needs to be initialized before a transfer is started. A * | ||
2458 | * legitimate write to this register will set the Busy bit, clear the * | ||
2459 | * Error bit, and initialize the length to the value desired. * | ||
2460 | * While the transfer is in progress, hardware will decrement the * | ||
2461 | * length field with each successful block that is copied. Once the * | ||
2462 | * transfer completes, hardware will clear the Busy bit. The length * | ||
2463 | * field will also contain the number of cache lines left to be * | ||
2464 | * transferred. * | ||
2465 | * * | ||
2466 | ************************************************************************/ | ||
2467 | |||
2468 | typedef union ii_ibls0_u { | ||
2469 | u64 ii_ibls0_regval; | ||
2470 | struct { | ||
2471 | u64 i_length:16; | ||
2472 | u64 i_error:1; | ||
2473 | u64 i_rsvd_1:3; | ||
2474 | u64 i_busy:1; | ||
2475 | u64 i_rsvd:43; | ||
2476 | } ii_ibls0_fld_s; | ||
2477 | } ii_ibls0_u_t; | ||
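The completion poll implied by the description above can be sketched as follows: BUSY clears when the transfer finishes and LENGTH then holds the cache lines left (zero on success). IIO_IBLS0 is the register offset defined elsewhere in this header; read_bte_reg() is a hypothetical MMIO accessor, not something this header provides.

/* Sketch: wait for BTE 0 to finish and report success/failure. */
static int bte0_wait_done(void)
{
	ii_ibls0_u_t ibls;

	do {
		ibls.ii_ibls0_regval = read_bte_reg(IIO_IBLS0);	/* hypothetical */
	} while (ibls.ii_ibls0_fld_s.i_busy);

	return ibls.ii_ibls0_fld_s.i_error ? -1 : 0;
}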
2478 | |||
2479 | /************************************************************************ | ||
2480 | * * | ||
2481 | * This register should be loaded before a transfer is started. The * | ||
2482 | * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * | ||
2483 | * address as described in Section 1.3, Figure2 and Figure3. Since * | ||
2484 | * the bottom 7 bits of the address are always taken to be zero, BTE * | ||
2485 | * transfers are always cacheline-aligned. * | ||
2486 | * * | ||
2487 | ************************************************************************/ | ||
2488 | |||
2489 | typedef union ii_ibsa0_u { | ||
2490 | u64 ii_ibsa0_regval; | ||
2491 | struct { | ||
2492 | u64 i_rsvd_1:7; | ||
2493 | u64 i_addr:42; | ||
2494 | u64 i_rsvd:15; | ||
2495 | } ii_ibsa0_fld_s; | ||
2496 | } ii_ibsa0_u_t; | ||
2497 | |||
2498 | /************************************************************************ | ||
2499 | * * | ||
2500 | * This register should be loaded before a transfer is started. The * | ||
2501 | * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * | ||
2502 | * address as described in Section 1.3, Figure2 and Figure3. Since * | ||
2503 | * the bottom 7 bits of the address are always taken to be zero, BTE * | ||
2504 | * transfers are always cacheline-aligned. * | ||
2505 | * * | ||
2506 | ************************************************************************/ | ||
2507 | |||
2508 | typedef union ii_ibda0_u { | ||
2509 | u64 ii_ibda0_regval; | ||
2510 | struct { | ||
2511 | u64 i_rsvd_1:7; | ||
2512 | u64 i_addr:42; | ||
2513 | u64 i_rsvd:15; | ||
2514 | } ii_ibda0_fld_s; | ||
2515 | } ii_ibda0_u_t; | ||
2516 | |||
2517 | /************************************************************************ | ||
2518 | * * | ||
2519 | * Writing to this register sets up the attributes of the transfer * | ||
2520 | * and initiates the transfer operation. Reading this register has * | ||
2521 | * the side effect of terminating any transfer in progress. Note: * | ||
2522 | * stopping a transfer midstream could have an adverse impact on the * | ||
2523 | * other BTE. If a BTE stream has to be stopped (due to error * | ||
2524 | * handling for example), both BTE streams should be stopped and * | ||
2525 | * their transfers discarded. * | ||
2526 | * * | ||
2527 | ************************************************************************/ | ||
2528 | |||
2529 | typedef union ii_ibct0_u { | ||
2530 | u64 ii_ibct0_regval; | ||
2531 | struct { | ||
2532 | u64 i_zerofill:1; | ||
2533 | u64 i_rsvd_2:3; | ||
2534 | u64 i_notify:1; | ||
2535 | u64 i_rsvd_1:3; | ||
2536 | u64 i_poison:1; | ||
2537 | u64 i_rsvd:55; | ||
2538 | } ii_ibct0_fld_s; | ||
2539 | } ii_ibct0_u_t; | ||
2540 | |||
2541 | /************************************************************************ | ||
2542 | * * | ||
2543 | * This register contains the address to which the WINV is sent. * | ||
2544 | * This address has to be cache line aligned. * | ||
2545 | * * | ||
2546 | ************************************************************************/ | ||
2547 | |||
2548 | typedef union ii_ibna0_u { | ||
2549 | u64 ii_ibna0_regval; | ||
2550 | struct { | ||
2551 | u64 i_rsvd_1:7; | ||
2552 | u64 i_addr:42; | ||
2553 | u64 i_rsvd:15; | ||
2554 | } ii_ibna0_fld_s; | ||
2555 | } ii_ibna0_u_t; | ||
2556 | |||
2557 | /************************************************************************ | ||
2558 | * * | ||
2559 | * This register contains the programmable level as well as the node * | ||
2560 | * ID and PI unit of the processor to which the interrupt will be * | ||
2561 | * sent. * | ||
2562 | * * | ||
2563 | ************************************************************************/ | ||
2564 | |||
2565 | typedef union ii_ibia0_u { | ||
2566 | u64 ii_ibia0_regval; | ||
2567 | struct { | ||
2568 | u64 i_rsvd_2:1; | ||
2569 | u64 i_node_id:11; | ||
2570 | u64 i_rsvd_1:4; | ||
2571 | u64 i_level:7; | ||
2572 | u64 i_rsvd:41; | ||
2573 | } ii_ibia0_fld_s; | ||
2574 | } ii_ibia0_u_t; | ||
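The six BTE-0 registers above fit together roughly as sketched below: load the length, source, destination and notification addresses, then write IBCT0 last, since its description says that write initiates the transfer. write_bte_reg() is a hypothetical MMIO accessor, the >>7 packing of cache-line-aligned addresses into i_addr is inferred from the 7 reserved low bits in the layouts, and the ordering follows the register descriptions here rather than the kernel's BTE driver. This is an illustration of the register formats only.

/* Sketch: program block-transfer engine 0 for a cache-line copy. */
static void bte0_start_copy(u64 src_pa, u64 dst_pa, u64 notify_pa,
			    unsigned int cache_lines)
{
	ii_ibls0_u_t len = { 0 };
	ii_ibsa0_u_t src = { 0 };
	ii_ibda0_u_t dst = { 0 };
	ii_ibna0_u_t bna = { 0 };
	ii_ibct0_u_t ctl = { 0 };

	len.ii_ibls0_fld_s.i_length = cache_lines;
	src.ii_ibsa0_fld_s.i_addr = src_pa >> 7;	/* cache-line aligned */
	dst.ii_ibda0_fld_s.i_addr = dst_pa >> 7;
	bna.ii_ibna0_fld_s.i_addr = notify_pa >> 7;
	ctl.ii_ibct0_fld_s.i_notify = 1;		/* request completion notice */

	write_bte_reg(IIO_IBLS0, len.ii_ibls0_regval);	/* hypothetical accessor */
	write_bte_reg(IIO_IBSA0, src.ii_ibsa0_regval);
	write_bte_reg(IIO_IBDA0, dst.ii_ibda0_regval);
	write_bte_reg(IIO_IBNA0, bna.ii_ibna0_regval);
	write_bte_reg(IIO_IBCT0, ctl.ii_ibct0_regval);	/* initiates the transfer */
}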
2575 | |||
2576 | /************************************************************************ | ||
2577 | * * | ||
2578 | * Description: This register is used to set up the length for a * | ||
2579 | * transfer and then to monitor the progress of that transfer. This * | ||
2580 | * register needs to be initialized before a transfer is started. A * | ||
2581 | * legitimate write to this register will set the Busy bit, clear the * | ||
2582 | * Error bit, and initialize the length to the value desired. * | ||
2583 | * While the transfer is in progress, hardware will decrement the * | ||
2584 | * length field with each successful block that is copied. Once the * | ||
2585 | * transfer completes, hardware will clear the Busy bit. The length * | ||
2586 | * field will also contain the number of cache lines left to be * | ||
2587 | * transferred. * | ||
2588 | * * | ||
2589 | ************************************************************************/ | ||
2590 | |||
2591 | typedef union ii_ibls1_u { | ||
2592 | u64 ii_ibls1_regval; | ||
2593 | struct { | ||
2594 | u64 i_length:16; | ||
2595 | u64 i_error:1; | ||
2596 | u64 i_rsvd_1:3; | ||
2597 | u64 i_busy:1; | ||
2598 | u64 i_rsvd:43; | ||
2599 | } ii_ibls1_fld_s; | ||
2600 | } ii_ibls1_u_t; | ||
2601 | |||
2602 | /************************************************************************ | ||
2603 | * * | ||
2604 | * This register should be loaded before a transfer is started. The * | ||
2605 | * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * | ||
2606 | * address as described in Section 1.3, Figure2 and Figure3. Since * | ||
2607 | * the bottom 7 bits of the address are always taken to be zero, BTE * | ||
2608 | * transfers are always cacheline-aligned. * | ||
2609 | * * | ||
2610 | ************************************************************************/ | ||
2611 | |||
2612 | typedef union ii_ibsa1_u { | ||
2613 | u64 ii_ibsa1_regval; | ||
2614 | struct { | ||
2615 | u64 i_rsvd_1:7; | ||
2616 | u64 i_addr:33; | ||
2617 | u64 i_rsvd:24; | ||
2618 | } ii_ibsa1_fld_s; | ||
2619 | } ii_ibsa1_u_t; | ||
2620 | |||
2621 | /************************************************************************ | ||
2622 | * * | ||
2623 | * This register should be loaded before a transfer is started. The * | ||
2624 | * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * | ||
2625 | * address as described in Section 1.3, Figure2 and Figure3. Since * | ||
2626 | * the bottom 7 bits of the address are always taken to be zero, BTE * | ||
2627 | * transfers are always cacheline-aligned. * | ||
2628 | * * | ||
2629 | ************************************************************************/ | ||
2630 | |||
2631 | typedef union ii_ibda1_u { | ||
2632 | u64 ii_ibda1_regval; | ||
2633 | struct { | ||
2634 | u64 i_rsvd_1:7; | ||
2635 | u64 i_addr:33; | ||
2636 | u64 i_rsvd:24; | ||
2637 | } ii_ibda1_fld_s; | ||
2638 | } ii_ibda1_u_t; | ||
2639 | |||
2640 | /************************************************************************ | ||
2641 | * * | ||
2642 | * Writing to this register sets up the attributes of the transfer * | ||
2643 | * and initiates the transfer operation. Reading this register has * | ||
2644 | * the side effect of terminating any transfer in progress. Note: * | ||
2645 | * stopping a transfer midstream could have an adverse impact on the * | ||
2646 | * other BTE. If a BTE stream has to be stopped (due to error * | ||
2647 | * handling for example), both BTE streams should be stopped and * | ||
2648 | * their transfers discarded. * | ||
2649 | * * | ||
2650 | ************************************************************************/ | ||
2651 | |||
2652 | typedef union ii_ibct1_u { | ||
2653 | u64 ii_ibct1_regval; | ||
2654 | struct { | ||
2655 | u64 i_zerofill:1; | ||
2656 | u64 i_rsvd_2:3; | ||
2657 | u64 i_notify:1; | ||
2658 | u64 i_rsvd_1:3; | ||
2659 | u64 i_poison:1; | ||
2660 | u64 i_rsvd:55; | ||
2661 | } ii_ibct1_fld_s; | ||
2662 | } ii_ibct1_u_t; | ||
2663 | |||
2664 | /************************************************************************ | ||
2665 | * * | ||
2666 | * This register contains the address to which the WINV is sent. * | ||
2667 | * This address has to be cache line aligned. * | ||
2668 | * * | ||
2669 | ************************************************************************/ | ||
2670 | |||
2671 | typedef union ii_ibna1_u { | ||
2672 | u64 ii_ibna1_regval; | ||
2673 | struct { | ||
2674 | u64 i_rsvd_1:7; | ||
2675 | u64 i_addr:33; | ||
2676 | u64 i_rsvd:24; | ||
2677 | } ii_ibna1_fld_s; | ||
2678 | } ii_ibna1_u_t; | ||
2679 | |||
2680 | /************************************************************************ | ||
2681 | * * | ||
2682 | * This register contains the programmable level as well as the node * | ||
2683 | * ID and PI unit of the processor to which the interrupt will be * | ||
2684 | * sent. * | ||
2685 | * * | ||
2686 | ************************************************************************/ | ||
2687 | |||
2688 | typedef union ii_ibia1_u { | ||
2689 | u64 ii_ibia1_regval; | ||
2690 | struct { | ||
2691 | u64 i_pi_id:1; | ||
2692 | u64 i_node_id:8; | ||
2693 | u64 i_rsvd_1:7; | ||
2694 | u64 i_level:7; | ||
2695 | u64 i_rsvd:41; | ||
2696 | } ii_ibia1_fld_s; | ||
2697 | } ii_ibia1_u_t; | ||
2698 | |||
2699 | /************************************************************************ | ||
2700 | * * | ||
2701 | * This register defines the resources that feed information into * | ||
2702 | * the two performance counters located in the IO Performance * | ||
2703 | * Profiling Register. There are 17 different quantities that can be * | ||
2704 | * measured. Given these 17 different options, the two performance * | ||
2705 | * counters have 15 of them in common; menu selections 0 through 0xE * | ||
2706 | * are identical for each performance counter. As for the other two * | ||
2707 | * options, one is available from one performance counter and the * | ||
2708 | * other is available from the other performance counter. Hence, the * | ||
2709 | * II supports all 17*16=272 possible combinations of quantities to * | ||
2710 | * measure. * | ||
2711 | * * | ||
2712 | ************************************************************************/ | ||
2713 | |||
2714 | typedef union ii_ipcr_u { | ||
2715 | u64 ii_ipcr_regval; | ||
2716 | struct { | ||
2717 | u64 i_ippr0_c:4; | ||
2718 | u64 i_ippr1_c:4; | ||
2719 | u64 i_icct:8; | ||
2720 | u64 i_rsvd:48; | ||
2721 | } ii_ipcr_fld_s; | ||
2722 | } ii_ipcr_u_t; | ||
2723 | |||
2724 | /************************************************************************ | ||
2725 | * * | ||
2726 | * The two performance counters whose inputs are selected by IPCR. * | ||
2727 | * * | ||
2728 | ************************************************************************/ | ||
2729 | |||
2730 | typedef union ii_ippr_u { | ||
2731 | u64 ii_ippr_regval; | ||
2732 | struct { | ||
2733 | u64 i_ippr0:32; | ||
2734 | u64 i_ippr1:32; | ||
2735 | } ii_ippr_fld_s; | ||
2736 | } ii_ippr_u_t; | ||
2737 | |||
2738 | /************************************************************************ | ||
2739 | * * | ||
2740 | * The following defines which were not formed into structures are * | ||
2741 | * probably identical to another register, and the name of the * | ||
2742 | * register is provided against each of these registers. This * | ||
2743 | * information needs to be checked carefully * | ||
2744 | * * | ||
2745 | * IIO_ICRB1_A IIO_ICRB0_A * | ||
2746 | * IIO_ICRB1_B IIO_ICRB0_B * | ||
2747 | * IIO_ICRB1_C IIO_ICRB0_C * | ||
2748 | * IIO_ICRB1_D IIO_ICRB0_D * | ||
2749 | * IIO_ICRB1_E IIO_ICRB0_E * | ||
2750 | * IIO_ICRB2_A IIO_ICRB0_A * | ||
2751 | * IIO_ICRB2_B IIO_ICRB0_B * | ||
2752 | * IIO_ICRB2_C IIO_ICRB0_C * | ||
2753 | * IIO_ICRB2_D IIO_ICRB0_D * | ||
2754 | * IIO_ICRB2_E IIO_ICRB0_E * | ||
2755 | * IIO_ICRB3_A IIO_ICRB0_A * | ||
2756 | * IIO_ICRB3_B IIO_ICRB0_B * | ||
2757 | * IIO_ICRB3_C IIO_ICRB0_C * | ||
2758 | * IIO_ICRB3_D IIO_ICRB0_D * | ||
2759 | * IIO_ICRB3_E IIO_ICRB0_E * | ||
2760 | * IIO_ICRB4_A IIO_ICRB0_A * | ||
2761 | * IIO_ICRB4_B IIO_ICRB0_B * | ||
2762 | * IIO_ICRB4_C IIO_ICRB0_C * | ||
2763 | * IIO_ICRB4_D IIO_ICRB0_D * | ||
2764 | * IIO_ICRB4_E IIO_ICRB0_E * | ||
2765 | * IIO_ICRB5_A IIO_ICRB0_A * | ||
2766 | * IIO_ICRB5_B IIO_ICRB0_B * | ||
2767 | * IIO_ICRB5_C IIO_ICRB0_C * | ||
2768 | * IIO_ICRB5_D IIO_ICRB0_D * | ||
2769 | * IIO_ICRB5_E IIO_ICRB0_E * | ||
2770 | * IIO_ICRB6_A IIO_ICRB0_A * | ||
2771 | * IIO_ICRB6_B IIO_ICRB0_B * | ||
2772 | * IIO_ICRB6_C IIO_ICRB0_C * | ||
2773 | * IIO_ICRB6_D IIO_ICRB0_D * | ||
2774 | * IIO_ICRB6_E IIO_ICRB0_E * | ||
2775 | * IIO_ICRB7_A IIO_ICRB0_A * | ||
2776 | * IIO_ICRB7_B IIO_ICRB0_B * | ||
2777 | * IIO_ICRB7_C IIO_ICRB0_C * | ||
2778 | * IIO_ICRB7_D IIO_ICRB0_D * | ||
2779 | * IIO_ICRB7_E IIO_ICRB0_E * | ||
2780 | * IIO_ICRB8_A IIO_ICRB0_A * | ||
2781 | * IIO_ICRB8_B IIO_ICRB0_B * | ||
2782 | * IIO_ICRB8_C IIO_ICRB0_C * | ||
2783 | * IIO_ICRB8_D IIO_ICRB0_D * | ||
2784 | * IIO_ICRB8_E IIO_ICRB0_E * | ||
2785 | * IIO_ICRB9_A IIO_ICRB0_A * | ||
2786 | * IIO_ICRB9_B IIO_ICRB0_B * | ||
2787 | * IIO_ICRB9_C IIO_ICRB0_C * | ||
2788 | * IIO_ICRB9_D IIO_ICRB0_D * | ||
2789 | * IIO_ICRB9_E IIO_ICRB0_E * | ||
2790 | * IIO_ICRBA_A IIO_ICRB0_A * | ||
2791 | * IIO_ICRBA_B IIO_ICRB0_B * | ||
2792 | * IIO_ICRBA_C IIO_ICRB0_C * | ||
2793 | * IIO_ICRBA_D IIO_ICRB0_D * | ||
2794 | * IIO_ICRBA_E IIO_ICRB0_E * | ||
2795 | * IIO_ICRBB_A IIO_ICRB0_A * | ||
2796 | * IIO_ICRBB_B IIO_ICRB0_B * | ||
2797 | * IIO_ICRBB_C IIO_ICRB0_C * | ||
2798 | * IIO_ICRBB_D IIO_ICRB0_D * | ||
2799 | * IIO_ICRBB_E IIO_ICRB0_E * | ||
2800 | * IIO_ICRBC_A IIO_ICRB0_A * | ||
2801 | * IIO_ICRBC_B IIO_ICRB0_B * | ||
2802 | * IIO_ICRBC_C IIO_ICRB0_C * | ||
2803 | * IIO_ICRBC_D IIO_ICRB0_D * | ||
2804 | * IIO_ICRBC_E IIO_ICRB0_E * | ||
2805 | * IIO_ICRBD_A IIO_ICRB0_A * | ||
2806 | * IIO_ICRBD_B IIO_ICRB0_B * | ||
2807 | * IIO_ICRBD_C IIO_ICRB0_C * | ||
2808 | * IIO_ICRBD_D IIO_ICRB0_D * | ||
2809 | * IIO_ICRBD_E IIO_ICRB0_E * | ||
2810 | * IIO_ICRBE_A IIO_ICRB0_A * | ||
2811 | * IIO_ICRBE_B IIO_ICRB0_B * | ||
2812 | * IIO_ICRBE_C IIO_ICRB0_C * | ||
2813 | * IIO_ICRBE_D IIO_ICRB0_D * | ||
2814 | * IIO_ICRBE_E IIO_ICRB0_E * | ||
2815 | * * | ||
2816 | ************************************************************************/ | ||
2817 | |||
2818 | /* | ||
2819 | * Slightly friendlier names for some common registers. | ||
2820 | */ | ||
2821 | #define IIO_WIDGET IIO_WID /* Widget identification */ | ||
2822 | #define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ | ||
2823 | #define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ | ||
2824 | #define IIO_PROTECT IIO_ILAPR /* IO interface protection */ | ||
2825 | #define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ | ||
2826 | #define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ | ||
2827 | #define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ | ||
2828 | #define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ | ||
2829 | #define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ | ||
2830 | #define IIO_LLP_LOG IIO_ILLR /* LLP log */ | ||
2831 | #define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */ | ||
2832 | #define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ | ||
2833 | #define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ | ||
2834 | #define IIO_IGFX_0 IIO_IGFX0 | ||
2835 | #define IIO_IGFX_1 IIO_IGFX1 | ||
2836 | #define IIO_IBCT_0 IIO_IBCT0 | ||
2837 | #define IIO_IBCT_1 IIO_IBCT1 | ||
2838 | #define IIO_IBLS_0 IIO_IBLS0 | ||
2839 | #define IIO_IBLS_1 IIO_IBLS1 | ||
2840 | #define IIO_IBSA_0 IIO_IBSA0 | ||
2841 | #define IIO_IBSA_1 IIO_IBSA1 | ||
2842 | #define IIO_IBDA_0 IIO_IBDA0 | ||
2843 | #define IIO_IBDA_1 IIO_IBDA1 | ||
2844 | #define IIO_IBNA_0 IIO_IBNA0 | ||
2845 | #define IIO_IBNA_1 IIO_IBNA1 | ||
2846 | #define IIO_IBIA_0 IIO_IBIA0 | ||
2847 | #define IIO_IBIA_1 IIO_IBIA1 | ||
2848 | #define IIO_IOPRB_0 IIO_IPRB0 | ||
2849 | |||
2850 | #define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x))) | ||
2851 | #define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x))) | ||
2852 | #define IIO_NUM_PRTES 8 /* Total number of PRB table entries */ | ||
2853 | #define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ | ||
2854 | #define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ | ||
2855 | |||
2856 | #define IIO_NUM_IPRBS 9 | ||
2857 | |||
2858 | #define IIO_LLP_CSR_IS_UP 0x00002000 | ||
2859 | #define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000 | ||
2860 | #define IIO_LLP_CSR_LLP_STAT_SHFT 12 | ||
2861 | |||
2862 | #define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */ | ||
2863 | #define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */ | ||
2864 | |||
2865 | /* key to IIO_PROTECT_OVRRD */ | ||
2866 | #define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ | ||
2867 | |||
2868 | /* BTE register names */ | ||
2869 | #define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ | ||
2870 | #define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ | ||
2871 | #define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */ | ||
2872 | #define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ | ||
2873 | #define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ | ||
2874 | #define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ | ||
2875 | #define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ | ||
2876 | #define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ | ||
2877 | |||
2878 | /* BTE register offsets from base */ | ||
2879 | #define BTEOFF_STAT 0 | ||
2880 | #define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) | ||
2881 | #define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) | ||
2882 | #define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) | ||
2883 | #define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) | ||
2884 | #define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) | ||
2885 | |||
2886 | /* names used in shub diags */ | ||
2887 | #define IIO_BASE_BTE0 IIO_IBLS_0 | ||
2888 | #define IIO_BASE_BTE1 IIO_IBLS_1 | ||
2889 | |||
2890 | /* | ||
2891 | * Macro which takes the widget number and returns the | ||
2892 | * IO PRB address of that widget. | ||
2893 | * The value _x is expected to be a widget number that is | ||
2894 | * either 0 or in the range 8 - 0xF. | ||
2895 | */ | ||
2896 | #define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \ | ||
2897 | (_x) : \ | ||
2898 | (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) | ||
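As an aside (not part of the original header), the arithmetic this macro performs can be sketched stand-alone; the demo_ names below are illustrative and the register base is taken as 0 so the per-widget offsets are visible:

#include <stdio.h>

#define DEMO_HUB_WIDGET_ID_MIN 0x8          /* mirrors HUB_WIDGET_ID_MIN defined later */

/* Offset of a widget's IO PRB register from IIO_IPRB0. */
static unsigned long demo_ioprb_offset(unsigned int w)
{
        return (unsigned long)((w < DEMO_HUB_WIDGET_ID_MIN ?
                                w : w - (DEMO_HUB_WIDGET_ID_MIN - 1)) << 3);
}

int main(void)
{
        /* Widget 0 uses PRB 0; widgets 8..0xF map to PRBs 1..8, 9 PRBs in all. */
        printf("widget 0x0 -> IIO_IPRB0 + 0x%02lx\n", demo_ioprb_offset(0x0));
        printf("widget 0x8 -> IIO_IPRB0 + 0x%02lx\n", demo_ioprb_offset(0x8));
        printf("widget 0xf -> IIO_IPRB0 + 0x%02lx\n", demo_ioprb_offset(0xf));
        return 0;
}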
2899 | |||
2900 | /* GFX Flow Control Node/Widget Register */ | ||
2901 | #define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */ | ||
2902 | #define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1) | ||
2903 | #define IIO_IGFX_W_NUM_SHIFT 0 | ||
2904 | #define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */ | ||
2905 | #define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1) | ||
2906 | #define IIO_IGFX_PI_NUM_SHIFT 4 | ||
2907 | #define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */ | ||
2908 | #define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1) | ||
2909 | #define IIO_IGFX_N_NUM_SHIFT 5 | ||
2910 | #define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */ | ||
2911 | #define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1) | ||
2912 | #define IIO_IGFX_P_NUM_SHIFT 16 | ||
2913 | #define IIO_IGFX_INIT(widget, pi, node, cpu) (\ | ||
2914 | (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \ | ||
2915 | (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \ | ||
2916 | (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \ | ||
2917 | (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT)) | ||
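A minimal stand-alone sketch of the field packing IIO_IGFX_INIT performs (illustrative only; the demo_ helper is not part of this header):

#include <stdio.h>

/* Pack widget, PI, node and processor numbers as IIO_IGFX_INIT does. */
static unsigned long demo_igfx_init(unsigned widget, unsigned pi,
                                    unsigned node, unsigned cpu)
{
        return ((unsigned long)(widget & 0xf)  << 0)  |   /* widget number, bits 3:0  */
               ((unsigned long)(pi     & 0x1)  << 4)  |   /* PI number,     bit 4     */
               ((unsigned long)(node   & 0xff) << 5)  |   /* node number,   bits 12:5 */
               ((unsigned long)(cpu    & 0x1)  << 16);    /* processor num, bit 16    */
}

int main(void)
{
        /* e.g. widget 0xc on PI 1 of node 3, cpu 1 -> 0x1007c */
        printf("0x%05lx\n", demo_igfx_init(0xc, 1, 3, 1));
        return 0;
}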
2918 | |||
2919 | /* Scratch registers (all bits available) */ | ||
2920 | #define IIO_SCRATCH_REG0 IIO_ISCR0 | ||
2921 | #define IIO_SCRATCH_REG1 IIO_ISCR1 | ||
2922 | #define IIO_SCRATCH_MASK 0xffffffffffffffffUL | ||
2923 | |||
2924 | #define IIO_SCRATCH_BIT0_0 0x0000000000000001UL | ||
2925 | #define IIO_SCRATCH_BIT0_1 0x0000000000000002UL | ||
2926 | #define IIO_SCRATCH_BIT0_2 0x0000000000000004UL | ||
2927 | #define IIO_SCRATCH_BIT0_3 0x0000000000000008UL | ||
2928 | #define IIO_SCRATCH_BIT0_4 0x0000000000000010UL | ||
2929 | #define IIO_SCRATCH_BIT0_5 0x0000000000000020UL | ||
2930 | #define IIO_SCRATCH_BIT0_6 0x0000000000000040UL | ||
2931 | #define IIO_SCRATCH_BIT0_7 0x0000000000000080UL | ||
2932 | #define IIO_SCRATCH_BIT0_8 0x0000000000000100UL | ||
2933 | #define IIO_SCRATCH_BIT0_9 0x0000000000000200UL | ||
2934 | #define IIO_SCRATCH_BIT0_A 0x0000000000000400UL | ||
2935 | |||
2936 | #define IIO_SCRATCH_BIT1_0 0x0000000000000001UL | ||
2937 | #define IIO_SCRATCH_BIT1_1 0x0000000000000002UL | ||
2938 | /* IO Translation Table Entries */ | ||
2939 | #define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ | ||
2940 | /* Hw manuals number them 1..7! */ | ||
2941 | /* | ||
2942 | * IIO_IMEM Register fields. | ||
2943 | */ | ||
2944 | #define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ | ||
2945 | #define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ | ||
2946 | #define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ | ||
2947 | |||
2948 | /* | ||
2949 | * As a permanent workaround for a bug in the PI side of the shub, we've | ||
2950 | * redefined big window 7 as small window 0. | ||
2951 | XXX does this still apply for SN1?? | ||
2952 | */ | ||
2953 | #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) | ||
2954 | |||
2955 | /* | ||
2956 | * Use the top big window as a surrogate for the first small window | ||
2957 | */ | ||
2958 | #define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW | ||
2959 | |||
2960 | #define ILCSR_WARM_RESET 0x100 | ||
2961 | |||
2962 | /* | ||
2963 | * CRB manipulation macros | ||
2964 | * The CRB macros are slightly complicated, since there are up to | ||
2965 | * four registers associated with each CRB entry. | ||
2966 | */ | ||
2967 | #define IIO_NUM_CRBS 15 /* Number of CRBs */ | ||
2968 | #define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ | ||
2969 | #define IIO_ICRB_OFFSET 8 | ||
2970 | #define IIO_ICRB_0 IIO_ICRB0_A | ||
2971 | #define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */ | ||
2972 | /* XXX - This is now tuneable: | ||
2973 | #define IIO_FIRST_PC_ENTRY 12 | ||
2974 | */ | ||
2975 | |||
2976 | #define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) | ||
2977 | #define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) | ||
2978 | #define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) | ||
2979 | #define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) | ||
2980 | #define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) | ||
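The resulting layout is easier to see with a small stand-alone sketch (illustrative; the base IIO_ICRB0_A is taken as 0 and the demo_ names are not part of this header):

#include <stdio.h>

#define DEMO_ICRB_OFFSET 8                     /* mirrors IIO_ICRB_OFFSET */

/* Offsets of registers A, B and E of CRB entry x, relative to IIO_ICRB0_A. */
static unsigned long demo_icrb_a(unsigned x) { return 6UL * DEMO_ICRB_OFFSET * x; }
static unsigned long demo_icrb_b(unsigned x) { return demo_icrb_a(x) + 1 * DEMO_ICRB_OFFSET; }
static unsigned long demo_icrb_e(unsigned x) { return demo_icrb_a(x) + 4 * DEMO_ICRB_OFFSET; }

int main(void)
{
        /* Each CRB entry occupies a 0x30-byte stride; A..E sit 8 bytes apart. */
        printf("entry 0: A=+0x%02lx B=+0x%02lx E=+0x%02lx\n",
               demo_icrb_a(0), demo_icrb_b(0), demo_icrb_e(0));
        printf("entry 1: A=+0x%02lx B=+0x%02lx E=+0x%02lx\n",
               demo_icrb_a(1), demo_icrb_b(1), demo_icrb_e(1));
        return 0;
}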
2981 | |||
2982 | #define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7) | ||
2983 | |||
2984 | /* | ||
2985 | * values for "ecode" field | ||
2986 | */ | ||
2987 | #define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ | ||
2988 | #define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ | ||
2989 | #define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access | ||
2990 | * e.g. WINV to a Read only line. */ | ||
2991 | #define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ | ||
2992 | #define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ | ||
2993 | #define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ | ||
2994 | #define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ | ||
2995 | #define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ | ||
2996 | |||
2997 | /* | ||
2998 | * Values for field imsgtype | ||
2999 | */ | ||
3000 | #define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */ | ||
3001 | #define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ | ||
3002 | #define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ | ||
3003 | #define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ | ||
3004 | |||
3005 | /* | ||
3006 | * values for field initiator. | ||
3007 | */ | ||
3008 | #define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ | ||
3009 | #define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ | ||
3010 | #define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ | ||
3011 | #define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */ | ||
3012 | #define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */ | ||
3013 | |||
3014 | /* | ||
3015 | * Number of credits Hub widget has while sending req/response to | ||
3016 | * xbow. | ||
3017 | * Value of 3 is required by Xbow 1.1 | ||
3018 | * We may be able to increase this to 4 with Xbow 1.2. | ||
3019 | */ | ||
3020 | #define HUBII_XBOW_CREDIT 3 | ||
3021 | #define HUBII_XBOW_REV2_CREDIT 4 | ||
3022 | |||
3023 | /* | ||
3024 | * Number of credits that xtalk devices should use when communicating | ||
3025 | * with a SHub (depth of SHub's queue). | ||
3026 | */ | ||
3027 | #define HUB_CREDIT 4 | ||
3028 | |||
3029 | /* | ||
3030 | * Some IIO_PRB fields | ||
3031 | */ | ||
3032 | #define IIO_PRB_MULTI_ERR (1LL << 63) | ||
3033 | #define IIO_PRB_SPUR_RD (1LL << 51) | ||
3034 | #define IIO_PRB_SPUR_WR (1LL << 50) | ||
3035 | #define IIO_PRB_RD_TO (1LL << 49) | ||
3036 | #define IIO_PRB_ERROR (1LL << 48) | ||
3037 | |||
3038 | /************************************************************************* | ||
3039 | |||
3040 | Some of the IIO field masks and shifts are defined here. | ||
3041 | This is in order to maintain compatibility in SN0 and SN1 code | ||
3042 | |||
3043 | **************************************************************************/ | ||
3044 | |||
3045 | /* | ||
3046 | * ICMR register fields | ||
3047 | * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not | ||
3048 | * present in SHub) | ||
3049 | */ | ||
3050 | |||
3051 | #define IIO_ICMR_CRB_VLD_SHFT 20 | ||
3052 | #define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) | ||
3053 | |||
3054 | #define IIO_ICMR_FC_CNT_SHFT 16 | ||
3055 | #define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) | ||
3056 | |||
3057 | #define IIO_ICMR_C_CNT_SHFT 4 | ||
3058 | #define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) | ||
3059 | |||
3060 | #define IIO_ICMR_PRECISE (1UL << 52) | ||
3061 | #define IIO_ICMR_CLR_RPPD (1UL << 13) | ||
3062 | #define IIO_ICMR_CLR_RQPD (1UL << 12) | ||
3063 | |||
3064 | /* | ||
3065 | * IIO PIO Deallocation register field masks : (IIO_IPDR) | ||
3066 | XXX present but not needed in bedrock? See the manual. | ||
3067 | */ | ||
3068 | #define IIO_IPDR_PND (1 << 4) | ||
3069 | |||
3070 | /* | ||
3071 | * IIO CRB deallocation register field masks: (IIO_ICDR) | ||
3072 | */ | ||
3073 | #define IIO_ICDR_PND (1 << 4) | ||
3074 | |||
3075 | /* | ||
3076 | * IO BTE Length/Status (IIO_IBLS) register bit field definitions | ||
3077 | */ | ||
3078 | #define IBLS_BUSY (0x1UL << 20) | ||
3079 | #define IBLS_ERROR_SHFT 16 | ||
3080 | #define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT) | ||
3081 | #define IBLS_LENGTH_MASK 0xffff | ||
3082 | |||
3083 | /* | ||
3084 | * IO BTE Control/Terminate register (IBCT) register bit field definitions | ||
3085 | */ | ||
3086 | #define IBCT_POISON (0x1UL << 8) | ||
3087 | #define IBCT_NOTIFY (0x1UL << 4) | ||
3088 | #define IBCT_ZFIL_MODE (0x1UL << 0) | ||
3089 | |||
3090 | /* | ||
3091 | * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2) | ||
3092 | */ | ||
3093 | #define IIEPH1_VALID (1UL << 44) | ||
3094 | #define IIEPH1_OVERRUN (1UL << 40) | ||
3095 | #define IIEPH1_ERR_TYPE_SHFT 32 | ||
3096 | #define IIEPH1_ERR_TYPE_MASK 0xf | ||
3097 | #define IIEPH1_SOURCE_SHFT 20 | ||
3098 | #define IIEPH1_SOURCE_MASK 11 | ||
3099 | #define IIEPH1_SUPPL_SHFT 8 | ||
3100 | #define IIEPH1_SUPPL_MASK 11 | ||
3101 | #define IIEPH1_CMD_SHFT 0 | ||
3102 | #define IIEPH1_CMD_MASK 7 | ||
3103 | |||
3104 | #define IIEPH2_TAIL (1UL << 40) | ||
3105 | #define IIEPH2_ADDRESS_SHFT 0 | ||
3106 | #define IIEPH2_ADDRESS_MASK 38 | ||
3107 | |||
3108 | #define IIEPH1_ERR_SHORT_REQ 2 | ||
3109 | #define IIEPH1_ERR_SHORT_REPLY 3 | ||
3110 | #define IIEPH1_ERR_LONG_REQ 4 | ||
3111 | #define IIEPH1_ERR_LONG_REPLY 5 | ||
3112 | |||
3113 | /* | ||
3114 | * IO Error Clear register bit field definitions | ||
3115 | */ | ||
3116 | #define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ | ||
3117 | #define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ | ||
3118 | #define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ | ||
3119 | #define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ | ||
3120 | #define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ | ||
3121 | #define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ | ||
3122 | #define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ | ||
3123 | #define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ | ||
3124 | #define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ | ||
3125 | #define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ | ||
3126 | #define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ | ||
3127 | #define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ | ||
3128 | #define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ | ||
3129 | #define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ | ||
3130 | #define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ | ||
3131 | |||
3132 | /* | ||
3133 | * IIO CRB control register Fields: IIO_ICCR | ||
3134 | */ | ||
3135 | #define IIO_ICCR_PENDING 0x10000 | ||
3136 | #define IIO_ICCR_CMD_MASK 0xFF | ||
3137 | #define IIO_ICCR_CMD_SHFT 7 | ||
3138 | #define IIO_ICCR_CMD_NOP 0x0 /* No Op */ | ||
3139 | #define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */ | ||
3140 | #define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */ | ||
3141 | #define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory | ||
3142 | * via a WB | ||
3143 | */ | ||
3144 | #define IIO_ICCR_CMD_FLUSH 0x800 | ||
3145 | |||
3146 | /* | ||
3147 | * | ||
3148 | * CRB Register description. | ||
3149 | * | ||
3150 | * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING | ||
3151 | * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING | ||
3152 | * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING | ||
3153 | * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING | ||
3154 | * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING | ||
3155 | * | ||
3156 | * Many of the fields in CRB are status bits used by hardware | ||
3157 | * for implementation of the protocol. It's very dangerous to | ||
3158 | * mess around with the CRB registers. | ||
3159 | * | ||
3160 | * It's OK to read the CRB registers and try to make sense out of the | ||
3161 | * fields in CRB. | ||
3162 | * | ||
3163 | * Updating a CRB requires all activity in the Hub IIO to be quiesced; | ||
3164 | * otherwise, a write to a CRB could corrupt other CRB entries. | ||
3165 | * CRBs are here only as a back-door peek into the shub IIO's status. | ||
3166 | * Quiescing implies no DMAs and no PIOs, | ||
3167 | * either directly from the cpu or from sn0net. | ||
3168 | * This is not something that can be done easily, so AVOID updating | ||
3169 | * CRBs. | ||
3170 | */ | ||
3171 | |||
3172 | /* | ||
3173 | * Easy access macros for CRBs, all 5 registers (A-E) | ||
3174 | */ | ||
3175 | typedef ii_icrb0_a_u_t icrba_t; | ||
3176 | #define a_sidn ii_icrb0_a_fld_s.ia_sidn | ||
3177 | #define a_tnum ii_icrb0_a_fld_s.ia_tnum | ||
3178 | #define a_addr ii_icrb0_a_fld_s.ia_addr | ||
3179 | #define a_valid ii_icrb0_a_fld_s.ia_vld | ||
3180 | #define a_iow ii_icrb0_a_fld_s.ia_iow | ||
3181 | #define a_regvalue ii_icrb0_a_regval | ||
3182 | |||
3183 | typedef ii_icrb0_b_u_t icrbb_t; | ||
3184 | #define b_use_old ii_icrb0_b_fld_s.ib_use_old | ||
3185 | #define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype | ||
3186 | #define b_imsg ii_icrb0_b_fld_s.ib_imsg | ||
3187 | #define b_initiator ii_icrb0_b_fld_s.ib_init | ||
3188 | #define b_exc ii_icrb0_b_fld_s.ib_exc | ||
3189 | #define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt | ||
3190 | #define b_resp ii_icrb0_b_fld_s.ib_resp | ||
3191 | #define b_ack ii_icrb0_b_fld_s.ib_ack | ||
3192 | #define b_hold ii_icrb0_b_fld_s.ib_hold | ||
3193 | #define b_wb ii_icrb0_b_fld_s.ib_wb | ||
3194 | #define b_intvn ii_icrb0_b_fld_s.ib_intvn | ||
3195 | #define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib | ||
3196 | #define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr | ||
3197 | #define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0 | ||
3198 | #define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1 | ||
3199 | #define b_error ii_icrb0_b_fld_s.ib_error | ||
3200 | #define b_ecode ii_icrb0_b_fld_s.ib_errcode | ||
3201 | #define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce | ||
3202 | #define b_mark ii_icrb0_b_fld_s.ib_mark | ||
3203 | #define b_xerr ii_icrb0_b_fld_s.ib_xt_err | ||
3204 | #define b_regvalue ii_icrb0_b_regval | ||
3205 | |||
3206 | typedef ii_icrb0_c_u_t icrbc_t; | ||
3207 | #define c_suppl ii_icrb0_c_fld_s.ic_suppl | ||
3208 | #define c_barrop ii_icrb0_c_fld_s.ic_bo | ||
3209 | #define c_doresp ii_icrb0_c_fld_s.ic_resprqd | ||
3210 | #define c_gbr ii_icrb0_c_fld_s.ic_gbr | ||
3211 | #define c_btenum ii_icrb0_c_fld_s.ic_bte_num | ||
3212 | #define c_cohtrans ii_icrb0_c_fld_s.ic_ct | ||
3213 | #define c_xtsize ii_icrb0_c_fld_s.ic_size | ||
3214 | #define c_source ii_icrb0_c_fld_s.ic_source | ||
3215 | #define c_regvalue ii_icrb0_c_regval | ||
3216 | |||
3217 | typedef ii_icrb0_d_u_t icrbd_t; | ||
3218 | #define d_sleep ii_icrb0_d_fld_s.id_sleep | ||
3219 | #define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt | ||
3220 | #define d_pripsc ii_icrb0_d_fld_s.id_pr_psc | ||
3221 | #define d_bteop ii_icrb0_d_fld_s.id_bte_op | ||
3222 | #define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */ | ||
3223 | #define d_benable ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */ | ||
3224 | #define d_regvalue ii_icrb0_d_regval | ||
3225 | |||
3226 | typedef ii_icrb0_e_u_t icrbe_t; | ||
3227 | #define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld | ||
3228 | #define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld | ||
3229 | #define icrbe_context ii_icrb0_e_fld_s.ie_context | ||
3230 | #define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout | ||
3231 | #define e_regvalue ii_icrb0_e_regval | ||
3232 | |||
3233 | /* Number of widgets supported by shub */ | ||
3234 | #define HUB_NUM_WIDGET 9 | ||
3235 | #define HUB_WIDGET_ID_MIN 0x8 | ||
3236 | #define HUB_WIDGET_ID_MAX 0xf | ||
3237 | |||
3238 | #define HUB_WIDGET_PART_NUM 0xc120 | ||
3239 | #define MAX_HUBS_PER_XBOW 2 | ||
3240 | |||
3241 | /* A few more #defines for backwards compatibility */ | ||
3242 | #define iprb_t ii_iprb0_u_t | ||
3243 | #define iprb_regval ii_iprb0_regval | ||
3244 | #define iprb_mult_err ii_iprb0_fld_s.i_mult_err | ||
3245 | #define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd | ||
3246 | #define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr | ||
3247 | #define iprb_rd_to ii_iprb0_fld_s.i_rd_to | ||
3248 | #define iprb_ovflow ii_iprb0_fld_s.i_of_cnt | ||
3249 | #define iprb_error ii_iprb0_fld_s.i_error | ||
3250 | #define iprb_ff ii_iprb0_fld_s.i_f | ||
3251 | #define iprb_mode ii_iprb0_fld_s.i_m | ||
3252 | #define iprb_bnakctr ii_iprb0_fld_s.i_nb | ||
3253 | #define iprb_anakctr ii_iprb0_fld_s.i_na | ||
3254 | #define iprb_xtalkctr ii_iprb0_fld_s.i_c | ||
3255 | |||
3256 | #define LNK_STAT_WORKING 0x2 /* LLP is working */ | ||
3257 | |||
3258 | #define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ | ||
3259 | #define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ | ||
3260 | #define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */ | ||
3261 | #define IIO_WSTAT_TXRETRY_SHFT 16 | ||
3262 | #define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ | ||
3263 | IIO_WSTAT_TXRETRY_MASK) | ||
3264 | |||
3265 | /* Number of II perf. counters we can multiplex at once */ | ||
3266 | |||
3267 | #define IO_PERF_SETS 32 | ||
3268 | |||
3269 | /* Bit for the widget in inbound access register */ | ||
3270 | #define IIO_IIWA_WIDGET(_w) ((u64)(1ULL << _w)) | ||
3271 | /* Bit for the widget in outbound access register */ | ||
3272 | #define IIO_IOWA_WIDGET(_w) ((u64)(1ULL << _w)) | ||
3273 | |||
3274 | /* NOTE: The following define assumes that we are going to get | ||
3275 | * widget numbers from 8 thru F and the device numbers within | ||
3276 | * widget from 0 thru 7. | ||
3277 | */ | ||
3278 | #define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((u64)(1ULL << (8 * ((w) - 8) + (d)))) | ||
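A stand-alone illustration of the (widget, device)-to-bit mapping described in the note above (the demo_ name is illustrative; __builtin_ctzll is a GCC builtin used only to print the bit index):

#include <stdio.h>

/* One bit per (widget, device) pair: widget w in 8..0xF, device d in 0..7. */
static unsigned long long demo_iidem_mask(unsigned w, unsigned d)
{
        return 1ULL << (8 * (w - 8) + d);
}

int main(void)
{
        printf("widget 0x8, dev 0 -> bit %d\n", __builtin_ctzll(demo_iidem_mask(0x8, 0))); /* 0  */
        printf("widget 0x9, dev 3 -> bit %d\n", __builtin_ctzll(demo_iidem_mask(0x9, 3))); /* 11 */
        printf("widget 0xf, dev 7 -> bit %d\n", __builtin_ctzll(demo_iidem_mask(0xf, 7))); /* 63 */
        return 0;
}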
3279 | |||
3280 | /* IO Interrupt Destination Register */ | ||
3281 | #define IIO_IIDSR_SENT_SHIFT 28 | ||
3282 | #define IIO_IIDSR_SENT_MASK 0x30000000 | ||
3283 | #define IIO_IIDSR_ENB_SHIFT 24 | ||
3284 | #define IIO_IIDSR_ENB_MASK 0x01000000 | ||
3285 | #define IIO_IIDSR_NODE_SHIFT 9 | ||
3286 | #define IIO_IIDSR_NODE_MASK 0x000ff700 | ||
3287 | #define IIO_IIDSR_PI_ID_SHIFT 8 | ||
3288 | #define IIO_IIDSR_PI_ID_MASK 0x00000100 | ||
3289 | #define IIO_IIDSR_LVL_SHIFT 0 | ||
3290 | #define IIO_IIDSR_LVL_MASK 0x000000ff | ||
3291 | |||
3292 | /* Xtalk timeout threshold register (IIO_IXTT) */ | ||
3293 | #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ | ||
3294 | #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) | ||
3295 | #define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */ | ||
3296 | #define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT) | ||
3297 | #define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ | ||
3298 | #define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT) | ||
3299 | |||
3300 | /* | ||
3301 | * The IO LLP control status register and widget control register | ||
3302 | */ | ||
3303 | |||
3304 | typedef union hubii_wcr_u { | ||
3305 | u64 wcr_reg_value; | ||
3306 | struct { | ||
3307 | u64 wcr_widget_id:4, /* Widget identification */ | ||
3308 | wcr_tag_mode:1, /* Tag mode */ | ||
3309 | wcr_rsvd1:8, /* Reserved */ | ||
3310 | wcr_xbar_crd:3, /* LLP crossbar credit */ | ||
3311 | wcr_f_bad_pkt:1, /* Force bad llp pkt enable */ | ||
3312 | wcr_dir_con:1, /* widget direct connect */ | ||
3313 | wcr_e_thresh:5, /* elasticity threshold */ | ||
3314 | wcr_rsvd:41; /* unused */ | ||
3315 | } wcr_fields_s; | ||
3316 | } hubii_wcr_t; | ||
3317 | |||
3318 | #define iwcr_dir_con wcr_fields_s.wcr_dir_con | ||
3319 | |||
3320 | /* The structures below are defined to extract and modify the ii | ||
3321 | performance registers */ | ||
3322 | |||
3323 | /* io_perf_sel allows the caller to specify what tests will be | ||
3324 | performed */ | ||
3325 | |||
3326 | typedef union io_perf_sel { | ||
3327 | u64 perf_sel_reg; | ||
3328 | struct { | ||
3329 | u64 perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48; | ||
3330 | } perf_sel_bits; | ||
3331 | } io_perf_sel_t; | ||
3332 | |||
3333 | /* io_perf_cnt is used to extract the count from the shub registers. Due to | ||
3334 | hardware problems there is only one counter, not two. */ | ||
3335 | |||
3336 | typedef union io_perf_cnt { | ||
3337 | u64 perf_cnt; | ||
3338 | struct { | ||
3339 | u64 perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32; | ||
3340 | } perf_cnt_bits; | ||
3341 | |||
3342 | } io_perf_cnt_t; | ||
3343 | |||
3344 | typedef union iprte_a { | ||
3345 | u64 entry; | ||
3346 | struct { | ||
3347 | u64 i_rsvd_1:3; | ||
3348 | u64 i_addr:38; | ||
3349 | u64 i_init:3; | ||
3350 | u64 i_source:8; | ||
3351 | u64 i_rsvd:2; | ||
3352 | u64 i_widget:4; | ||
3353 | u64 i_to_cnt:5; | ||
3354 | u64 i_vld:1; | ||
3355 | } iprte_fields; | ||
3356 | } iprte_a_t; | ||
3357 | |||
3358 | #endif /* _ASM_IA64_SN_SHUBIO_H */ | ||
diff --git a/arch/ia64/include/asm/sn/simulator.h b/arch/ia64/include/asm/sn/simulator.h index c2611f6cfe33..3e4557df3b7c 100644 --- a/arch/ia64/include/asm/sn/simulator.h +++ b/arch/ia64/include/asm/sn/simulator.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_IA64_SN_SIMULATOR_H | 8 | #ifndef _ASM_IA64_SN_SIMULATOR_H |
9 | #define _ASM_IA64_SN_SIMULATOR_H | 9 | #define _ASM_IA64_SN_SIMULATOR_H |
10 | 10 | ||
11 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV) | 11 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_UV) |
12 | #define SNMAGIC 0xaeeeeeee8badbeefL | 12 | #define SNMAGIC 0xaeeeeeee8badbeefL |
13 | #define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;}) | 13 | #define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;}) |
14 | 14 | ||
diff --git a/arch/ia64/include/asm/sn/sn2/sn_hwperf.h b/arch/ia64/include/asm/sn/sn2/sn_hwperf.h deleted file mode 100644 index e61ebac38cdd..000000000000 --- a/arch/ia64/include/asm/sn/sn2/sn_hwperf.h +++ /dev/null | |||
@@ -1,242 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | * Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring | ||
9 | * SGI Altix node and router hardware | ||
10 | * | ||
11 | * Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004 | ||
12 | */ | ||
13 | |||
14 | #ifndef SN_HWPERF_H | ||
15 | #define SN_HWPERF_H | ||
16 | |||
17 | /* | ||
18 | * object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO | ||
19 | * return an array of these. Do not change this without also | ||
20 | * changing the corresponding SAL code. | ||
21 | */ | ||
22 | #define SN_HWPERF_MAXSTRING 128 | ||
23 | struct sn_hwperf_object_info { | ||
24 | u32 id; | ||
25 | union { | ||
26 | struct { | ||
27 | u64 this_part:1; | ||
28 | u64 is_shared:1; | ||
29 | } fields; | ||
30 | struct { | ||
31 | u64 flags; | ||
32 | u64 reserved; | ||
33 | } b; | ||
34 | } f; | ||
35 | char name[SN_HWPERF_MAXSTRING]; | ||
36 | char location[SN_HWPERF_MAXSTRING]; | ||
37 | u32 ports; | ||
38 | }; | ||
39 | |||
40 | #define sn_hwp_this_part f.fields.this_part | ||
41 | #define sn_hwp_is_shared f.fields.is_shared | ||
42 | #define sn_hwp_flags f.b.flags | ||
43 | |||
44 | /* macros for object classification */ | ||
45 | #define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub")) | ||
46 | #define SN_HWPERF_IS_NODE_SHUB2(x) ((x) && strstr((x)->name, "SHub 2.")) | ||
47 | #define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO")) | ||
48 | #define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router")) | ||
49 | #define SN_HWPERF_IS_NL4ROUTER(x) ((x) && strstr((x)->name, "NL4Router")) | ||
50 | #define SN_HWPERF_IS_OLDROUTER(x) ((x) && strstr((x)->name, "Router")) | ||
51 | #define SN_HWPERF_IS_ROUTER(x) (SN_HWPERF_IS_NL3ROUTER(x) || \ | ||
52 | SN_HWPERF_IS_NL4ROUTER(x) || \ | ||
53 | SN_HWPERF_IS_OLDROUTER(x)) | ||
54 | #define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared) | ||
55 | #define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\ | ||
56 | (SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\ | ||
57 | (SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y))) | ||
58 | |||
59 | /* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */ | ||
60 | struct sn_hwperf_port_info { | ||
61 | u32 port; | ||
62 | u32 conn_id; | ||
63 | u32 conn_port; | ||
64 | }; | ||
65 | |||
66 | /* for HWPERF_{GET,SET}_MMRS */ | ||
67 | struct sn_hwperf_data { | ||
68 | u64 addr; | ||
69 | u64 data; | ||
70 | }; | ||
71 | |||
72 | /* user ioctl() argument, see below */ | ||
73 | struct sn_hwperf_ioctl_args { | ||
74 | u64 arg; /* argument, usually an object id */ | ||
75 | u64 sz; /* size of transfer */ | ||
76 | void *ptr; /* pointer to source/target */ | ||
77 | u32 v0; /* second return value */ | ||
78 | }; | ||
79 | |||
80 | /* | ||
81 | * For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE, | ||
82 | * sn_hwperf_ioctl_args.arg can be used to specify a CPU on which | ||
83 | * to call SAL, and whether to use an interprocessor interrupt | ||
84 | * or task migration in order to do so. If the CPU specified is | ||
85 | * SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used. | ||
86 | */ | ||
87 | #define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL | ||
88 | #define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL | ||
89 | #define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL | ||
90 | #define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL | ||
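For illustration only, a caller could pack the arg value along the lines of the masks above; the demo_ helper is an assumption for this sketch and is not part of the header or of the sn_hwperf driver itself:

#include <stdio.h>

#define DEMO_ARG_ANY_CPU   0x7fffffffUL
#define DEMO_ARG_USE_IPI   0x8000000000000000ULL

/* Object id in the low 32 bits, cpu number above it, IPI flag in bit 63. */
static unsigned long long demo_hwperf_arg(unsigned objid, unsigned long cpu, int use_ipi)
{
        unsigned long long arg = objid;

        arg |= (unsigned long long)(cpu & DEMO_ARG_ANY_CPU) << 32;
        if (use_ipi)
                arg |= DEMO_ARG_USE_IPI;
        return arg;
}

int main(void)
{
        /* object 5, run on cpu 2 via an IPI */
        printf("arg = 0x%016llx\n", demo_hwperf_arg(5, 2, 1));
        /* object 5, any cpu, task migration */
        printf("arg = 0x%016llx\n", demo_hwperf_arg(5, DEMO_ARG_ANY_CPU, 0));
        return 0;
}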
91 | |||
92 | /* | ||
93 | * ioctl requests on the "sn_hwperf" misc device that call SAL. | ||
94 | */ | ||
95 | #define SN_HWPERF_OP_MEM_COPYIN 0x1000 | ||
96 | #define SN_HWPERF_OP_MEM_COPYOUT 0x2000 | ||
97 | #define SN_HWPERF_OP_MASK 0x0fff | ||
98 | |||
99 | /* | ||
100 | * Determine mem requirement. | ||
101 | * arg don't care | ||
102 | * sz 8 | ||
103 | * p pointer to u64 integer | ||
104 | */ | ||
105 | #define SN_HWPERF_GET_HEAPSIZE 1 | ||
106 | |||
107 | /* | ||
108 | * Install mem for SAL drvr | ||
109 | * arg don't care | ||
110 | * sz sizeof buffer pointed to by p | ||
111 | * p pointer to buffer for scratch area | ||
112 | */ | ||
113 | #define SN_HWPERF_INSTALL_HEAP 2 | ||
114 | |||
115 | /* | ||
116 | * Determine number of objects | ||
117 | * arg don't care | ||
118 | * sz 8 | ||
119 | * p pointer to u64 integer | ||
120 | */ | ||
121 | #define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT) | ||
122 | |||
123 | /* | ||
124 | * Determine object "distance", relative to a cpu. This operation can | ||
125 | * execute on a designated logical cpu number, either via an IPI or | ||
126 | * via task migration. If the cpu number is SN_HWPERF_ANY_CPU, then | ||
127 | * the current CPU is used. See the SN_HWPERF_ARG_* macros above. | ||
128 | * | ||
129 | * arg bitmap of IPI flag, cpu number and object id | ||
130 | * sz 8 | ||
131 | * p pointer to u64 integer | ||
132 | */ | ||
133 | #define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT) | ||
134 | |||
135 | /* | ||
136 | * Enumerate objects. Special case if sz == 8, returns the required | ||
137 | * buffer size. | ||
138 | * arg don't care | ||
139 | * sz sizeof buffer pointed to by p | ||
140 | * p pointer to array of struct sn_hwperf_object_info | ||
141 | */ | ||
142 | #define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT) | ||
143 | |||
144 | /* | ||
145 | * Enumerate NumaLink ports for an object. Special case if sz == 8, | ||
146 | * returns the required buffer size. | ||
147 | * arg object id | ||
148 | * sz sizeof buffer pointed to by p | ||
149 | * p pointer to array of struct sn_hwperf_port_info | ||
150 | */ | ||
151 | #define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT) | ||
152 | |||
153 | /* | ||
154 | * SET/GET memory mapped registers. These operations can execute | ||
155 | * on a designated logical cpu number, either via an IPI or via | ||
156 | * task migration. If the cpu number is SN_HWPERF_ANY_CPU, then | ||
157 | * the current CPU is used. See the SN_HWPERF_ARG_* macros above. | ||
158 | * | ||
159 | * arg bitmap of ipi flag, cpu number and object id | ||
160 | * sz sizeof buffer pointed to by p | ||
161 | * p pointer to array of struct sn_hwperf_data | ||
162 | */ | ||
163 | #define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN) | ||
164 | #define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \ | ||
165 | SN_HWPERF_OP_MEM_COPYIN) | ||
166 | /* | ||
167 | * Lock a shared object | ||
168 | * arg object id | ||
169 | * sz don't care | ||
170 | * p don't care | ||
171 | */ | ||
172 | #define SN_HWPERF_ACQUIRE 16 | ||
173 | |||
174 | /* | ||
175 | * Unlock a shared object | ||
176 | * arg object id | ||
177 | * sz don't care | ||
178 | * p don't care | ||
179 | */ | ||
180 | #define SN_HWPERF_RELEASE 17 | ||
181 | |||
182 | /* | ||
183 | * Break a lock on a shared object | ||
184 | * arg object id | ||
185 | * sz don't care | ||
186 | * p don't care | ||
187 | */ | ||
188 | #define SN_HWPERF_FORCE_RELEASE 18 | ||
189 | |||
190 | /* | ||
191 | * ioctl requests on "sn_hwperf" that do not call SAL | ||
192 | */ | ||
193 | |||
194 | /* | ||
195 | * get cpu info as an array of hwperf_object_info_t. | ||
196 | * id is logical CPU number, name is description, location | ||
197 | * is geoid (e.g. 001c04#1c). Special case if sz == 8, | ||
198 | * returns the required buffer size. | ||
199 | * | ||
200 | * arg don't care | ||
201 | * sz sizeof buffer pointed to by p | ||
202 | * p pointer to array of struct sn_hwperf_object_info | ||
203 | */ | ||
204 | #define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT) | ||
205 | |||
206 | /* | ||
207 | * Given an object id, return its node number (aka cnode). | ||
208 | * arg object id | ||
209 | * sz 8 | ||
210 | * p pointer to u64 integer | ||
211 | */ | ||
212 | #define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT) | ||
213 | |||
214 | /* | ||
215 | * Given a node number (cnode), return its nasid. | ||
216 | * arg ordinal node number (aka cnodeid) | ||
217 | * sz 8 | ||
218 | * p pointer to u64 integer | ||
219 | */ | ||
220 | #define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT) | ||
221 | |||
222 | /* | ||
223 | * Given a node id, determine the id of the nearest node with CPUs | ||
224 | * and the id of the nearest node that has memory. The argument | ||
225 | * node would normally be a "headless" node, e.g. an "IO node". | ||
226 | * Return 0 on success. | ||
227 | */ | ||
228 | extern int sn_hwperf_get_nearest_node(cnodeid_t node, | ||
229 | cnodeid_t *near_mem, cnodeid_t *near_cpu); | ||
230 | |||
231 | /* return codes */ | ||
232 | #define SN_HWPERF_OP_OK 0 | ||
233 | #define SN_HWPERF_OP_NOMEM 1 | ||
234 | #define SN_HWPERF_OP_NO_PERM 2 | ||
235 | #define SN_HWPERF_OP_IO_ERROR 3 | ||
236 | #define SN_HWPERF_OP_BUSY 4 | ||
237 | #define SN_HWPERF_OP_RECONFIGURE 253 | ||
238 | #define SN_HWPERF_OP_INVAL 254 | ||
239 | |||
240 | int sn_topology_open(struct inode *inode, struct file *file); | ||
241 | int sn_topology_release(struct inode *inode, struct file *file); | ||
242 | #endif /* SN_HWPERF_H */ | ||
diff --git a/arch/ia64/include/asm/sn/sn_cpuid.h b/arch/ia64/include/asm/sn/sn_cpuid.h deleted file mode 100644 index a676dd9ace3e..000000000000 --- a/arch/ia64/include/asm/sn/sn_cpuid.h +++ /dev/null | |||
@@ -1,132 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * This file is subject to the terms and conditions of the GNU General Public | ||
4 | * License. See the file "COPYING" in the main directory of this archive | ||
5 | * for more details. | ||
6 | * | ||
7 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
8 | */ | ||
9 | |||
10 | |||
11 | #ifndef _ASM_IA64_SN_SN_CPUID_H | ||
12 | #define _ASM_IA64_SN_SN_CPUID_H | ||
13 | |||
14 | #include <linux/smp.h> | ||
15 | #include <asm/sn/addrs.h> | ||
16 | #include <asm/sn/pda.h> | ||
17 | #include <asm/intrinsics.h> | ||
18 | |||
19 | |||
20 | /* | ||
21 | * Functions for converting between cpuids, nodeids and NASIDs. | ||
22 | * | ||
23 | * These are for SGI platforms only. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | |||
29 | |||
30 | /* | ||
31 | * Definitions of terms (these definitions are for IA64 ONLY. Other architectures | ||
32 | * use cpuid/cpunum quite differently): | ||
33 | * | ||
34 | * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies | ||
35 | * the cpu. The value cpuid has no significance on IA64 other than that | ||
36 | * the boot cpu is 0. | ||
37 | * smp_processor_id() returns the cpuid of the current cpu. | ||
38 | * | ||
39 | * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID) | ||
40 | * This is the same as 31:24 of the processor LID register | ||
41 | * hard_smp_processor_id()- cpu_physical_id of current processor | ||
42 | * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid> | ||
43 | * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid> | ||
44 | * * not real efficient - don't use in perf critical code | ||
45 | * | ||
46 | * SLICE - a number in the range of 0 - 3 (typically) that represents the | ||
47 | * cpu number on a brick. | ||
48 | * | ||
49 | * SUBNODE - (almost obsolete) the number of the FSB that a cpu is | ||
50 | * connected to. This is also the same as the PI number. Usually 0 or 1. | ||
51 | * | ||
52 | * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no | ||
53 | * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM. | ||
54 | * | ||
55 | * | ||
56 | * The macros convert between cpu physical ids & slice/nasid/cnodeid. | ||
57 | * These terms are described below: | ||
58 | * | ||
59 | * | ||
60 | * Brick | ||
61 | * ----- ----- ----- ----- CPU | ||
62 | * | 0 | | 1 | | 0 | | 1 | SLICE | ||
63 | * ----- ----- ----- ----- | ||
64 | * | | | | | ||
65 | * | | | | | ||
66 | * 0 | | 2 0 | | 2 FSB SLOT | ||
67 | * ------- ------- | ||
68 | * | | | ||
69 | * | | | ||
70 | * | | | ||
71 | * ------------ ------------- | ||
72 | * | | | | | ||
73 | * | SHUB | | SHUB | NASID (0..MAX_NASIDS) | ||
74 | * | |----- | | CNODEID (0..num_compact_nodes-1) | ||
75 | * | | | | | ||
76 | * | | | | | ||
77 | * ------------ ------------- | ||
78 | * | | | ||
79 | * | ||
80 | * | ||
81 | */ | ||
82 | |||
83 | #define get_node_number(addr) NASID_GET(addr) | ||
84 | |||
85 | /* | ||
86 | * NOTE: on non-MP systems, only cpuid 0 exists | ||
87 | */ | ||
88 | |||
89 | extern short physical_node_map[]; /* indexed by nasid to get cnode */ | ||
90 | |||
91 | /* | ||
92 | * Macros for retrieving info about current cpu | ||
93 | */ | ||
94 | #define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid) | ||
95 | #define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode) | ||
96 | #define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice) | ||
97 | #define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode) | ||
98 | #define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) | ||
99 | |||
100 | /* | ||
101 | * Macros for retrieving info about an arbitrary cpu | ||
102 | * cpuid - logical cpu id | ||
103 | */ | ||
104 | #define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid) | ||
105 | #define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode) | ||
106 | #define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice) | ||
107 | |||
108 | |||
109 | /* | ||
110 | * Don't use the following in performance critical code. They require scans | ||
111 | * of potentially large tables. | ||
112 | */ | ||
113 | extern int nasid_slice_to_cpuid(int, int); | ||
114 | |||
115 | /* | ||
116 | * cnodeid_to_nasid - convert a cnodeid to a NASID | ||
117 | */ | ||
118 | #define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid]) | ||
119 | |||
120 | /* | ||
121 | * nasid_to_cnodeid - convert a NASID to a cnodeid | ||
122 | */ | ||
123 | #define nasid_to_cnodeid(nasid) (physical_node_map[nasid]) | ||
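A self-contained sketch of the two-way mapping these macros provide; the demo_ tables below hold made-up example values, whereas the real sn_cnodeid_to_nasid and physical_node_map tables are filled in from PROM data at boot:

#include <stdio.h>

/* Example values only: three compact nodes with nasids 0, 2 and 4. */
static short demo_cnodeid_to_nasid[] = { 0, 2, 4 };
static short demo_physical_node_map[] = { 0, -1, 1, -1, 2 };   /* indexed by nasid */

#define demo_cnode_to_nasid(cnode)   (demo_cnodeid_to_nasid[(cnode)])
#define demo_nasid_to_cnode(nasid)   (demo_physical_node_map[(nasid)])

int main(void)
{
        int cnode;

        for (cnode = 0; cnode < 3; cnode++) {
                int nasid = demo_cnode_to_nasid(cnode);
                printf("cnode %d <-> nasid %d (round trip %d)\n",
                       cnode, nasid, demo_nasid_to_cnode(nasid));
        }
        return 0;
}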
124 | |||
125 | /* | ||
126 | * partition_coherence_id - get the coherence ID of the current partition | ||
127 | */ | ||
128 | extern u8 sn_coherency_id; | ||
129 | #define partition_coherence_id() (sn_coherency_id) | ||
130 | |||
131 | #endif /* _ASM_IA64_SN_SN_CPUID_H */ | ||
132 | |||
diff --git a/arch/ia64/include/asm/sn/sn_feature_sets.h b/arch/ia64/include/asm/sn/sn_feature_sets.h deleted file mode 100644 index 8e83ac117ace..000000000000 --- a/arch/ia64/include/asm/sn/sn_feature_sets.h +++ /dev/null | |||
@@ -1,58 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_SN_FEATURE_SETS_H | ||
2 | #define _ASM_IA64_SN_FEATURE_SETS_H | ||
3 | |||
4 | /* | ||
5 | * SN PROM Features | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | * Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved. | ||
12 | */ | ||
13 | |||
14 | |||
15 | /* --------------------- PROM Features -----------------------------*/ | ||
16 | extern int sn_prom_feature_available(int id); | ||
17 | |||
18 | #define MAX_PROM_FEATURE_SETS 2 | ||
19 | |||
20 | /* | ||
21 | * The following defines features that may or may not be supported by the | ||
22 | * current PROM. The OS uses sn_prom_feature_available(feature) to test for | ||
23 | * the presence of a PROM feature. Down rev (old) PROMs will always test | ||
24 | * "false" for new features. | ||
25 | * | ||
26 | * Use: | ||
27 | * if (sn_prom_feature_available(PRF_XXX)) | ||
28 | * ... | ||
29 | */ | ||
30 | |||
31 | #define PRF_PAL_CACHE_FLUSH_SAFE 0 | ||
32 | #define PRF_DEVICE_FLUSH_LIST 1 | ||
33 | #define PRF_HOTPLUG_SUPPORT 2 | ||
34 | #define PRF_CPU_DISABLE_SUPPORT 3 | ||
35 | |||
36 | /* --------------------- OS Features -------------------------------*/ | ||
37 | |||
38 | /* | ||
39 | * The following defines OS features that are optionally present in | ||
40 | * the operating system. | ||
41 | * During boot, PROM is notified of these features via a series of calls: | ||
42 | * | ||
43 | * ia64_sn_set_os_feature(feature1); | ||
44 | * | ||
45 | * Once enabled, a feature cannot be disabled. | ||
46 | * | ||
47 | * By default, features are disabled unless explicitly enabled. | ||
48 | * | ||
49 | * These defines must be kept in sync with the corresponding | ||
50 | * PROM definitions in feature_sets.h. | ||
51 | */ | ||
52 | #define OSF_MCA_SLV_TO_OS_INIT_SLV 0 | ||
53 | #define OSF_FEAT_LOG_SBES 1 | ||
54 | #define OSF_ACPI_ENABLE 2 | ||
55 | #define OSF_PCISEGMENT_ENABLE 3 | ||
56 | |||
57 | |||
58 | #endif /* _ASM_IA64_SN_FEATURE_SETS_H */ | ||
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h index 1f5ff470a5a1..48b88d0807db 100644 --- a/arch/ia64/include/asm/sn/sn_sal.h +++ b/arch/ia64/include/asm/sn/sn_sal.h | |||
@@ -11,140 +11,17 @@ | |||
11 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved. | 11 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | 14 | #include <linux/types.h> | |
15 | #include <asm/sal.h> | 15 | #include <asm/sal.h> |
16 | #include <asm/sn/sn_cpuid.h> | ||
17 | #include <asm/sn/arch.h> | ||
18 | #include <asm/sn/geo.h> | ||
19 | #include <asm/sn/nodepda.h> | ||
20 | #include <asm/sn/shub_mmr.h> | ||
21 | 16 | ||
22 | // SGI Specific Calls | 17 | // SGI Specific Calls |
23 | #define SN_SAL_POD_MODE 0x02000001 | ||
24 | #define SN_SAL_SYSTEM_RESET 0x02000002 | ||
25 | #define SN_SAL_PROBE 0x02000003 | ||
26 | #define SN_SAL_GET_MASTER_NASID 0x02000004 | ||
27 | #define SN_SAL_GET_KLCONFIG_ADDR 0x02000005 | ||
28 | #define SN_SAL_LOG_CE 0x02000006 | ||
29 | #define SN_SAL_REGISTER_CE 0x02000007 | ||
30 | #define SN_SAL_GET_PARTITION_ADDR 0x02000009 | 18 | #define SN_SAL_GET_PARTITION_ADDR 0x02000009 |
31 | #define SN_SAL_XP_ADDR_REGION 0x0200000f | ||
32 | #define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010 | ||
33 | #define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011 | ||
34 | #define SN_SAL_PRINT_ERROR 0x02000012 | ||
35 | #define SN_SAL_REGISTER_PMI_HANDLER 0x02000014 | ||
36 | #define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant | ||
37 | #define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant | ||
38 | #define SN_SAL_GET_SAPIC_INFO 0x0200001d | ||
39 | #define SN_SAL_GET_SN_INFO 0x0200001e | ||
40 | #define SN_SAL_CONSOLE_PUTC 0x02000021 | ||
41 | #define SN_SAL_CONSOLE_GETC 0x02000022 | ||
42 | #define SN_SAL_CONSOLE_PUTS 0x02000023 | ||
43 | #define SN_SAL_CONSOLE_GETS 0x02000024 | ||
44 | #define SN_SAL_CONSOLE_GETS_TIMEOUT 0x02000025 | ||
45 | #define SN_SAL_CONSOLE_POLL 0x02000026 | ||
46 | #define SN_SAL_CONSOLE_INTR 0x02000027 | ||
47 | #define SN_SAL_CONSOLE_PUTB 0x02000028 | ||
48 | #define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a | ||
49 | #define SN_SAL_CONSOLE_READC 0x0200002b | ||
50 | #define SN_SAL_SYSCTL_OP 0x02000030 | ||
51 | #define SN_SAL_SYSCTL_MODID_GET 0x02000031 | ||
52 | #define SN_SAL_SYSCTL_GET 0x02000032 | ||
53 | #define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033 | ||
54 | #define SN_SAL_SYSCTL_IO_PORTSPEED_GET 0x02000035 | ||
55 | #define SN_SAL_SYSCTL_SLAB_GET 0x02000036 | ||
56 | #define SN_SAL_BUS_CONFIG 0x02000037 | ||
57 | #define SN_SAL_SYS_SERIAL_GET 0x02000038 | ||
58 | #define SN_SAL_PARTITION_SERIAL_GET 0x02000039 | ||
59 | #define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a | ||
60 | #define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b | ||
61 | #define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c | ||
62 | #define SN_SAL_COHERENCE 0x0200003d | ||
63 | #define SN_SAL_MEMPROTECT 0x0200003e | 19 | #define SN_SAL_MEMPROTECT 0x0200003e |
64 | #define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f | ||
65 | |||
66 | #define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant | ||
67 | #define SN_SAL_IROUTER_OP 0x02000043 | ||
68 | #define SN_SAL_SYSCTL_EVENT 0x02000044 | ||
69 | #define SN_SAL_IOIF_INTERRUPT 0x0200004a | ||
70 | #define SN_SAL_HWPERF_OP 0x02000050 // lock | ||
71 | #define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051 | ||
72 | #define SN_SAL_IOIF_PCI_SAFE 0x02000052 | ||
73 | #define SN_SAL_IOIF_SLOT_ENABLE 0x02000053 | ||
74 | #define SN_SAL_IOIF_SLOT_DISABLE 0x02000054 | ||
75 | #define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055 | ||
76 | #define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056 | ||
77 | #define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057 | ||
78 | #define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated | ||
79 | #define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a | ||
80 | |||
81 | #define SN_SAL_IOIF_INIT 0x0200005f | ||
82 | #define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060 | ||
83 | #define SN_SAL_BTE_RECOVER 0x02000061 | ||
84 | #define SN_SAL_RESERVED_DO_NOT_USE 0x02000062 | ||
85 | #define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064 | ||
86 | |||
87 | #define SN_SAL_GET_PROM_FEATURE_SET 0x02000065 | ||
88 | #define SN_SAL_SET_OS_FEATURE_SET 0x02000066 | ||
89 | #define SN_SAL_INJECT_ERROR 0x02000067 | ||
90 | #define SN_SAL_SET_CPU_NUMBER 0x02000068 | ||
91 | 20 | ||
92 | #define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069 | ||
93 | #define SN_SAL_WATCHLIST_ALLOC 0x02000070 | 21 | #define SN_SAL_WATCHLIST_ALLOC 0x02000070 |
94 | #define SN_SAL_WATCHLIST_FREE 0x02000071 | 22 | #define SN_SAL_WATCHLIST_FREE 0x02000071 |
95 | 23 | ||
96 | /* | 24 | /* |
97 | * Service-specific constants | ||
98 | */ | ||
99 | |||
100 | /* Console interrupt manipulation */ | ||
101 | /* action codes */ | ||
102 | #define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */ | ||
103 | #define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */ | ||
104 | #define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */ | ||
105 | /* interrupt specification & status return codes */ | ||
106 | #define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */ | ||
107 | #define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */ | ||
108 | |||
109 | /* interrupt handling */ | ||
110 | #define SAL_INTR_ALLOC 1 | ||
111 | #define SAL_INTR_FREE 2 | ||
112 | #define SAL_INTR_REDIRECT 3 | ||
113 | |||
114 | /* | ||
115 | * operations available on the generic SN_SAL_SYSCTL_OP | ||
116 | * runtime service | ||
117 | */ | ||
118 | #define SAL_SYSCTL_OP_IOBOARD 0x0001 /* retrieve board type */ | ||
119 | #define SAL_SYSCTL_OP_TIO_JLCK_RST 0x0002 /* issue TIO clock reset */ | ||
120 | |||
121 | /* | ||
122 | * IRouter (i.e. generalized system controller) operations | ||
123 | */ | ||
124 | #define SAL_IROUTER_OPEN 0 /* open a subchannel */ | ||
125 | #define SAL_IROUTER_CLOSE 1 /* close a subchannel */ | ||
126 | #define SAL_IROUTER_SEND 2 /* send part of an IRouter packet */ | ||
127 | #define SAL_IROUTER_RECV 3 /* receive part of an IRouter packet */ | ||
128 | #define SAL_IROUTER_INTR_STATUS 4 /* check the interrupt status for | ||
129 | * an open subchannel | ||
130 | */ | ||
131 | #define SAL_IROUTER_INTR_ON 5 /* enable an interrupt */ | ||
132 | #define SAL_IROUTER_INTR_OFF 6 /* disable an interrupt */ | ||
133 | #define SAL_IROUTER_INIT 7 /* initialize IRouter driver */ | ||
134 | |||
135 | /* IRouter interrupt mask bits */ | ||
136 | #define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT | ||
137 | #define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV | ||
138 | |||
139 | /* | ||
140 | * Error Handling Features | ||
141 | */ | ||
142 | #define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete | ||
143 | #define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete | ||
144 | #define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 | ||
145 | #define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 | ||
146 | |||
147 | /* | ||
148 | * SAL Error Codes | 25 | * SAL Error Codes |
149 | */ | 26 | */ |
150 | #define SALRET_MORE_PASSES 1 | 27 | #define SALRET_MORE_PASSES 1 |
@@ -155,456 +32,6 @@ | |||
155 | 32 | ||
156 | #define SN_SAL_FAKE_PROM 0x02009999 | 33 | #define SN_SAL_FAKE_PROM 0x02009999 |
157 | 34 | ||
158 | /** | ||
159 | * sn_sal_revision - get the SGI SAL revision number | ||
160 | * | ||
161 | * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor). | ||
162 | * This routine simply extracts the major and minor values and | ||
163 | * presents them in a u32 format. | ||
164 | * | ||
165 | * For example, version 4.05 would be represented as 0x0405. | ||
166 | */ | ||
167 | static inline u32 | ||
168 | sn_sal_rev(void) | ||
169 | { | ||
170 | struct ia64_sal_systab *systab = __va(efi.sal_systab); | ||
171 | |||
172 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); | ||
173 | } | ||
174 | |||
175 | /* | ||
176 | * Returns the master console nasid; if the call fails, returns an illegal | ||
177 | * value. | ||
178 | */ | ||
179 | static inline u64 | ||
180 | ia64_sn_get_console_nasid(void) | ||
181 | { | ||
182 | struct ia64_sal_retval ret_stuff; | ||
183 | |||
184 | ret_stuff.status = 0; | ||
185 | ret_stuff.v0 = 0; | ||
186 | ret_stuff.v1 = 0; | ||
187 | ret_stuff.v2 = 0; | ||
188 | SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0); | ||
189 | |||
190 | if (ret_stuff.status < 0) | ||
191 | return ret_stuff.status; | ||
192 | |||
193 | /* Master console nasid is in 'v0' */ | ||
194 | return ret_stuff.v0; | ||
195 | } | ||
196 | |||
197 | /* | ||
198 | * Returns the master baseio nasid; if the call fails, returns an illegal | ||
199 | * value. | ||
200 | */ | ||
201 | static inline u64 | ||
202 | ia64_sn_get_master_baseio_nasid(void) | ||
203 | { | ||
204 | struct ia64_sal_retval ret_stuff; | ||
205 | |||
206 | ret_stuff.status = 0; | ||
207 | ret_stuff.v0 = 0; | ||
208 | ret_stuff.v1 = 0; | ||
209 | ret_stuff.v2 = 0; | ||
210 | SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0); | ||
211 | |||
212 | if (ret_stuff.status < 0) | ||
213 | return ret_stuff.status; | ||
214 | |||
215 | /* Master baseio nasid is in 'v0' */ | ||
216 | return ret_stuff.v0; | ||
217 | } | ||
218 | |||
219 | static inline void * | ||
220 | ia64_sn_get_klconfig_addr(nasid_t nasid) | ||
221 | { | ||
222 | struct ia64_sal_retval ret_stuff; | ||
223 | |||
224 | ret_stuff.status = 0; | ||
225 | ret_stuff.v0 = 0; | ||
226 | ret_stuff.v1 = 0; | ||
227 | ret_stuff.v2 = 0; | ||
228 | SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0); | ||
229 | return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Returns the next console character. | ||
234 | */ | ||
235 | static inline u64 | ||
236 | ia64_sn_console_getc(int *ch) | ||
237 | { | ||
238 | struct ia64_sal_retval ret_stuff; | ||
239 | |||
240 | ret_stuff.status = 0; | ||
241 | ret_stuff.v0 = 0; | ||
242 | ret_stuff.v1 = 0; | ||
243 | ret_stuff.v2 = 0; | ||
244 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0); | ||
245 | |||
246 | /* character is in 'v0' */ | ||
247 | *ch = (int)ret_stuff.v0; | ||
248 | |||
249 | return ret_stuff.status; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Read a character from the SAL console device, after a previous interrupt | ||
254 | * or poll operation has indicated that a character is available | ||
255 | * to be read. | ||
256 | */ | ||
257 | static inline u64 | ||
258 | ia64_sn_console_readc(void) | ||
259 | { | ||
260 | struct ia64_sal_retval ret_stuff; | ||
261 | |||
262 | ret_stuff.status = 0; | ||
263 | ret_stuff.v0 = 0; | ||
264 | ret_stuff.v1 = 0; | ||
265 | ret_stuff.v2 = 0; | ||
266 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0); | ||
267 | |||
268 | /* character is in 'v0' */ | ||
269 | return ret_stuff.v0; | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * Sends the given character to the console. | ||
274 | */ | ||
275 | static inline u64 | ||
276 | ia64_sn_console_putc(char ch) | ||
277 | { | ||
278 | struct ia64_sal_retval ret_stuff; | ||
279 | |||
280 | ret_stuff.status = 0; | ||
281 | ret_stuff.v0 = 0; | ||
282 | ret_stuff.v1 = 0; | ||
283 | ret_stuff.v2 = 0; | ||
284 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (u64)ch, 0, 0, 0, 0, 0, 0); | ||
285 | |||
286 | return ret_stuff.status; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Sends the given buffer to the console. | ||
291 | */ | ||
292 | static inline u64 | ||
293 | ia64_sn_console_putb(const char *buf, int len) | ||
294 | { | ||
295 | struct ia64_sal_retval ret_stuff; | ||
296 | |||
297 | ret_stuff.status = 0; | ||
298 | ret_stuff.v0 = 0; | ||
299 | ret_stuff.v1 = 0; | ||
300 | ret_stuff.v2 = 0; | ||
301 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (u64)buf, (u64)len, 0, 0, 0, 0, 0); | ||
302 | |||
303 | if ( ret_stuff.status == 0 ) { | ||
304 | return ret_stuff.v0; | ||
305 | } | ||
306 | return (u64)0; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Print a platform error record | ||
311 | */ | ||
312 | static inline u64 | ||
313 | ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec) | ||
314 | { | ||
315 | struct ia64_sal_retval ret_stuff; | ||
316 | |||
317 | ret_stuff.status = 0; | ||
318 | ret_stuff.v0 = 0; | ||
319 | ret_stuff.v1 = 0; | ||
320 | ret_stuff.v2 = 0; | ||
321 | SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (u64)hook, (u64)rec, 0, 0, 0, 0, 0); | ||
322 | |||
323 | return ret_stuff.status; | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Check for Platform errors | ||
328 | */ | ||
329 | static inline u64 | ||
330 | ia64_sn_plat_cpei_handler(void) | ||
331 | { | ||
332 | struct ia64_sal_retval ret_stuff; | ||
333 | |||
334 | ret_stuff.status = 0; | ||
335 | ret_stuff.v0 = 0; | ||
336 | ret_stuff.v1 = 0; | ||
337 | ret_stuff.v2 = 0; | ||
338 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0); | ||
339 | |||
340 | return ret_stuff.status; | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * Set Error Handling Features (Obsolete) | ||
345 | */ | ||
346 | static inline u64 | ||
347 | ia64_sn_plat_set_error_handling_features(void) | ||
348 | { | ||
349 | struct ia64_sal_retval ret_stuff; | ||
350 | |||
351 | ret_stuff.status = 0; | ||
352 | ret_stuff.v0 = 0; | ||
353 | ret_stuff.v1 = 0; | ||
354 | ret_stuff.v2 = 0; | ||
355 | SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES, | ||
356 | SAL_ERR_FEAT_LOG_SBES, | ||
357 | 0, 0, 0, 0, 0, 0); | ||
358 | |||
359 | return ret_stuff.status; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Checks for console input. | ||
364 | */ | ||
365 | static inline u64 | ||
366 | ia64_sn_console_check(int *result) | ||
367 | { | ||
368 | struct ia64_sal_retval ret_stuff; | ||
369 | |||
370 | ret_stuff.status = 0; | ||
371 | ret_stuff.v0 = 0; | ||
372 | ret_stuff.v1 = 0; | ||
373 | ret_stuff.v2 = 0; | ||
374 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0); | ||
375 | |||
376 | /* result is in 'v0' */ | ||
377 | *result = (int)ret_stuff.v0; | ||
378 | |||
379 | return ret_stuff.status; | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Checks console interrupt status | ||
384 | */ | ||
385 | static inline u64 | ||
386 | ia64_sn_console_intr_status(void) | ||
387 | { | ||
388 | struct ia64_sal_retval ret_stuff; | ||
389 | |||
390 | ret_stuff.status = 0; | ||
391 | ret_stuff.v0 = 0; | ||
392 | ret_stuff.v1 = 0; | ||
393 | ret_stuff.v2 = 0; | ||
394 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, | ||
395 | 0, SAL_CONSOLE_INTR_STATUS, | ||
396 | 0, 0, 0, 0, 0); | ||
397 | |||
398 | if (ret_stuff.status == 0) { | ||
399 | return ret_stuff.v0; | ||
400 | } | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * Enable an interrupt on the SAL console device. | ||
407 | */ | ||
408 | static inline void | ||
409 | ia64_sn_console_intr_enable(u64 intr) | ||
410 | { | ||
411 | struct ia64_sal_retval ret_stuff; | ||
412 | |||
413 | ret_stuff.status = 0; | ||
414 | ret_stuff.v0 = 0; | ||
415 | ret_stuff.v1 = 0; | ||
416 | ret_stuff.v2 = 0; | ||
417 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, | ||
418 | intr, SAL_CONSOLE_INTR_ON, | ||
419 | 0, 0, 0, 0, 0); | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * Disable an interrupt on the SAL console device. | ||
424 | */ | ||
425 | static inline void | ||
426 | ia64_sn_console_intr_disable(u64 intr) | ||
427 | { | ||
428 | struct ia64_sal_retval ret_stuff; | ||
429 | |||
430 | ret_stuff.status = 0; | ||
431 | ret_stuff.v0 = 0; | ||
432 | ret_stuff.v1 = 0; | ||
433 | ret_stuff.v2 = 0; | ||
434 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, | ||
435 | intr, SAL_CONSOLE_INTR_OFF, | ||
436 | 0, 0, 0, 0, 0); | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Sends a character buffer to the console asynchronously. | ||
441 | */ | ||
442 | static inline u64 | ||
443 | ia64_sn_console_xmit_chars(char *buf, int len) | ||
444 | { | ||
445 | struct ia64_sal_retval ret_stuff; | ||
446 | |||
447 | ret_stuff.status = 0; | ||
448 | ret_stuff.v0 = 0; | ||
449 | ret_stuff.v1 = 0; | ||
450 | ret_stuff.v2 = 0; | ||
451 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS, | ||
452 | (u64)buf, (u64)len, | ||
453 | 0, 0, 0, 0, 0); | ||
454 | |||
455 | if (ret_stuff.status == 0) { | ||
456 | return ret_stuff.v0; | ||
457 | } | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Returns the iobrick module Id | ||
464 | */ | ||
465 | static inline u64 | ||
466 | ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result) | ||
467 | { | ||
468 | struct ia64_sal_retval ret_stuff; | ||
469 | |||
470 | ret_stuff.status = 0; | ||
471 | ret_stuff.v0 = 0; | ||
472 | ret_stuff.v1 = 0; | ||
473 | ret_stuff.v2 = 0; | ||
474 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0); | ||
475 | |||
476 | /* result is in 'v0' */ | ||
477 | *result = (int)ret_stuff.v0; | ||
478 | |||
479 | return ret_stuff.status; | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function | ||
484 | * | ||
485 | * SN_SAL_POD_MODE actually takes an argument, but it's always | ||
486 | * 0 when we call it from the kernel, so we don't have to expose | ||
487 | * it to the caller. | ||
488 | */ | ||
489 | static inline u64 | ||
490 | ia64_sn_pod_mode(void) | ||
491 | { | ||
492 | struct ia64_sal_retval isrv; | ||
493 | SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0); | ||
494 | if (isrv.status) | ||
495 | return 0; | ||
496 | return isrv.v0; | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * ia64_sn_probe_mem - read from memory safely | ||
501 | * @addr: address to probe | ||
502 | * @size: number of bytes to read (1, 2, 4, or 8) | ||
503 | * @data_ptr: address to store value read by probe (-1 returned if probe fails) | ||
504 | * | ||
505 | * Call into the SAL to do a memory read. If the read generates a machine | ||
506 | * check, this routine will recover gracefully and return -1 to the caller. | ||
507 | * @addr is usually a kernel virtual address in uncached space (i.e. the | ||
508 | * address starts with 0xc), but if called in physical mode, @addr should | ||
509 | * be a physical address. | ||
510 | * | ||
511 | * Return values: | ||
512 | * 0 - probe successful | ||
513 | * 1 - probe failed (generated MCA) | ||
514 | * 2 - Bad arg | ||
515 | * <0 - PAL error | ||
516 | */ | ||
517 | static inline u64 | ||
518 | ia64_sn_probe_mem(long addr, long size, void *data_ptr) | ||
519 | { | ||
520 | struct ia64_sal_retval isrv; | ||
521 | |||
522 | SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0); | ||
523 | |||
524 | if (data_ptr) { | ||
525 | switch (size) { | ||
526 | case 1: | ||
527 | *((u8*)data_ptr) = (u8)isrv.v0; | ||
528 | break; | ||
529 | case 2: | ||
530 | *((u16*)data_ptr) = (u16)isrv.v0; | ||
531 | break; | ||
532 | case 4: | ||
533 | *((u32*)data_ptr) = (u32)isrv.v0; | ||
534 | break; | ||
535 | case 8: | ||
536 | *((u64*)data_ptr) = (u64)isrv.v0; | ||
537 | break; | ||
538 | default: | ||
539 | isrv.status = 2; | ||
540 | } | ||
541 | } | ||
542 | return isrv.status; | ||
543 | } | ||
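/*
 * Usage sketch (hypothetical wrapper, not part of the SAL interface above):
 * pass one of the documented sizes and trust the value written through
 * data_ptr only when the probe reports success.
 */
static inline int
sn_probe_read_u32(long addr, u32 *val)
{
	u64 status = ia64_sn_probe_mem(addr, 4, val);

	if (status == 0)
		return 0;	/* *val holds the data that was read */
	return -1;		/* probe generated an MCA, bad arg, or PAL error */
}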
544 | |||
545 | /* | ||
546 | * Retrieve the system serial number as an ASCII string. | ||
547 | */ | ||
548 | static inline u64 | ||
549 | ia64_sn_sys_serial_get(char *buf) | ||
550 | { | ||
551 | struct ia64_sal_retval ret_stuff; | ||
552 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0); | ||
553 | return ret_stuff.status; | ||
554 | } | ||
555 | |||
556 | extern char sn_system_serial_number_string[]; | ||
557 | extern u64 sn_partition_serial_number; | ||
558 | |||
559 | static inline char * | ||
560 | sn_system_serial_number(void) { | ||
561 | if (sn_system_serial_number_string[0]) { | ||
562 | return(sn_system_serial_number_string); | ||
563 | } else { | ||
564 | ia64_sn_sys_serial_get(sn_system_serial_number_string); | ||
565 | return(sn_system_serial_number_string); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | |||
570 | /* | ||
571 | * Returns a unique id number for this system and partition (suitable for | ||
572 | * use with license managers), based in part on the system serial number. | ||
573 | */ | ||
574 | static inline u64 | ||
575 | ia64_sn_partition_serial_get(void) | ||
576 | { | ||
577 | struct ia64_sal_retval ret_stuff; | ||
578 | ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, | ||
579 | 0, 0, 0, 0, 0, 0); | ||
580 | if (ret_stuff.status != 0) | ||
581 | return 0; | ||
582 | return ret_stuff.v0; | ||
583 | } | ||
584 | |||
585 | static inline u64 | ||
586 | sn_partition_serial_number_val(void) { | ||
587 | if (unlikely(sn_partition_serial_number == 0)) { | ||
588 | sn_partition_serial_number = ia64_sn_partition_serial_get(); | ||
589 | } | ||
590 | return sn_partition_serial_number; | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Returns the partition id of the nasid passed in as an argument, | ||
595 | * or INVALID_PARTID if the partition id cannot be retrieved. | ||
596 | */ | ||
597 | static inline partid_t | ||
598 | ia64_sn_sysctl_partition_get(nasid_t nasid) | ||
599 | { | ||
600 | struct ia64_sal_retval ret_stuff; | ||
601 | SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, | ||
602 | 0, 0, 0, 0, 0, 0); | ||
603 | if (ret_stuff.status != 0) | ||
604 | return -1; | ||
605 | return ((partid_t)ret_stuff.v0); | ||
606 | } | ||
607 | |||
608 | /* | 35 | /* |
609 | * Returns the physical address of the partition's reserved page through | 36 | * Returns the physical address of the partition's reserved page through |
610 | * an iterative number of calls. | 37 | * an iterative number of calls. |
@@ -634,96 +61,6 @@ sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len) | |||
634 | } | 61 | } |
635 | 62 | ||
636 | /* | 63 | /* |
637 | * Register or unregister a physical address range being referenced across | ||
638 | * a partition boundary for which certain SAL errors should be scanned for, | ||
639 | * cleaned up and ignored. This is of value for kernel partitioning code only. | ||
640 | * Values for the operation argument: | ||
641 | * 1 = register this address range with SAL | ||
642 | * 0 = unregister this address range with SAL | ||
643 | * | ||
644 | * SAL maintains a reference count on an address range in case it is registered | ||
645 | * multiple times. | ||
646 | * | ||
647 | * On success, returns the reference count of the address range after the SAL | ||
648 | * call has performed the current registration/unregistration. Returns a | ||
649 | * negative value if an error occurred. | ||
650 | */ | ||
651 | static inline int | ||
652 | sn_register_xp_addr_region(u64 paddr, u64 len, int operation) | ||
653 | { | ||
654 | struct ia64_sal_retval ret_stuff; | ||
655 | ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, | ||
656 | (u64)operation, 0, 0, 0, 0); | ||
657 | return ret_stuff.status; | ||
658 | } | ||
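/*
 * Usage sketch (hypothetical caller): register a cross-partition range
 * before it is referenced and unregister it afterwards; a non-negative
 * return value is SAL's reference count after the operation.
 */
static inline int
sn_xp_addr_region_example(u64 paddr, u64 len)
{
	int refcount = sn_register_xp_addr_region(paddr, len, 1);

	if (refcount < 0)
		return refcount;	/* registration failed */
	/* ... the remote partition may now reference [paddr, paddr + len) ... */
	return sn_register_xp_addr_region(paddr, len, 0);
}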
659 | |||
660 | /* | ||
661 | * Register or unregister an instruction range for which SAL errors should | ||
662 | * be ignored. If an error occurs while in the registered range, SAL jumps | ||
663 | * to return_addr after ignoring the error. Values for the operation argument: | ||
664 | * 1 = register this instruction range with SAL | ||
665 | * 0 = unregister this instruction range with SAL | ||
666 | * | ||
667 | * Returns 0 on success, or a negative value if an error occurred. | ||
668 | */ | ||
669 | static inline int | ||
670 | sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr, | ||
671 | int virtual, int operation) | ||
672 | { | ||
673 | struct ia64_sal_retval ret_stuff; | ||
674 | u64 call; | ||
675 | if (virtual) { | ||
676 | call = SN_SAL_NO_FAULT_ZONE_VIRTUAL; | ||
677 | } else { | ||
678 | call = SN_SAL_NO_FAULT_ZONE_PHYSICAL; | ||
679 | } | ||
680 | ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr, | ||
681 | (u64)1, 0, 0, 0); | ||
682 | return ret_stuff.status; | ||
683 | } | ||
684 | |||
685 | /* | ||
686 | * Register or unregister a function to handle a PMI received by a CPU. | ||
687 | * Before calling the registered handler, SAL sets r1 to the value that | ||
688 | * was passed in as the global_pointer. | ||
689 | * | ||
690 | * If the handler pointer is NULL, then the currently registered handler | ||
691 | * will be unregistered. | ||
692 | * | ||
693 | * Returns 0 on success, or a negative value if an error occurred. | ||
694 | */ | ||
695 | static inline int | ||
696 | sn_register_pmi_handler(u64 handler, u64 global_pointer) | ||
697 | { | ||
698 | struct ia64_sal_retval ret_stuff; | ||
699 | ia64_sal_oemcall(&ret_stuff, SN_SAL_REGISTER_PMI_HANDLER, handler, | ||
700 | global_pointer, 0, 0, 0, 0, 0); | ||
701 | return ret_stuff.status; | ||
702 | } | ||
703 | |||
704 | /* | ||
705 | * Change or query the coherence domain for this partition. Each cpu-based | ||
706 | * nasid is represented by a bit in an array of 64-bit words: | ||
707 | * 0 = not in this partition's coherency domain | ||
708 | * 1 = in this partition's coherency domain | ||
709 | * | ||
710 | * It is not possible for the local system's nasids to be removed from | ||
711 | * the coherency domain. Purpose of the domain arguments: | ||
712 | * new_domain = set the coherence domain to the given nasids | ||
713 | * old_domain = return the current coherence domain | ||
714 | * | ||
715 | * Returns 0 on success, or a negative value if an error occurred. | ||
716 | */ | ||
717 | static inline int | ||
718 | sn_change_coherence(u64 *new_domain, u64 *old_domain) | ||
719 | { | ||
720 | struct ia64_sal_retval ret_stuff; | ||
721 | ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain, | ||
722 | (u64)old_domain, 0, 0, 0, 0, 0); | ||
723 | return ret_stuff.status; | ||
724 | } | ||
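/*
 * Sketch of the bit-array encoding described above (the helper itself is
 * illustrative, not part of the SAL interface): nasid N maps to bit
 * (N % 64) of 64-bit word (N / 64) in the domain array.
 */
static inline void
sn_coherence_domain_set_nasid(u64 *domain, int nasid)
{
	domain[nasid / 64] |= 1ULL << (nasid % 64);
}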
725 | |||
726 | /* | ||
727 | * Change memory access protections for a physical address range. | 64 | * Change memory access protections for a physical address range. |
728 | * nasid_array is not used on Altix, but may be in future architectures. | 65 | * nasid_array is not used on Altix, but may be in future architectures. |
729 | * Available memory protection access classes are defined after the function. | 66 | * Available memory protection access classes are defined after the function. |
@@ -744,395 +81,6 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) | |||
744 | #define SN_MEMPROT_ACCESS_CLASS_6 0x084080 | 81 | #define SN_MEMPROT_ACCESS_CLASS_6 0x084080 |
745 | #define SN_MEMPROT_ACCESS_CLASS_7 0x021080 | 82 | #define SN_MEMPROT_ACCESS_CLASS_7 0x021080 |
746 | 83 | ||
747 | /* | ||
748 | * Turns off system power. | ||
749 | */ | ||
750 | static inline void | ||
751 | ia64_sn_power_down(void) | ||
752 | { | ||
753 | struct ia64_sal_retval ret_stuff; | ||
754 | SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0); | ||
755 | while(1) | ||
756 | cpu_relax(); | ||
757 | /* never returns */ | ||
758 | } | ||
759 | |||
760 | /** | ||
761 | * ia64_sn_fru_capture - tell the system controller to capture hw state | ||
762 | * | ||
763 | * This routine will call the SAL which will tell the system controller(s) | ||
764 | * to capture hw mmr information from each SHub in the system. | ||
765 | */ | ||
766 | static inline u64 | ||
767 | ia64_sn_fru_capture(void) | ||
768 | { | ||
769 | struct ia64_sal_retval isrv; | ||
770 | SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0); | ||
771 | if (isrv.status) | ||
772 | return 0; | ||
773 | return isrv.v0; | ||
774 | } | ||
775 | |||
776 | /* | ||
777 | * Performs an operation on a PCI bus or slot -- power up, power down | ||
778 | * or reset. | ||
779 | */ | ||
780 | static inline u64 | ||
781 | ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, | ||
782 | u64 bus, char slot, | ||
783 | u64 action) | ||
784 | { | ||
785 | struct ia64_sal_retval rv = {0, 0, 0, 0}; | ||
786 | |||
787 | SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action, | ||
788 | bus, (u64) slot, 0, 0); | ||
789 | if (rv.status) | ||
790 | return rv.v0; | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | |||
795 | /* | ||
796 | * Open a subchannel for sending arbitrary data to the system | ||
797 | * controller network via the system controller device associated with | ||
798 | * 'nasid'. Return the subchannel number or a negative error code. | ||
799 | */ | ||
800 | static inline int | ||
801 | ia64_sn_irtr_open(nasid_t nasid) | ||
802 | { | ||
803 | struct ia64_sal_retval rv; | ||
804 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid, | ||
805 | 0, 0, 0, 0, 0); | ||
806 | return (int) rv.v0; | ||
807 | } | ||
808 | |||
809 | /* | ||
810 | * Close system controller subchannel 'subch' previously opened on 'nasid'. | ||
811 | */ | ||
812 | static inline int | ||
813 | ia64_sn_irtr_close(nasid_t nasid, int subch) | ||
814 | { | ||
815 | struct ia64_sal_retval rv; | ||
816 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE, | ||
817 | (u64) nasid, (u64) subch, 0, 0, 0, 0); | ||
818 | return (int) rv.status; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * Read data from system controller associated with 'nasid' on | ||
823 | * subchannel 'subch'. The buffer to be filled is pointed to by | ||
824 | * 'buf', and its capacity is in the integer pointed to by 'len'. The | ||
825 | * referent of 'len' is set to the number of bytes read by the SAL | ||
826 | * call. The return value is either SALRET_OK (for bytes read) or | ||
827 | * SALRET_ERROR (for error or "no data available"). | ||
828 | */ | ||
829 | static inline int | ||
830 | ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len) | ||
831 | { | ||
832 | struct ia64_sal_retval rv; | ||
833 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV, | ||
834 | (u64) nasid, (u64) subch, (u64) buf, (u64) len, | ||
835 | 0, 0); | ||
836 | return (int) rv.status; | ||
837 | } | ||
838 | |||
839 | /* | ||
840 | * Write data to the system controller network via the system | ||
841 | * controller associated with 'nasid' on subchannel 'subch'. The | ||
842 | * buffer to be written out is pointed to by 'buf', and 'len' is the | ||
843 | * number of bytes to be written. The return value is either the | ||
844 | * number of bytes written (which could be zero) or a negative error | ||
845 | * code. | ||
846 | */ | ||
847 | static inline int | ||
848 | ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len) | ||
849 | { | ||
850 | struct ia64_sal_retval rv; | ||
851 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND, | ||
852 | (u64) nasid, (u64) subch, (u64) buf, (u64) len, | ||
853 | 0, 0); | ||
854 | return (int) rv.v0; | ||
855 | } | ||
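/*
 * Illustrative flow (hypothetical helper): open a subchannel to the system
 * controller on 'nasid', write one buffer, then close the subchannel.
 */
static inline int
sn_irtr_send_example(nasid_t nasid, char *buf, int len)
{
	int sent;
	int subch = ia64_sn_irtr_open(nasid);

	if (subch < 0)
		return subch;	/* no subchannel available */
	sent = ia64_sn_irtr_send(nasid, subch, buf, len);
	ia64_sn_irtr_close(nasid, subch);
	return sent;		/* bytes written, or a negative error code */
}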
856 | |||
857 | /* | ||
858 | * Check whether any interrupts are pending for the system controller | ||
859 | * associated with 'nasid' and its subchannel 'subch'. The return | ||
860 | * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or | ||
861 | * SAL_IROUTER_INTR_RECV). | ||
862 | */ | ||
863 | static inline int | ||
864 | ia64_sn_irtr_intr(nasid_t nasid, int subch) | ||
865 | { | ||
866 | struct ia64_sal_retval rv; | ||
867 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS, | ||
868 | (u64) nasid, (u64) subch, 0, 0, 0, 0); | ||
869 | return (int) rv.v0; | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * Enable the interrupt indicated by the intr parameter (either | ||
874 | * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV). | ||
875 | */ | ||
876 | static inline int | ||
877 | ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr) | ||
878 | { | ||
879 | struct ia64_sal_retval rv; | ||
880 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON, | ||
881 | (u64) nasid, (u64) subch, intr, 0, 0, 0); | ||
882 | return (int) rv.v0; | ||
883 | } | ||
884 | |||
885 | /* | ||
886 | * Disable the interrupt indicated by the intr parameter (either | ||
887 | * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV). | ||
888 | */ | ||
889 | static inline int | ||
890 | ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr) | ||
891 | { | ||
892 | struct ia64_sal_retval rv; | ||
893 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF, | ||
894 | (u64) nasid, (u64) subch, intr, 0, 0, 0); | ||
895 | return (int) rv.v0; | ||
896 | } | ||
897 | |||
898 | /* | ||
899 | * Set up a node as the point of contact for system controller | ||
900 | * environmental event delivery. | ||
901 | */ | ||
902 | static inline int | ||
903 | ia64_sn_sysctl_event_init(nasid_t nasid) | ||
904 | { | ||
905 | struct ia64_sal_retval rv; | ||
906 | SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid, | ||
907 | 0, 0, 0, 0, 0, 0); | ||
908 | return (int) rv.v0; | ||
909 | } | ||
910 | |||
911 | /* | ||
912 | * Ask the system controller on the specified nasid to reset | ||
913 | * the CX corelet clock. Only valid on TIO nodes. | ||
914 | */ | ||
915 | static inline int | ||
916 | ia64_sn_sysctl_tio_clock_reset(nasid_t nasid) | ||
917 | { | ||
918 | struct ia64_sal_retval rv; | ||
919 | SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST, | ||
920 | nasid, 0, 0, 0, 0, 0); | ||
921 | if (rv.status != 0) | ||
922 | return (int)rv.status; | ||
923 | if (rv.v0 != 0) | ||
924 | return (int)rv.v0; | ||
925 | |||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | * Get the associated ioboard type for a given nasid. | ||
931 | */ | ||
932 | static inline long | ||
933 | ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard) | ||
934 | { | ||
935 | struct ia64_sal_retval isrv; | ||
936 | SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, | ||
937 | nasid, 0, 0, 0, 0, 0); | ||
938 | if (isrv.v0 != 0) { | ||
939 | *ioboard = isrv.v0; | ||
940 | return isrv.status; | ||
941 | } | ||
942 | if (isrv.v1 != 0) { | ||
943 | *ioboard = isrv.v1; | ||
944 | return isrv.status; | ||
945 | } | ||
946 | |||
947 | return isrv.status; | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * ia64_sn_get_fit_compt - read a FIT entry from the PROM header | ||
952 | * @nasid: NASID of node to read | ||
953 | * @index: FIT entry index to be retrieved (0..n) | ||
954 | * @fitentry: 16 byte buffer where FIT entry will be stored. | ||
955 | * @banbuf: optional buffer for retrieving banner | ||
956 | * @banlen: length of banner buffer | ||
957 | * | ||
958 | * Access to the physical PROM chips needs to be serialized since reads and | ||
959 | * writes can't occur at the same time, so we need to call into the SAL when | ||
960 | * we want to look at the FIT entries on the chips. | ||
961 | * | ||
962 | * Returns: | ||
963 | * %SALRET_OK if ok | ||
964 | * %SALRET_INVALID_ARG if index too big | ||
965 | * %SALRET_NOT_IMPLEMENTED if running on older PROM | ||
966 | * ??? if nasid invalid OR banner buffer not large enough | ||
967 | */ | ||
968 | static inline int | ||
969 | ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf, | ||
970 | u64 banlen) | ||
971 | { | ||
972 | struct ia64_sal_retval rv; | ||
973 | SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry, | ||
974 | banbuf, banlen, 0, 0); | ||
975 | return (int) rv.status; | ||
976 | } | ||
977 | |||
978 | /* | ||
979 | * Initialize the SAL components of the system controller | ||
980 | * communication driver; specifically pass in a sizable buffer that | ||
981 | * can be used for allocation of subchannel queues as new subchannels | ||
982 | * are opened. "buf" points to the buffer, and "len" specifies its | ||
983 | * length. | ||
984 | */ | ||
985 | static inline int | ||
986 | ia64_sn_irtr_init(nasid_t nasid, void *buf, int len) | ||
987 | { | ||
988 | struct ia64_sal_retval rv; | ||
989 | SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT, | ||
990 | (u64) nasid, (u64) buf, (u64) len, 0, 0, 0); | ||
991 | return (int) rv.status; | ||
992 | } | ||
993 | |||
994 | /* | ||
995 | * Returns the nasid, subnode & slice corresponding to a SAPIC ID | ||
996 | * | ||
997 | * In: | ||
998 | * arg0 - SN_SAL_GET_SAPIC_INFO | ||
999 | * arg1 - sapicid (lid >> 16) | ||
1000 | * Out: | ||
1001 | * v0 - nasid | ||
1002 | * v1 - subnode | ||
1003 | * v2 - slice | ||
1004 | */ | ||
1005 | static inline u64 | ||
1006 | ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice) | ||
1007 | { | ||
1008 | struct ia64_sal_retval ret_stuff; | ||
1009 | |||
1010 | ret_stuff.status = 0; | ||
1011 | ret_stuff.v0 = 0; | ||
1012 | ret_stuff.v1 = 0; | ||
1013 | ret_stuff.v2 = 0; | ||
1014 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0); | ||
1015 | |||
1016 | /***** BEGIN HACK - temp til old proms no longer supported ********/ | ||
1017 | if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { | ||
1018 | if (nasid) *nasid = sapicid & 0xfff; | ||
1019 | if (subnode) *subnode = (sapicid >> 13) & 1; | ||
1020 | if (slice) *slice = (sapicid >> 12) & 3; | ||
1021 | return 0; | ||
1022 | } | ||
1023 | /***** END HACK *******/ | ||
1024 | |||
1025 | if (ret_stuff.status < 0) | ||
1026 | return ret_stuff.status; | ||
1027 | |||
1028 | if (nasid) *nasid = (int) ret_stuff.v0; | ||
1029 | if (subnode) *subnode = (int) ret_stuff.v1; | ||
1030 | if (slice) *slice = (int) ret_stuff.v2; | ||
1031 | return 0; | ||
1032 | } | ||
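/*
 * Example caller (sketch): outputs that are not needed may be passed as
 * NULL, as the checks above show; get_sapicid() supplies the local CPU's
 * SAPIC id (it is also used by the compatibility path in
 * ia64_sn_get_sn_info() further down).
 */
static inline int
sn_local_nasid_slice_example(int *nasid, int *slice)
{
	return (int)ia64_sn_get_sapic_info(get_sapicid(), nasid, NULL, slice);
}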
1033 | |||
1034 | /* | ||
1035 | * Returns information about the HUB/SHUB. | ||
1036 | * In: | ||
1037 | * arg0 - SN_SAL_GET_SN_INFO | ||
1038 | * arg1 - 0 (other values reserved for future use) | ||
1039 | * Out: | ||
1040 | * v0 | ||
1041 | * [7:0] - shub type (0=shub1, 1=shub2) | ||
1042 | * [15:8] - Log2 max number of nodes in entire system (includes | ||
1043 | * C-bricks, I-bricks, etc) | ||
1044 | * [23:16] - Log2 of nodes per sharing domain | ||
1045 | * [31:24] - partition ID | ||
1046 | * [39:32] - coherency_id | ||
1047 | * [47:40] - regionsize | ||
1048 | * v1 | ||
1049 | * [15:0] - nasid mask (e.g., 0x7ff for an 11-bit nasid) | ||
1050 | * [23:16] - bit position of low nasid bit | ||
1051 | */ | ||
1052 | static inline u64 | ||
1053 | ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, | ||
1054 | u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg) | ||
1055 | { | ||
1056 | struct ia64_sal_retval ret_stuff; | ||
1057 | |||
1058 | ret_stuff.status = 0; | ||
1059 | ret_stuff.v0 = 0; | ||
1060 | ret_stuff.v1 = 0; | ||
1061 | ret_stuff.v2 = 0; | ||
1062 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0); | ||
1063 | |||
1064 | /***** BEGIN HACK - temp til old proms no longer supported ********/ | ||
1065 | if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { | ||
1066 | int nasid = get_sapicid() & 0xfff; | ||
1067 | #define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL | ||
1068 | #define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 | ||
1069 | if (shubtype) *shubtype = 0; | ||
1070 | if (nasid_bitmask) *nasid_bitmask = 0x7ff; | ||
1071 | if (nasid_shift) *nasid_shift = 38; | ||
1072 | if (systemsize) *systemsize = 10; | ||
1073 | if (sharing_domain_size) *sharing_domain_size = 8; | ||
1074 | if (partid) *partid = ia64_sn_sysctl_partition_get(nasid); | ||
1075 | if (coher) *coher = nasid >> 9; | ||
1076 | if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >> | ||
1077 | SH_SHUB_ID_NODES_PER_BIT_SHFT; | ||
1078 | return 0; | ||
1079 | } | ||
1080 | /***** END HACK *******/ | ||
1081 | |||
1082 | if (ret_stuff.status < 0) | ||
1083 | return ret_stuff.status; | ||
1084 | |||
1085 | if (shubtype) *shubtype = ret_stuff.v0 & 0xff; | ||
1086 | if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff; | ||
1087 | if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff; | ||
1088 | if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff; | ||
1089 | if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff; | ||
1090 | if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff; | ||
1091 | if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff); | ||
1092 | if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff; | ||
1093 | return 0; | ||
1094 | } | ||
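/*
 * Sketch (hypothetical wrapper) of how the nasid mask/shift pair described
 * above is typically consumed: extract the nasid field from a physical
 * address.
 */
static inline int
sn_paddr_to_nasid_example(u64 paddr)
{
	u8 shubtype, nasid_shift;
	u16 nasid_bitmask;

	if (ia64_sn_get_sn_info(0, &shubtype, &nasid_bitmask, &nasid_shift,
				NULL, NULL, NULL, NULL, NULL))
		return -1;
	return (int)((paddr >> nasid_shift) & nasid_bitmask);
}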
1095 | |||
1096 | /* | ||
1097 | * This is the access point to the Altix PROM hardware performance | ||
1098 | * and status monitoring interface. For info on using this, see | ||
1099 | * arch/ia64/include/asm/sn/sn2/sn_hwperf.h | ||
1100 | */ | ||
1101 | static inline int | ||
1102 | ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2, | ||
1103 | u64 a3, u64 a4, int *v0) | ||
1104 | { | ||
1105 | struct ia64_sal_retval rv; | ||
1106 | SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid, | ||
1107 | opcode, a0, a1, a2, a3, a4); | ||
1108 | if (v0) | ||
1109 | *v0 = (int) rv.v0; | ||
1110 | return (int) rv.status; | ||
1111 | } | ||
1112 | |||
1113 | static inline int | ||
1114 | ia64_sn_ioif_get_pci_topology(u64 buf, u64 len) | ||
1115 | { | ||
1116 | struct ia64_sal_retval rv; | ||
1117 | SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0); | ||
1118 | return (int) rv.status; | ||
1119 | } | ||
1120 | |||
1121 | /* | ||
1122 | * BTE error recovery is implemented in SAL | ||
1123 | */ | ||
1124 | static inline int | ||
1125 | ia64_sn_bte_recovery(nasid_t nasid) | ||
1126 | { | ||
1127 | struct ia64_sal_retval rv; | ||
1128 | |||
1129 | rv.status = 0; | ||
1130 | SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0); | ||
1131 | if (rv.status == SALRET_NOT_IMPLEMENTED) | ||
1132 | return 0; | ||
1133 | return (int) rv.status; | ||
1134 | } | ||
1135 | |||
1136 | static inline int | 84 | static inline int |
1137 | ia64_sn_is_fake_prom(void) | 85 | ia64_sn_is_fake_prom(void) |
1138 | { | 86 | { |
@@ -1141,53 +89,6 @@ ia64_sn_is_fake_prom(void) | |||
1141 | return (rv.status == 0); | 89 | return (rv.status == 0); |
1142 | } | 90 | } |
1143 | 91 | ||
1144 | static inline int | ||
1145 | ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set) | ||
1146 | { | ||
1147 | struct ia64_sal_retval rv; | ||
1148 | |||
1149 | SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0); | ||
1150 | if (rv.status != 0) | ||
1151 | return rv.status; | ||
1152 | *feature_set = rv.v0; | ||
1153 | return 0; | ||
1154 | } | ||
1155 | |||
1156 | static inline int | ||
1157 | ia64_sn_set_os_feature(int feature) | ||
1158 | { | ||
1159 | struct ia64_sal_retval rv; | ||
1160 | |||
1161 | SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0); | ||
1162 | return rv.status; | ||
1163 | } | ||
1164 | |||
1165 | static inline int | ||
1166 | sn_inject_error(u64 paddr, u64 *data, u64 *ecc) | ||
1167 | { | ||
1168 | struct ia64_sal_retval ret_stuff; | ||
1169 | |||
1170 | ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data, | ||
1171 | (u64)ecc, 0, 0, 0, 0); | ||
1172 | return ret_stuff.status; | ||
1173 | } | ||
1174 | |||
1175 | static inline int | ||
1176 | ia64_sn_set_cpu_number(int cpu) | ||
1177 | { | ||
1178 | struct ia64_sal_retval rv; | ||
1179 | |||
1180 | SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0); | ||
1181 | return rv.status; | ||
1182 | } | ||
1183 | static inline int | ||
1184 | ia64_sn_kernel_launch_event(void) | ||
1185 | { | ||
1186 | struct ia64_sal_retval rv; | ||
1187 | SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0); | ||
1188 | return rv.status; | ||
1189 | } | ||
1190 | |||
1191 | union sn_watchlist_u { | 92 | union sn_watchlist_u { |
1192 | u64 val; | 93 | u64 val; |
1193 | struct { | 94 | struct { |
diff --git a/arch/ia64/include/asm/sn/tioca.h b/arch/ia64/include/asm/sn/tioca.h deleted file mode 100644 index 666222d7f0f6..000000000000 --- a/arch/ia64/include/asm/sn/tioca.h +++ /dev/null | |||
@@ -1,596 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_SN_TIO_TIOCA_H | ||
2 | #define _ASM_IA64_SN_TIO_TIOCA_H | ||
3 | |||
4 | /* | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | * | ||
9 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. | ||
10 | */ | ||
11 | |||
12 | |||
13 | #define TIOCA_PART_NUM 0xE020 | ||
14 | #define TIOCA_MFGR_NUM 0x24 | ||
15 | #define TIOCA_REV_A 0x1 | ||
16 | |||
17 | /* | ||
18 | * Register layout for TIO:CA. See below for bitmasks for each register. | ||
19 | */ | ||
20 | |||
21 | struct tioca { | ||
22 | u64 ca_id; /* 0x000000 */ | ||
23 | u64 ca_control1; /* 0x000008 */ | ||
24 | u64 ca_control2; /* 0x000010 */ | ||
25 | u64 ca_status1; /* 0x000018 */ | ||
26 | u64 ca_status2; /* 0x000020 */ | ||
27 | u64 ca_gart_aperature; /* 0x000028 */ | ||
28 | u64 ca_gfx_detach; /* 0x000030 */ | ||
29 | u64 ca_inta_dest_addr; /* 0x000038 */ | ||
30 | u64 ca_intb_dest_addr; /* 0x000040 */ | ||
31 | u64 ca_err_int_dest_addr; /* 0x000048 */ | ||
32 | u64 ca_int_status; /* 0x000050 */ | ||
33 | u64 ca_int_status_alias; /* 0x000058 */ | ||
34 | u64 ca_mult_error; /* 0x000060 */ | ||
35 | u64 ca_mult_error_alias; /* 0x000068 */ | ||
36 | u64 ca_first_error; /* 0x000070 */ | ||
37 | u64 ca_int_mask; /* 0x000078 */ | ||
38 | u64 ca_crm_pkterr_type; /* 0x000080 */ | ||
39 | u64 ca_crm_pkterr_type_alias; /* 0x000088 */ | ||
40 | u64 ca_crm_ct_error_detail_1; /* 0x000090 */ | ||
41 | u64 ca_crm_ct_error_detail_2; /* 0x000098 */ | ||
42 | u64 ca_crm_tnumto; /* 0x0000A0 */ | ||
43 | u64 ca_gart_err; /* 0x0000A8 */ | ||
44 | u64 ca_pcierr_type; /* 0x0000B0 */ | ||
45 | u64 ca_pcierr_addr; /* 0x0000B8 */ | ||
46 | |||
47 | u64 ca_pad_0000C0[3]; /* 0x0000{C0..D0} */ | ||
48 | |||
49 | u64 ca_pci_rd_buf_flush; /* 0x0000D8 */ | ||
50 | u64 ca_pci_dma_addr_extn; /* 0x0000E0 */ | ||
51 | u64 ca_agp_dma_addr_extn; /* 0x0000E8 */ | ||
52 | u64 ca_force_inta; /* 0x0000F0 */ | ||
53 | u64 ca_force_intb; /* 0x0000F8 */ | ||
54 | u64 ca_debug_vector_sel; /* 0x000100 */ | ||
55 | u64 ca_debug_mux_core_sel; /* 0x000108 */ | ||
56 | u64 ca_debug_mux_pci_sel; /* 0x000110 */ | ||
57 | u64 ca_debug_domain_sel; /* 0x000118 */ | ||
58 | |||
59 | u64 ca_pad_000120[28]; /* 0x0001{20..F8} */ | ||
60 | |||
61 | u64 ca_gart_ptr_table; /* 0x200 */ | ||
62 | u64 ca_gart_tlb_addr[8]; /* 0x2{08..40} */ | ||
63 | }; | ||
64 | |||
65 | /* | ||
66 | * Mask/shift definitions for TIO:CA registers. The convention here is | ||
67 | * to mainly use the names as they appear in the "TIO AEGIS Programmers' | ||
68 | * Reference" with a CA_ prefix added. Some exceptions were made to fix | ||
69 | * duplicate field names or to generalize fields that are common to | ||
70 | * different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for | ||
71 | * example). | ||
72 | * | ||
73 | * Fields consisting of a single bit have a single #define | ||
74 | * macro declaration to mask the bit. Fields consisting of multiple bits | ||
75 | * have two declarations: one to mask the proper bits in a register, and | ||
76 | * a second with the suffix "_SHFT" to identify how far the mask needs to | ||
77 | * be shifted right to get its base value. | ||
78 | */ | ||
79 | |||
80 | /* ==== ca_control1 */ | ||
81 | #define CA_SYS_BIG_END (1ull << 0) | ||
82 | #define CA_DMA_AGP_SWAP (1ull << 1) | ||
83 | #define CA_DMA_PCI_SWAP (1ull << 2) | ||
84 | #define CA_PIO_IO_SWAP (1ull << 3) | ||
85 | #define CA_PIO_MEM_SWAP (1ull << 4) | ||
86 | #define CA_GFX_WR_SWAP (1ull << 5) | ||
87 | #define CA_AGP_FW_ENABLE (1ull << 6) | ||
88 | #define CA_AGP_CAL_CYCLE (0x7ull << 7) | ||
89 | #define CA_AGP_CAL_CYCLE_SHFT 7 | ||
90 | #define CA_AGP_CAL_PRSCL_BYP (1ull << 10) | ||
91 | #define CA_AGP_INIT_CAL_ENB (1ull << 11) | ||
92 | #define CA_INJ_ADDR_PERR (1ull << 12) | ||
93 | #define CA_INJ_DATA_PERR (1ull << 13) | ||
94 | /* bits 15:14 unused */ | ||
95 | #define CA_PCIM_IO_NBE_AD (0x7ull << 16) | ||
96 | #define CA_PCIM_IO_NBE_AD_SHFT 16 | ||
97 | #define CA_PCIM_FAST_BTB_ENB (1ull << 19) | ||
98 | /* bits 23:20 unused */ | ||
99 | #define CA_PIO_ADDR_OFFSET (0xffull << 24) | ||
100 | #define CA_PIO_ADDR_OFFSET_SHFT 24 | ||
101 | /* bits 35:32 unused */ | ||
102 | #define CA_AGPDMA_OP_COMBDELAY (0x1full << 36) | ||
103 | #define CA_AGPDMA_OP_COMBDELAY_SHFT 36 | ||
104 | /* bit 41 unused */ | ||
105 | #define CA_AGPDMA_OP_ENB_COMBDELAY (1ull << 42) | ||
106 | #define CA_PCI_INT_LPCNT (0xffull << 44) | ||
107 | #define CA_PCI_INT_LPCNT_SHFT 44 | ||
108 | /* bits 63:52 unused */ | ||
109 | |||
110 | /* ==== ca_control2 */ | ||
111 | #define CA_AGP_LATENCY_TO (0xffull << 0) | ||
112 | #define CA_AGP_LATENCY_TO_SHFT 0 | ||
113 | #define CA_PCI_LATENCY_TO (0xffull << 8) | ||
114 | #define CA_PCI_LATENCY_TO_SHFT 8 | ||
115 | #define CA_PCI_MAX_RETRY (0x3ffull << 16) | ||
116 | #define CA_PCI_MAX_RETRY_SHFT 16 | ||
117 | /* bits 27:26 unused */ | ||
118 | #define CA_RT_INT_EN (0x3ull << 28) | ||
119 | #define CA_RT_INT_EN_SHFT 28 | ||
120 | #define CA_MSI_INT_ENB (1ull << 30) | ||
121 | #define CA_PCI_ARB_ERR_ENB (1ull << 31) | ||
122 | #define CA_GART_MEM_PARAM (0x3ull << 32) | ||
123 | #define CA_GART_MEM_PARAM_SHFT 32 | ||
124 | #define CA_GART_RD_PREFETCH_ENB (1ull << 34) | ||
125 | #define CA_GART_WR_PREFETCH_ENB (1ull << 35) | ||
126 | #define CA_GART_FLUSH_TLB (1ull << 36) | ||
127 | /* bits 39:37 unused */ | ||
128 | #define CA_CRM_TNUMTO_PERIOD (0x1fffull << 40) | ||
129 | #define CA_CRM_TNUMTO_PERIOD_SHFT 40 | ||
130 | /* bits 55:53 unused */ | ||
131 | #define CA_CRM_TNUMTO_ENB (1ull << 56) | ||
132 | #define CA_CRM_PRESCALER_BYP (1ull << 57) | ||
133 | /* bits 59:58 unused */ | ||
134 | #define CA_CRM_MAX_CREDIT (0x7ull << 60) | ||
135 | #define CA_CRM_MAX_CREDIT_SHFT 60 | ||
136 | /* bit 63 unused */ | ||
137 | |||
138 | /* ==== ca_status1 */ | ||
139 | #define CA_CORELET_ID (0x3ull << 0) | ||
140 | #define CA_CORELET_ID_SHFT 0 | ||
141 | #define CA_INTA_N (1ull << 2) | ||
142 | #define CA_INTB_N (1ull << 3) | ||
143 | #define CA_CRM_CREDIT_AVAIL (0x7ull << 4) | ||
144 | #define CA_CRM_CREDIT_AVAIL_SHFT 4 | ||
145 | /* bit 7 unused */ | ||
146 | #define CA_CRM_SPACE_AVAIL (0x7full << 8) | ||
147 | #define CA_CRM_SPACE_AVAIL_SHFT 8 | ||
148 | /* bit 15 unused */ | ||
149 | #define CA_GART_TLB_VAL (0xffull << 16) | ||
150 | #define CA_GART_TLB_VAL_SHFT 16 | ||
151 | /* bits 63:24 unused */ | ||
152 | |||
153 | /* ==== ca_status2 */ | ||
154 | #define CA_GFX_CREDIT_AVAIL (0xffull << 0) | ||
155 | #define CA_GFX_CREDIT_AVAIL_SHFT 0 | ||
156 | #define CA_GFX_OPQ_AVAIL (0xffull << 8) | ||
157 | #define CA_GFX_OPQ_AVAIL_SHFT 8 | ||
158 | #define CA_GFX_WRBUFF_AVAIL (0xffull << 16) | ||
159 | #define CA_GFX_WRBUFF_AVAIL_SHFT 16 | ||
160 | #define CA_ADMA_OPQ_AVAIL (0xffull << 24) | ||
161 | #define CA_ADMA_OPQ_AVAIL_SHFT 24 | ||
162 | #define CA_ADMA_WRBUFF_AVAIL (0xffull << 32) | ||
163 | #define CA_ADMA_WRBUFF_AVAIL_SHFT 32 | ||
164 | #define CA_ADMA_RDBUFF_AVAIL (0x7full << 40) | ||
165 | #define CA_ADMA_RDBUFF_AVAIL_SHFT 40 | ||
166 | #define CA_PCI_PIO_OP_STAT (1ull << 47) | ||
167 | #define CA_PDMA_OPQ_AVAIL (0xfull << 48) | ||
168 | #define CA_PDMA_OPQ_AVAIL_SHFT 48 | ||
169 | #define CA_PDMA_WRBUFF_AVAIL (0xfull << 52) | ||
170 | #define CA_PDMA_WRBUFF_AVAIL_SHFT 52 | ||
171 | #define CA_PDMA_RDBUFF_AVAIL (0x3ull << 56) | ||
172 | #define CA_PDMA_RDBUFF_AVAIL_SHFT 56 | ||
173 | /* bits 63:58 unused */ | ||
174 | |||
175 | /* ==== ca_gart_aperature */ | ||
176 | #define CA_GART_AP_ENB_AGP (1ull << 0) | ||
177 | #define CA_GART_PAGE_SIZE (1ull << 1) | ||
178 | #define CA_GART_AP_ENB_PCI (1ull << 2) | ||
179 | /* bits 11:3 unused */ | ||
180 | #define CA_GART_AP_SIZE (0x3ffull << 12) | ||
181 | #define CA_GART_AP_SIZE_SHFT 12 | ||
182 | #define CA_GART_AP_BASE (0x3ffffffffffull << 22) | ||
183 | #define CA_GART_AP_BASE_SHFT 22 | ||
184 | |||
185 | /* ==== ca_inta_dest_addr | ||
186 | ==== ca_intb_dest_addr | ||
187 | ==== ca_err_int_dest_addr */ | ||
188 | /* bits 2:0 unused */ | ||
189 | #define CA_INT_DEST_ADDR (0x7ffffffffffffull << 3) | ||
190 | #define CA_INT_DEST_ADDR_SHFT 3 | ||
191 | /* bits 55:54 unused */ | ||
192 | #define CA_INT_DEST_VECT (0xffull << 56) | ||
193 | #define CA_INT_DEST_VECT_SHFT 56 | ||
194 | |||
195 | /* ==== ca_int_status */ | ||
196 | /* ==== ca_int_status_alias */ | ||
197 | /* ==== ca_mult_error */ | ||
198 | /* ==== ca_mult_error_alias */ | ||
199 | /* ==== ca_first_error */ | ||
200 | /* ==== ca_int_mask */ | ||
201 | #define CA_PCI_ERR (1ull << 0) | ||
202 | /* bits 3:1 unused */ | ||
203 | #define CA_GART_FETCH_ERR (1ull << 4) | ||
204 | #define CA_GFX_WR_OVFLW (1ull << 5) | ||
205 | #define CA_PIO_REQ_OVFLW (1ull << 6) | ||
206 | #define CA_CRM_PKTERR (1ull << 7) | ||
207 | #define CA_CRM_DVERR (1ull << 8) | ||
208 | #define CA_TNUMTO (1ull << 9) | ||
209 | #define CA_CXM_RSP_CRED_OVFLW (1ull << 10) | ||
210 | #define CA_CXM_REQ_CRED_OVFLW (1ull << 11) | ||
211 | #define CA_PIO_INVALID_ADDR (1ull << 12) | ||
212 | #define CA_PCI_ARB_TO (1ull << 13) | ||
213 | #define CA_AGP_REQ_OFLOW (1ull << 14) | ||
214 | #define CA_SBA_TYPE1_ERR (1ull << 15) | ||
215 | /* bit 16 unused */ | ||
216 | #define CA_INTA (1ull << 17) | ||
217 | #define CA_INTB (1ull << 18) | ||
218 | #define CA_MULT_INTA (1ull << 19) | ||
219 | #define CA_MULT_INTB (1ull << 20) | ||
220 | #define CA_GFX_CREDIT_OVFLW (1ull << 21) | ||
221 | /* bits 63:22 unused */ | ||
222 | |||
223 | /* ==== ca_crm_pkterr_type */ | ||
224 | /* ==== ca_crm_pkterr_type_alias */ | ||
225 | #define CA_CRM_PKTERR_SBERR_HDR (1ull << 0) | ||
226 | #define CA_CRM_PKTERR_DIDN (1ull << 1) | ||
227 | #define CA_CRM_PKTERR_PACTYPE (1ull << 2) | ||
228 | #define CA_CRM_PKTERR_INV_TNUM (1ull << 3) | ||
229 | #define CA_CRM_PKTERR_ADDR_RNG (1ull << 4) | ||
230 | #define CA_CRM_PKTERR_ADDR_ALGN (1ull << 5) | ||
231 | #define CA_CRM_PKTERR_HDR_PARAM (1ull << 6) | ||
232 | #define CA_CRM_PKTERR_CW_ERR (1ull << 7) | ||
233 | #define CA_CRM_PKTERR_SBERR_NH (1ull << 8) | ||
234 | #define CA_CRM_PKTERR_EARLY_TERM (1ull << 9) | ||
235 | #define CA_CRM_PKTERR_EARLY_TAIL (1ull << 10) | ||
236 | #define CA_CRM_PKTERR_MSSNG_TAIL (1ull << 11) | ||
237 | #define CA_CRM_PKTERR_MSSNG_HDR (1ull << 12) | ||
238 | /* bits 15:13 unused */ | ||
239 | #define CA_FIRST_CRM_PKTERR_SBERR_HDR (1ull << 16) | ||
240 | #define CA_FIRST_CRM_PKTERR_DIDN (1ull << 17) | ||
241 | #define CA_FIRST_CRM_PKTERR_PACTYPE (1ull << 18) | ||
242 | #define CA_FIRST_CRM_PKTERR_INV_TNUM (1ull << 19) | ||
243 | #define CA_FIRST_CRM_PKTERR_ADDR_RNG (1ull << 20) | ||
244 | #define CA_FIRST_CRM_PKTERR_ADDR_ALGN (1ull << 21) | ||
245 | #define CA_FIRST_CRM_PKTERR_HDR_PARAM (1ull << 22) | ||
246 | #define CA_FIRST_CRM_PKTERR_CW_ERR (1ull << 23) | ||
247 | #define CA_FIRST_CRM_PKTERR_SBERR_NH (1ull << 24) | ||
248 | #define CA_FIRST_CRM_PKTERR_EARLY_TERM (1ull << 25) | ||
249 | #define CA_FIRST_CRM_PKTERR_EARLY_TAIL (1ull << 26) | ||
250 | #define CA_FIRST_CRM_PKTERR_MSSNG_TAIL (1ull << 27) | ||
251 | #define CA_FIRST_CRM_PKTERR_MSSNG_HDR (1ull << 28) | ||
252 | /* bits 63:29 unused */ | ||
253 | |||
254 | /* ==== ca_crm_ct_error_detail_1 */ | ||
255 | #define CA_PKT_TYPE (0xfull << 0) | ||
256 | #define CA_PKT_TYPE_SHFT 0 | ||
257 | #define CA_SRC_ID (0x3ull << 4) | ||
258 | #define CA_SRC_ID_SHFT 4 | ||
259 | #define CA_DATA_SZ (0x3ull << 6) | ||
260 | #define CA_DATA_SZ_SHFT 6 | ||
261 | #define CA_TNUM (0xffull << 8) | ||
262 | #define CA_TNUM_SHFT 8 | ||
263 | #define CA_DW_DATA_EN (0xffull << 16) | ||
264 | #define CA_DW_DATA_EN_SHFT 16 | ||
265 | #define CA_GFX_CRED (0xffull << 24) | ||
266 | #define CA_GFX_CRED_SHFT 24 | ||
267 | #define CA_MEM_RD_PARAM (0x3ull << 32) | ||
268 | #define CA_MEM_RD_PARAM_SHFT 32 | ||
269 | #define CA_PIO_OP (1ull << 34) | ||
270 | #define CA_CW_ERR (1ull << 35) | ||
271 | /* bits 62:36 unused */ | ||
272 | #define CA_VALID (1ull << 63) | ||
273 | |||
274 | /* ==== ca_crm_ct_error_detail_2 */ | ||
275 | /* bits 2:0 unused */ | ||
276 | #define CA_PKT_ADDR (0x1fffffffffffffull << 3) | ||
277 | #define CA_PKT_ADDR_SHFT 3 | ||
278 | /* bits 63:56 unused */ | ||
279 | |||
280 | /* ==== ca_crm_tnumto */ | ||
281 | #define CA_CRM_TNUMTO_VAL (0xffull << 0) | ||
282 | #define CA_CRM_TNUMTO_VAL_SHFT 0 | ||
283 | #define CA_CRM_TNUMTO_WR (1ull << 8) | ||
284 | /* bits 63:9 unused */ | ||
285 | |||
286 | /* ==== ca_gart_err */ | ||
287 | #define CA_GART_ERR_SOURCE (0x3ull << 0) | ||
288 | #define CA_GART_ERR_SOURCE_SHFT 0 | ||
289 | /* bits 3:2 unused */ | ||
290 | #define CA_GART_ERR_ADDR (0xfffffffffull << 4) | ||
291 | #define CA_GART_ERR_ADDR_SHFT 4 | ||
292 | /* bits 63:40 unused */ | ||
293 | |||
294 | /* ==== ca_pcierr_type */ | ||
295 | #define CA_PCIERR_DATA (0xffffffffull << 0) | ||
296 | #define CA_PCIERR_DATA_SHFT 0 | ||
297 | #define CA_PCIERR_ENB (0xfull << 32) | ||
298 | #define CA_PCIERR_ENB_SHFT 32 | ||
299 | #define CA_PCIERR_CMD (0xfull << 36) | ||
300 | #define CA_PCIERR_CMD_SHFT 36 | ||
301 | #define CA_PCIERR_A64 (1ull << 40) | ||
302 | #define CA_PCIERR_SLV_SERR (1ull << 41) | ||
303 | #define CA_PCIERR_SLV_WR_PERR (1ull << 42) | ||
304 | #define CA_PCIERR_SLV_RD_PERR (1ull << 43) | ||
305 | #define CA_PCIERR_MST_SERR (1ull << 44) | ||
306 | #define CA_PCIERR_MST_WR_PERR (1ull << 45) | ||
307 | #define CA_PCIERR_MST_RD_PERR (1ull << 46) | ||
308 | #define CA_PCIERR_MST_MABT (1ull << 47) | ||
309 | #define CA_PCIERR_MST_TABT (1ull << 48) | ||
310 | #define CA_PCIERR_MST_RETRY_TOUT (1ull << 49) | ||
311 | |||
312 | #define CA_PCIERR_TYPES \ | ||
313 | (CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \ | ||
314 | CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \ | ||
315 | CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \ | ||
316 | CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT) | ||
317 | |||
318 | /* bits 63:50 unused */ | ||
319 | |||
320 | /* ==== ca_pci_dma_addr_extn */ | ||
321 | #define CA_UPPER_NODE_OFFSET (0x3full << 0) | ||
322 | #define CA_UPPER_NODE_OFFSET_SHFT 0 | ||
323 | /* bits 7:6 unused */ | ||
324 | #define CA_CHIPLET_ID (0x3ull << 8) | ||
325 | #define CA_CHIPLET_ID_SHFT 8 | ||
326 | /* bits 11:10 unused */ | ||
327 | #define CA_PCI_DMA_NODE_ID (0xffffull << 12) | ||
328 | #define CA_PCI_DMA_NODE_ID_SHFT 12 | ||
329 | /* bits 27:26 unused */ | ||
330 | #define CA_PCI_DMA_PIO_MEM_TYPE (1ull << 28) | ||
331 | /* bits 63:29 unused */ | ||
332 | |||
333 | |||
334 | /* ==== ca_agp_dma_addr_extn */ | ||
335 | /* bits 19:0 unused */ | ||
336 | #define CA_AGP_DMA_NODE_ID (0xffffull << 20) | ||
337 | #define CA_AGP_DMA_NODE_ID_SHFT 20 | ||
338 | /* bits 27:26 unused */ | ||
339 | #define CA_AGP_DMA_PIO_MEM_TYPE (1ull << 28) | ||
340 | /* bits 63:29 unused */ | ||
341 | |||
342 | /* ==== ca_debug_vector_sel */ | ||
343 | #define CA_DEBUG_MN_VSEL (0xfull << 0) | ||
344 | #define CA_DEBUG_MN_VSEL_SHFT 0 | ||
345 | #define CA_DEBUG_PP_VSEL (0xfull << 4) | ||
346 | #define CA_DEBUG_PP_VSEL_SHFT 4 | ||
347 | #define CA_DEBUG_GW_VSEL (0xfull << 8) | ||
348 | #define CA_DEBUG_GW_VSEL_SHFT 8 | ||
349 | #define CA_DEBUG_GT_VSEL (0xfull << 12) | ||
350 | #define CA_DEBUG_GT_VSEL_SHFT 12 | ||
351 | #define CA_DEBUG_PD_VSEL (0xfull << 16) | ||
352 | #define CA_DEBUG_PD_VSEL_SHFT 16 | ||
353 | #define CA_DEBUG_AD_VSEL (0xfull << 20) | ||
354 | #define CA_DEBUG_AD_VSEL_SHFT 20 | ||
355 | #define CA_DEBUG_CX_VSEL (0xfull << 24) | ||
356 | #define CA_DEBUG_CX_VSEL_SHFT 24 | ||
357 | #define CA_DEBUG_CR_VSEL (0xfull << 28) | ||
358 | #define CA_DEBUG_CR_VSEL_SHFT 28 | ||
359 | #define CA_DEBUG_BA_VSEL (0xfull << 32) | ||
360 | #define CA_DEBUG_BA_VSEL_SHFT 32 | ||
361 | #define CA_DEBUG_PE_VSEL (0xfull << 36) | ||
362 | #define CA_DEBUG_PE_VSEL_SHFT 36 | ||
363 | #define CA_DEBUG_BO_VSEL (0xfull << 40) | ||
364 | #define CA_DEBUG_BO_VSEL_SHFT 40 | ||
365 | #define CA_DEBUG_BI_VSEL (0xfull << 44) | ||
366 | #define CA_DEBUG_BI_VSEL_SHFT 44 | ||
367 | #define CA_DEBUG_AS_VSEL (0xfull << 48) | ||
368 | #define CA_DEBUG_AS_VSEL_SHFT 48 | ||
369 | #define CA_DEBUG_PS_VSEL (0xfull << 52) | ||
370 | #define CA_DEBUG_PS_VSEL_SHFT 52 | ||
371 | #define CA_DEBUG_PM_VSEL (0xfull << 56) | ||
372 | #define CA_DEBUG_PM_VSEL_SHFT 56 | ||
373 | /* bits 63:60 unused */ | ||
374 | |||
375 | /* ==== ca_debug_mux_core_sel */ | ||
376 | /* ==== ca_debug_mux_pci_sel */ | ||
377 | #define CA_DEBUG_MSEL0 (0x7ull << 0) | ||
378 | #define CA_DEBUG_MSEL0_SHFT 0 | ||
379 | /* bit 3 unused */ | ||
380 | #define CA_DEBUG_NSEL0 (0x7ull << 4) | ||
381 | #define CA_DEBUG_NSEL0_SHFT 4 | ||
382 | /* bit 7 unused */ | ||
383 | #define CA_DEBUG_MSEL1 (0x7ull << 8) | ||
384 | #define CA_DEBUG_MSEL1_SHFT 8 | ||
385 | /* bit 11 unused */ | ||
386 | #define CA_DEBUG_NSEL1 (0x7ull << 12) | ||
387 | #define CA_DEBUG_NSEL1_SHFT 12 | ||
388 | /* bit 15 unused */ | ||
389 | #define CA_DEBUG_MSEL2 (0x7ull << 16) | ||
390 | #define CA_DEBUG_MSEL2_SHFT 16 | ||
391 | /* bit 19 unused */ | ||
392 | #define CA_DEBUG_NSEL2 (0x7ull << 20) | ||
393 | #define CA_DEBUG_NSEL2_SHFT 20 | ||
394 | /* bit 23 unused */ | ||
395 | #define CA_DEBUG_MSEL3 (0x7ull << 24) | ||
396 | #define CA_DEBUG_MSEL3_SHFT 24 | ||
397 | /* bit 27 unused */ | ||
398 | #define CA_DEBUG_NSEL3 (0x7ull << 28) | ||
399 | #define CA_DEBUG_NSEL3_SHFT 28 | ||
400 | /* bit 31 unused */ | ||
401 | #define CA_DEBUG_MSEL4 (0x7ull << 32) | ||
402 | #define CA_DEBUG_MSEL4_SHFT 32 | ||
403 | /* bit 35 unused */ | ||
404 | #define CA_DEBUG_NSEL4 (0x7ull << 36) | ||
405 | #define CA_DEBUG_NSEL4_SHFT 36 | ||
406 | /* bit 39 unused */ | ||
407 | #define CA_DEBUG_MSEL5 (0x7ull << 40) | ||
408 | #define CA_DEBUG_MSEL5_SHFT 40 | ||
409 | /* bit 43 unused */ | ||
410 | #define CA_DEBUG_NSEL5 (0x7ull << 44) | ||
411 | #define CA_DEBUG_NSEL5_SHFT 44 | ||
412 | /* bit 47 unused */ | ||
413 | #define CA_DEBUG_MSEL6 (0x7ull << 48) | ||
414 | #define CA_DEBUG_MSEL6_SHFT 48 | ||
415 | /* bit 51 unused */ | ||
416 | #define CA_DEBUG_NSEL6 (0x7ull << 52) | ||
417 | #define CA_DEBUG_NSEL6_SHFT 52 | ||
418 | /* bit 55 unused */ | ||
419 | #define CA_DEBUG_MSEL7 (0x7ull << 56) | ||
420 | #define CA_DEBUG_MSEL7_SHFT 56 | ||
421 | /* bit 59 unused */ | ||
422 | #define CA_DEBUG_NSEL7 (0x7ull << 60) | ||
423 | #define CA_DEBUG_NSEL7_SHFT 60 | ||
424 | /* bit 63 unused */ | ||
425 | |||
426 | |||
427 | /* ==== ca_debug_domain_sel */ | ||
428 | #define CA_DEBUG_DOMAIN_L (1ull << 0) | ||
429 | #define CA_DEBUG_DOMAIN_H (1ull << 1) | ||
430 | /* bits 63:2 unused */ | ||
431 | |||
432 | /* ==== ca_gart_ptr_table */ | ||
433 | #define CA_GART_PTR_VAL (1ull << 0) | ||
434 | /* bits 11:1 unused */ | ||
435 | #define CA_GART_PTR_ADDR (0xfffffffffffull << 12) | ||
436 | #define CA_GART_PTR_ADDR_SHFT 12 | ||
437 | /* bits 63:56 unused */ | ||
438 | |||
439 | /* ==== ca_gart_tlb_addr[0-7] */ | ||
440 | #define CA_GART_TLB_ADDR (0xffffffffffffffull << 0) | ||
441 | #define CA_GART_TLB_ADDR_SHFT 0 | ||
442 | /* bits 62:56 unused */ | ||
443 | #define CA_GART_TLB_ENTRY_VAL (1ull << 63) | ||
444 | |||
445 | /* | ||
446 | * PIO address space ranges for TIO:CA | ||
447 | */ | ||
448 | |||
449 | /* CA internal registers */ | ||
450 | #define CA_PIO_ADMIN 0x00000000 | ||
451 | #define CA_PIO_ADMIN_LEN 0x00010000 | ||
452 | |||
453 | /* GFX Write Buffer - Diagnostics */ | ||
454 | #define CA_PIO_GFX 0x00010000 | ||
455 | #define CA_PIO_GFX_LEN 0x00010000 | ||
456 | |||
457 | /* AGP DMA Write Buffer - Diagnostics */ | ||
458 | #define CA_PIO_AGP_DMAWRITE 0x00020000 | ||
459 | #define CA_PIO_AGP_DMAWRITE_LEN 0x00010000 | ||
460 | |||
461 | /* AGP DMA READ Buffer - Diagnostics */ | ||
462 | #define CA_PIO_AGP_DMAREAD 0x00030000 | ||
463 | #define CA_PIO_AGP_DMAREAD_LEN 0x00010000 | ||
464 | |||
465 | /* PCI Config Type 0 */ | ||
466 | #define CA_PIO_PCI_TYPE0_CONFIG 0x01000000 | ||
467 | #define CA_PIO_PCI_TYPE0_CONFIG_LEN 0x01000000 | ||
468 | |||
469 | /* PCI Config Type 1 */ | ||
470 | #define CA_PIO_PCI_TYPE1_CONFIG 0x02000000 | ||
471 | #define CA_PIO_PCI_TYPE1_CONFIG_LEN 0x01000000 | ||
472 | |||
473 | /* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */ | ||
474 | #define CA_PIO_PCI_IO 0x03000000 | ||
475 | #define CA_PIO_PCI_IO_LEN 0x05000000 | ||
476 | |||
477 | /* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */ | ||
478 | /* use Fast Write if enabled and coretalk packet type is a GFX request */ | ||
479 | #define CA_PIO_PCI_MEM_OFFSET 0x08000000 | ||
480 | #define CA_PIO_PCI_MEM_OFFSET_LEN 0x08000000 | ||
481 | |||
482 | /* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */ | ||
483 | /* use Fast Write if enabled and coretalk packet type is a GFX request */ | ||
484 | #define CA_PIO_PCI_MEM 0x40000000 | ||
485 | #define CA_PIO_PCI_MEM_LEN 0xc0000000 | ||
486 | |||
487 | /* | ||
488 | * DMA space | ||
489 | * | ||
490 | * The CA aperture (i.e. bus address range) mapped by the GART is segmented into | ||
491 | * two parts. The lower portion of the aperture is used for mapping 32 bit | ||
492 | * PCI addresses, which are managed by the DMA interfaces in this file. The | ||
493 | * upper portion of the aperture is used for mapping 48 bit AGP addresses. | ||
494 | * The AGP portion of the aperture is managed by the agpgart_be.c driver | ||
495 | * in drivers/linux/agp. There are CA-specific hooks in that driver to | ||
496 | * manipulate the GART, but management of the AGP portion of the aperture | ||
497 | * is the responsibility of that driver. | ||
498 | * | ||
499 | * CA allows three main types of DMA mapping: | ||
500 | * | ||
501 | * PCI 64-bit Managed by this driver | ||
502 | * PCI 32-bit Managed by this driver | ||
503 | * AGP 48-bit Managed by hooks in the /dev/agpgart driver | ||
504 | * | ||
505 | * All of the above can optionally be remapped through the GART. The following | ||
506 | * table lists the combinations of addressing types and GART remapping that | ||
507 | * are currently supported by the driver (h/w supports all, s/w limits this): | ||
508 | * | ||
509 | * PCI64 PCI32 AGP48 | ||
510 | * GART no yes yes | ||
511 | * Direct yes yes no | ||
512 | * | ||
513 | * GART remapping of PCI64 is not done because there is no need to. The | ||
514 | * 64 bit PCI address holds all of the information necessary to target any | ||
515 | * memory in the system. | ||
516 | * | ||
517 | * AGP48 is always mapped through the GART. Management of the AGP48 portion | ||
518 | * of the aperture is the responsibility of code in the agpgart_be driver. | ||
519 | * | ||
520 | * The non-64 bit bus address space will currently be partitioned like this: | ||
521 | * | ||
522 | * 0xffff_ffff_ffff +-------- | ||
523 | * | AGP48 direct | ||
524 | * | Space managed by this driver | ||
525 | * CA_AGP_DIRECT_BASE +-------- | ||
526 | * | AGP GART mapped (gfx aperture) | ||
527 | * | Space managed by /dev/agpgart driver | ||
528 | * | This range is exposed to the agpgart | ||
529 | * | driver as the "graphics aperture" | ||
530 | * CA_AGP_MAPPED_BASE +----- | ||
531 | * | PCI GART mapped | ||
532 | * | Space managed by this driver | ||
533 | * CA_PCI32_MAPPED_BASE +---- | ||
534 | * | PCI32 direct | ||
535 | * | Space managed by this driver | ||
536 | * 0xC000_0000 +-------- | ||
537 | * (CA_PCI32_DIRECT_BASE) | ||
538 | * | ||
539 | * The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE | ||
540 | * is what we call the CA aperture. Addresses falling in this range will | ||
541 | * be remapped using the GART. | ||
542 | * | ||
543 | * The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE | ||
544 | * is what we call the graphics aperture. This is a subset of the CA | ||
545 | * aperture and is under the control of the agpgart_be driver. | ||
546 | * | ||
547 | * CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are | ||
548 | * somewhat arbitrary values. The known constraints on choosing these are: | ||
549 | * | ||
550 | * 1) CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperture size) | ||
551 | * must be one of the values supported by the ca_gart_aperature register. | ||
552 | * Currently valid values are: 4MB through 4096MB, in power-of-2 increments. | ||
553 | * | ||
554 | * 2) CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperture size) | ||
555 | * must be in MB units since that's what the agpgart driver assumes. | ||
556 | */ | ||
557 | |||
558 | /* | ||
559 | * Define Bus DMA ranges. These are configurable (see constraints above) | ||
560 | * and will probably need tuning based on experience. | ||
561 | */ | ||
562 | |||
563 | |||
564 | /* | ||
565 | * 11/24/03 | ||
566 | * CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it | ||
567 | * generally unusable. The problem is that for PCI direct 32 | ||
568 | * DMA's, all 32 bits of the bus address are used to form the lower 32 bits | ||
569 | * of the coretalk address, and coretalk bits 38:32 come from a register. | ||
570 | * Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available | ||
571 | * for DMA (the rest is allocated to PIO), host node addresses need to be | ||
572 | * such that their lower 32 bits fall in the 0xC0000000-0xffffffff range | ||
573 | * as well. So there can be no PCI32 direct DMA below 3GB!! For this | ||
574 | * reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes | ||
575 | * tioca_dma_direct32() a noop but preserves the code flow should this issue | ||
576 | * be fixed in a respin. | ||
577 | * | ||
578 | * For now, all PCI32 DMA's must be mapped through the GART. | ||
579 | */ | ||
580 | |||
581 | #define CA_PCI32_DIRECT_BASE 0xC0000000UL /* BASE not configurable */ | ||
582 | #define CA_PCI32_DIRECT_SIZE 0x00000000UL /* 0 MB */ | ||
583 | |||
584 | #define CA_PCI32_MAPPED_BASE 0xC0000000UL | ||
585 | #define CA_PCI32_MAPPED_SIZE 0x40000000UL /* 2GB */ | ||
586 | |||
587 | #define CA_AGP_MAPPED_BASE 0x80000000UL | ||
588 | #define CA_AGP_MAPPED_SIZE 0x40000000UL /* 2GB */ | ||
589 | |||
590 | #define CA_AGP_DIRECT_BASE 0x40000000UL /* 2GB */ | ||
591 | #define CA_AGP_DIRECT_SIZE 0x40000000UL | ||
592 | |||
593 | #define CA_APERATURE_BASE (CA_AGP_MAPPED_BASE) | ||
594 | #define CA_APERATURE_SIZE (CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE) | ||
595 | |||
596 | #endif /* _ASM_IA64_SN_TIO_TIOCA_H */ | ||
diff --git a/arch/ia64/include/asm/sn/tioca_provider.h b/arch/ia64/include/asm/sn/tioca_provider.h deleted file mode 100644 index 9a820ac61be3..000000000000 --- a/arch/ia64/include/asm/sn/tioca_provider.h +++ /dev/null | |||
@@ -1,207 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H | ||
10 | #define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H | ||
11 | |||
12 | #include <asm/sn/tioca.h> | ||
13 | |||
14 | /* | ||
15 | * WAR enables | ||
16 | * Defines for individual WARs. Each is a bitmask of applicable | ||
17 | * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B, | ||
18 | * (3 << 1) == (rev A or rev B), etc | ||
19 | */ | ||
20 | |||
21 | #define TIOCA_WAR_ENABLED(pv, tioca_common) \ | ||
22 | ((1 << tioca_common->ca_rev) & pv) | ||
23 | |||
24 | /* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */ | ||
25 | #define PV907908 (1 << 1) | ||
26 | /* ATI config space problems after BIOS execution starts */ | ||
27 | #define PV908234 (1 << 1) | ||
28 | /* CA:AGPDMA write request data mismatch with ABC1CL merge */ | ||
29 | #define PV895469 (1 << 1) | ||
30 | /* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA*/ | ||
31 | #define PV910244 (1 << 1) | ||
32 | |||
33 | struct tioca_dmamap { | ||
34 | struct list_head cad_list; /* headed by ca_list */ | ||
35 | |||
36 | dma_addr_t cad_dma_addr; /* Linux dma handle */ | ||
37 | uint cad_gart_entry; /* start entry in ca_gart_pagemap */ | ||
38 | uint cad_gart_size; /* #entries for this map */ | ||
39 | }; | ||
40 | |||
41 | /* | ||
42 | * Kernel only fields. Prom may look at this stuff for debugging only. | ||
43 | * Access this structure through the ca_kernel_private ptr. | ||
44 | */ | ||
45 | |||
46 | struct tioca_common; | ||
47 | |||
48 | struct tioca_kernel { | ||
49 | struct tioca_common *ca_common; /* tioca this belongs to */ | ||
50 | struct list_head ca_list; /* list of all ca's */ | ||
51 | struct list_head ca_dmamaps; | ||
52 | spinlock_t ca_lock; /* Kernel lock */ | ||
53 | cnodeid_t ca_closest_node; | ||
54 | struct list_head *ca_devices; /* bus->devices */ | ||
55 | |||
56 | /* | ||
57 | * General GART stuff | ||
58 | */ | ||
59 | u64 ca_ap_size; /* size of aperture in bytes */ | ||
60 | u32 ca_gart_entries; /* # u64 entries in gart */ | ||
61 | u32 ca_ap_pagesize; /* aperture page size in bytes */ | ||
62 | u64 ca_ap_bus_base; /* bus address of CA aperture */ | ||
63 | u64 ca_gart_size; /* gart size in bytes */ | ||
64 | u64 *ca_gart; /* gart table vaddr */ | ||
65 | u64 ca_gart_coretalk_addr; /* gart coretalk addr */ | ||
66 | u8 ca_gart_iscoherent; /* used in tioca_tlbflush */ | ||
67 | |||
68 | /* PCI GART convenience values */ | ||
69 | u64 ca_pciap_base; /* pci aperture bus base address */ | ||
70 | u64 ca_pciap_size; /* pci aperture size (bytes) */ | ||
71 | u64 ca_pcigart_base; /* gfx GART bus base address */ | ||
72 | u64 *ca_pcigart; /* gfx GART vm address */ | ||
73 | u32 ca_pcigart_entries; | ||
74 | u32 ca_pcigart_start; /* PCI start index in ca_gart */ | ||
75 | void *ca_pcigart_pagemap; | ||
76 | |||
77 | /* AGP GART convenience values */ | ||
78 | u64 ca_gfxap_base; /* gfx aperture bus base address */ | ||
79 | u64 ca_gfxap_size; /* gfx aperture size (bytes) */ | ||
80 | u64 ca_gfxgart_base; /* gfx GART bus base address */ | ||
81 | u64 *ca_gfxgart; /* gfx GART vm address */ | ||
82 | u32 ca_gfxgart_entries; | ||
83 | u32 ca_gfxgart_start; /* agpgart start index in ca_gart */ | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * Common tioca info shared between kernel and prom | ||
88 | * | ||
89 | * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES | ||
90 | * TO THE PROM VERSION. | ||
91 | */ | ||
92 | |||
93 | struct tioca_common { | ||
94 | struct pcibus_bussoft ca_common; /* common pciio header */ | ||
95 | |||
96 | u32 ca_rev; | ||
97 | u32 ca_closest_nasid; | ||
98 | |||
99 | u64 ca_prom_private; | ||
100 | u64 ca_kernel_private; | ||
101 | }; | ||
102 | |||
103 | /** | ||
104 | * tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry | ||
105 | * @paddr: page address to convert | ||
106 | * | ||
107 | * Convert a system [coretalk] address to a GART entry. GART entries are | ||
108 | * formed using the following: | ||
109 | * | ||
110 | * data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) | | ||
111 | * (REMAP_SYS_ADDR) ) >> 12 ) | ||
112 | * | ||
113 | * DATA written to 1 GART TABLE Entry in system memory is remapped system | ||
114 | * addr for 1 page | ||
115 | * | ||
116 | * The data is for coretalk address format right shifted 12 bits with a | ||
117 | * valid bit. | ||
118 | * | ||
119 | * GART_TABLE_ENTRY [ 25:0 ] -- REMAP_SYS_ADDRESS[37:12]. | ||
120 | * GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id. | ||
121 | * GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID. | ||
122 | * GART_TABLE_ENTRY [ 63 ] -- Valid Bit | ||
123 | */ | ||
124 | static inline u64 | ||
125 | tioca_paddr_to_gart(unsigned long paddr) | ||
126 | { | ||
127 | /* | ||
128 | * We are assuming right now that paddr already has the correct | ||
129 | * format since the address from xtalk_dmaXXX should already have | ||
130 | * NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations. | ||
131 | */ | ||
132 | |||
133 | return ((paddr) >> 12) | (1UL << 63); | ||
134 | } | ||
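A worked example of the conversion above, using an arbitrary illustrative address:

u64 entry = tioca_paddr_to_gart(0x123456789000UL);
/* entry == 0x8000000123456789UL: the coretalk address shifted right by
 * 12 bits, with the valid bit (bit 63) set. */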
135 | |||
136 | /** | ||
137 | * tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA | ||
138 | * @page_addr: system page address to map | ||
139 | */ | ||
140 | |||
141 | static inline unsigned long | ||
142 | tioca_physpage_to_gart(u64 page_addr) | ||
143 | { | ||
144 | u64 coretalk_addr; | ||
145 | |||
146 | coretalk_addr = PHYS_TO_TIODMA(page_addr); | ||
147 | if (!coretalk_addr) { | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | return tioca_paddr_to_gart(coretalk_addr); | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * tioca_tlbflush - invalidate cached SGI CA GART TLB entries | ||
156 | * @tioca_kernel: CA context | ||
157 | * | ||
158 | * Invalidate tlb entries for a given CA GART. Main complexity is to account | ||
159 | * for revA bug. | ||
160 | */ | ||
161 | static inline void | ||
162 | tioca_tlbflush(struct tioca_kernel *tioca_kernel) | ||
163 | { | ||
164 | volatile u64 tmp; | ||
165 | volatile struct tioca __iomem *ca_base; | ||
166 | struct tioca_common *tioca_common; | ||
167 | |||
168 | tioca_common = tioca_kernel->ca_common; | ||
169 | ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; | ||
170 | |||
171 | /* | ||
172 | * Explicit flushes not needed if GART is in cached mode | ||
173 | */ | ||
174 | if (tioca_kernel->ca_gart_iscoherent) { | ||
175 | if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) { | ||
176 | /* | ||
177 | * PV910244: RevA CA needs explicit flushes. | ||
178 | * Need to put GART into uncached mode before | ||
179 | * flushing otherwise the explicit flush is ignored. | ||
180 | * | ||
181 | * Alternate WAR would be to leave GART cached and | ||
182 | * touch every CL aligned GART entry. | ||
183 | */ | ||
184 | |||
185 | __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); | ||
186 | __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB); | ||
187 | __sn_setq_relaxed(&ca_base->ca_control2, | ||
188 | (0x2ull << CA_GART_MEM_PARAM_SHFT)); | ||
189 | tmp = __sn_readq_relaxed(&ca_base->ca_control2); | ||
190 | } | ||
191 | |||
192 | return; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Gart in uncached mode ... need an explicit flush. | ||
197 | */ | ||
198 | |||
199 | __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB); | ||
200 | tmp = __sn_readq_relaxed(&ca_base->ca_control2); | ||
201 | } | ||
202 | |||
203 | extern u32 tioca_gart_found; | ||
204 | extern struct list_head tioca_list; | ||
205 | extern int tioca_init_provider(void); | ||
206 | extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern); | ||
207 | #endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */ | ||
diff --git a/arch/ia64/include/asm/sn/tioce.h b/arch/ia64/include/asm/sn/tioce.h deleted file mode 100644 index 6eae8ada90f0..000000000000 --- a/arch/ia64/include/asm/sn/tioce.h +++ /dev/null | |||
@@ -1,760 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef __ASM_IA64_SN_TIOCE_H__ | ||
10 | #define __ASM_IA64_SN_TIOCE_H__ | ||
11 | |||
12 | /* CE ASIC part & mfgr information */ | ||
13 | #define TIOCE_PART_NUM 0xCE00 | ||
14 | #define TIOCE_SRC_ID 0x01 | ||
15 | #define TIOCE_REV_A 0x1 | ||
16 | |||
17 | /* CE Virtual PPB Vendor/Device IDs */ | ||
18 | #define CE_VIRT_PPB_VENDOR_ID 0x10a9 | ||
19 | #define CE_VIRT_PPB_DEVICE_ID 0x4002 | ||
20 | |||
21 | /* CE Host Bridge Vendor/Device IDs */ | ||
22 | #define CE_HOST_BRIDGE_VENDOR_ID 0x10a9 | ||
23 | #define CE_HOST_BRIDGE_DEVICE_ID 0x4001 | ||
24 | |||
25 | |||
26 | #define TIOCE_NUM_M40_ATES 4096 | ||
27 | #define TIOCE_NUM_M3240_ATES 2048 | ||
28 | #define TIOCE_NUM_PORTS 2 | ||
29 | |||
30 | /* | ||
31 | * Register layout for TIOCE. MMR offsets are shown at the far right of the | ||
32 | * structure definition. | ||
33 | */ | ||
34 | typedef volatile struct tioce { | ||
35 | /* | ||
36 | * ADMIN : Administration Registers | ||
37 | */ | ||
38 | u64 ce_adm_id; /* 0x000000 */ | ||
39 | u64 ce_pad_000008; /* 0x000008 */ | ||
40 | u64 ce_adm_dyn_credit_status; /* 0x000010 */ | ||
41 | u64 ce_adm_last_credit_status; /* 0x000018 */ | ||
42 | u64 ce_adm_credit_limit; /* 0x000020 */ | ||
43 | u64 ce_adm_force_credit; /* 0x000028 */ | ||
44 | u64 ce_adm_control; /* 0x000030 */ | ||
45 | u64 ce_adm_mmr_chn_timeout; /* 0x000038 */ | ||
46 | u64 ce_adm_ssp_ure_timeout; /* 0x000040 */ | ||
47 | u64 ce_adm_ssp_dre_timeout; /* 0x000048 */ | ||
48 | u64 ce_adm_ssp_debug_sel; /* 0x000050 */ | ||
49 | u64 ce_adm_int_status; /* 0x000058 */ | ||
50 | u64 ce_adm_int_status_alias; /* 0x000060 */ | ||
51 | u64 ce_adm_int_mask; /* 0x000068 */ | ||
52 | u64 ce_adm_int_pending; /* 0x000070 */ | ||
53 | u64 ce_adm_force_int; /* 0x000078 */ | ||
54 | u64 ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */ | ||
55 | u64 ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */ | ||
56 | u64 ce_adm_error_summary; /* 0x000100 */ | ||
57 | u64 ce_adm_error_summary_alias; /* 0x000108 */ | ||
58 | u64 ce_adm_error_mask; /* 0x000110 */ | ||
59 | u64 ce_adm_first_error; /* 0x000118 */ | ||
60 | u64 ce_adm_error_overflow; /* 0x000120 */ | ||
61 | u64 ce_adm_error_overflow_alias; /* 0x000128 */ | ||
62 | u64 ce_pad_000130[2]; /* 0x000130 -- 0x000138 */ | ||
63 | u64 ce_adm_tnum_error; /* 0x000140 */ | ||
64 | u64 ce_adm_mmr_err_detail; /* 0x000148 */ | ||
65 | u64 ce_adm_msg_sram_perr_detail; /* 0x000150 */ | ||
66 | u64 ce_adm_bap_sram_perr_detail; /* 0x000158 */ | ||
67 | u64 ce_adm_ce_sram_perr_detail; /* 0x000160 */ | ||
68 | u64 ce_adm_ce_credit_oflow_detail; /* 0x000168 */ | ||
69 | u64 ce_adm_tx_link_idle_max_timer; /* 0x000170 */ | ||
70 | u64 ce_adm_pcie_debug_sel; /* 0x000178 */ | ||
71 | u64 ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */ | ||
72 | |||
73 | u64 ce_adm_pcie_debug_sel_top; /* 0x000200 */ | ||
74 | u64 ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */ | ||
75 | u64 ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */ | ||
76 | u64 ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */ | ||
77 | u64 ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */ | ||
78 | u64 ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */ | ||
79 | u64 ce_adm_pcie_trig_compare_top; /* 0x000230 */ | ||
80 | u64 ce_adm_pcie_trig_compare_en_top; /* 0x000238 */ | ||
81 | u64 ce_adm_ssp_debug_sel_top; /* 0x000240 */ | ||
82 | u64 ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */ | ||
83 | u64 ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */ | ||
84 | u64 ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */ | ||
85 | u64 ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */ | ||
86 | u64 ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */ | ||
87 | u64 ce_adm_ssp_trig_compare_top; /* 0x000270 */ | ||
88 | u64 ce_adm_ssp_trig_compare_en_top; /* 0x000278 */ | ||
89 | u64 ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */ | ||
90 | |||
91 | u64 ce_adm_bap_ctrl; /* 0x000400 */ | ||
92 | u64 ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */ | ||
93 | |||
94 | u64 ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */ | ||
95 | u64 ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */ | ||
96 | |||
97 | u64 ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */ | ||
98 | u64 ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */ | ||
99 | |||
100 | u64 ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */ | ||
101 | u64 ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */ | ||
102 | |||
103 | u64 ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */ | ||
104 | |||
105 | /* | ||
106 | * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2) | ||
107 | * Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000 | ||
108 | * NOTE: the comment offsets at far right: let 'z' = {2 or 3} | ||
109 | */ | ||
110 | #define ce_lsi(link_num) ce_lsi[link_num-1] | ||
111 | struct ce_lsi_reg { | ||
112 | u64 ce_lsi_lpu_id; /* 0x00z000 */ | ||
113 | u64 ce_lsi_rst; /* 0x00z008 */ | ||
114 | u64 ce_lsi_dbg_stat; /* 0x00z010 */ | ||
115 | u64 ce_lsi_dbg_cfg; /* 0x00z018 */ | ||
116 | u64 ce_lsi_ltssm_ctrl; /* 0x00z020 */ | ||
117 | u64 ce_lsi_lk_stat; /* 0x00z028 */ | ||
118 | u64 ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */ | ||
119 | u64 ce_lsi_int_and_stat; /* 0x00z040 */ | ||
120 | u64 ce_lsi_int_mask; /* 0x00z048 */ | ||
121 | u64 ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */ | ||
122 | u64 ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */ | ||
123 | u64 ce_pad_00z108; /* 0x00z108 */ | ||
124 | u64 ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */ | ||
125 | u64 ce_pad_00z118; /* 0x00z118 */ | ||
126 | u64 ce_lsi_lk_perf_cnt1; /* 0x00z120 */ | ||
127 | u64 ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */ | ||
128 | u64 ce_lsi_lk_perf_cnt2; /* 0x00z130 */ | ||
129 | u64 ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */ | ||
130 | u64 ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */ | ||
131 | u64 ce_lsi_lk_lyr_cfg; /* 0x00z200 */ | ||
132 | u64 ce_lsi_lk_lyr_status; /* 0x00z208 */ | ||
133 | u64 ce_lsi_lk_lyr_int_stat; /* 0x00z210 */ | ||
134 | u64 ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */ | ||
135 | u64 ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */ | ||
136 | u64 ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */ | ||
137 | u64 ce_lsi_fc_upd_ctl; /* 0x00z240 */ | ||
138 | u64 ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */ | ||
139 | u64 ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */ | ||
140 | u64 ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */ | ||
141 | u64 ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */ | ||
142 | u64 ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */ | ||
143 | u64 ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */ | ||
144 | u64 ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */ | ||
145 | u64 ce_lsi_rply_tmr_thr; /* 0x00z410 */ | ||
146 | u64 ce_lsi_rply_tmr; /* 0x00z418 */ | ||
147 | u64 ce_lsi_rply_num_stat; /* 0x00z420 */ | ||
148 | u64 ce_lsi_rty_buf_max_addr; /* 0x00z428 */ | ||
149 | u64 ce_lsi_rty_fifo_ptr; /* 0x00z430 */ | ||
150 | u64 ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */ | ||
151 | u64 ce_lsi_rty_fifo_cred; /* 0x00z440 */ | ||
152 | u64 ce_lsi_seq_cnt; /* 0x00z448 */ | ||
153 | u64 ce_lsi_ack_sent_seq_num; /* 0x00z450 */ | ||
154 | u64 ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */ | ||
155 | u64 ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */ | ||
156 | u64 ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */ | ||
157 | u64 ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */ | ||
158 | u64 ce_pad_00z478; /* 0x00z478 */ | ||
159 | u64 ce_lsi_mem_addr_ctl; /* 0x00z480 */ | ||
160 | u64 ce_lsi_mem_d_ld0; /* 0x00z488 */ | ||
161 | u64 ce_lsi_mem_d_ld1; /* 0x00z490 */ | ||
162 | u64 ce_lsi_mem_d_ld2; /* 0x00z498 */ | ||
163 | u64 ce_lsi_mem_d_ld3; /* 0x00z4A0 */ | ||
164 | u64 ce_lsi_mem_d_ld4; /* 0x00z4A8 */ | ||
165 | u64 ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */ | ||
166 | u64 ce_lsi_rty_d_cnt; /* 0x00z4C0 */ | ||
167 | u64 ce_lsi_seq_buf_cnt; /* 0x00z4C8 */ | ||
168 | u64 ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */ | ||
169 | u64 ce_pad_00z4D8; /* 0x00z4D8 */ | ||
170 | u64 ce_lsi_ack_lat_thr; /* 0x00z4E0 */ | ||
171 | u64 ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */ | ||
172 | u64 ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */ | ||
173 | u64 ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */ | ||
174 | u64 ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */ | ||
175 | u64 ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */ | ||
176 | u64 ce_lsi_phy_lyr_cfg; /* 0x00z600 */ | ||
177 | u64 ce_pad_00z608; /* 0x00z608 */ | ||
178 | u64 ce_lsi_phy_lyr_int_stat; /* 0x00z610 */ | ||
179 | u64 ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */ | ||
180 | u64 ce_lsi_phy_lyr_int_mask; /* 0x00z620 */ | ||
181 | u64 ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */ | ||
182 | u64 ce_lsi_rcv_phy_cfg; /* 0x00z680 */ | ||
183 | u64 ce_lsi_rcv_phy_stat1; /* 0x00z688 */ | ||
184 | u64 ce_lsi_rcv_phy_stat2; /* 0x00z690 */ | ||
185 | u64 ce_lsi_rcv_phy_stat3; /* 0x00z698 */ | ||
186 | u64 ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */ | ||
187 | u64 ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */ | ||
188 | u64 ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */ | ||
189 | u64 ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */ | ||
190 | u64 ce_lsi_tx_phy_cfg; /* 0x00z700 */ | ||
191 | u64 ce_lsi_tx_phy_stat; /* 0x00z708 */ | ||
192 | u64 ce_lsi_tx_phy_int_stat; /* 0x00z710 */ | ||
193 | u64 ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */ | ||
194 | u64 ce_lsi_tx_phy_int_mask; /* 0x00z720 */ | ||
195 | u64 ce_lsi_tx_phy_stat2; /* 0x00z728 */ | ||
196 | u64 ce_pad_00z730[10]; /* 0x00z730 -- 0x00z77F */ | ||
197 | u64 ce_lsi_ltssm_cfg1; /* 0x00z780 */ | ||
198 | u64 ce_lsi_ltssm_cfg2; /* 0x00z788 */ | ||
199 | u64 ce_lsi_ltssm_cfg3; /* 0x00z790 */ | ||
200 | u64 ce_lsi_ltssm_cfg4; /* 0x00z798 */ | ||
201 | u64 ce_lsi_ltssm_cfg5; /* 0x00z7A0 */ | ||
202 | u64 ce_lsi_ltssm_stat1; /* 0x00z7A8 */ | ||
203 | u64 ce_lsi_ltssm_stat2; /* 0x00z7B0 */ | ||
204 | u64 ce_lsi_ltssm_int_stat; /* 0x00z7B8 */ | ||
205 | u64 ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */ | ||
206 | u64 ce_lsi_ltssm_int_mask; /* 0x00z7C8 */ | ||
207 | u64 ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */ | ||
208 | u64 ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */ | ||
209 | u64 ce_lsi_gb_cfg1; /* 0x00z800 */ | ||
210 | u64 ce_lsi_gb_cfg2; /* 0x00z808 */ | ||
211 | u64 ce_lsi_gb_cfg3; /* 0x00z810 */ | ||
212 | u64 ce_lsi_gb_cfg4; /* 0x00z818 */ | ||
213 | u64 ce_lsi_gb_stat; /* 0x00z820 */ | ||
214 | u64 ce_lsi_gb_int_stat; /* 0x00z828 */ | ||
215 | u64 ce_lsi_gb_int_stat_test; /* 0x00z830 */ | ||
216 | u64 ce_lsi_gb_int_mask; /* 0x00z838 */ | ||
217 | u64 ce_lsi_gb_pwr_dn1; /* 0x00z840 */ | ||
218 | u64 ce_lsi_gb_pwr_dn2; /* 0x00z848 */ | ||
219 | u64 ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */ | ||
220 | } ce_lsi[2]; | ||
221 | |||
222 | u64 ce_pad_004000[10]; /* 0x004000 -- 0x004048 */ | ||
223 | |||
224 | /* | ||
225 | * CRM: Coretalk Receive Module Registers | ||
226 | */ | ||
227 | u64 ce_crm_debug_mux; /* 0x004050 */ | ||
228 | u64 ce_pad_004058; /* 0x004058 */ | ||
229 | u64 ce_crm_ssp_err_cmd_wrd; /* 0x004060 */ | ||
230 | u64 ce_crm_ssp_err_addr; /* 0x004068 */ | ||
231 | u64 ce_crm_ssp_err_syn; /* 0x004070 */ | ||
232 | |||
233 | u64 ce_pad_004078[499]; /* 0x004078 -- 0x005008 */ | ||
234 | |||
235 | /* | ||
236 | * CXM: Coretalk Xmit Module Registers | ||
237 | */ | ||
238 | u64 ce_cxm_dyn_credit_status; /* 0x005010 */ | ||
239 | u64 ce_cxm_last_credit_status; /* 0x005018 */ | ||
240 | u64 ce_cxm_credit_limit; /* 0x005020 */ | ||
241 | u64 ce_cxm_force_credit; /* 0x005028 */ | ||
242 | u64 ce_cxm_disable_bypass; /* 0x005030 */ | ||
243 | u64 ce_pad_005038[3]; /* 0x005038 -- 0x005048 */ | ||
244 | u64 ce_cxm_debug_mux; /* 0x005050 */ | ||
245 | |||
246 | u64 ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */ | ||
247 | |||
248 | /* | ||
249 | * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2) | ||
250 | * DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000 | ||
251 | * DTL: the comment offsets at far right: let 'y' = {6 or 8} | ||
252 | * | ||
253 | * UTL: Upstream Transaction Layer Regs (Link#1 and Link#2) | ||
254 | * UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000 | ||
255 | * UTL: the comment offsets at far right: let 'z' = {7 or 9} | ||
256 | */ | ||
257 | #define ce_dtl(link_num) ce_dtl_utl[link_num-1] | ||
258 | #define ce_utl(link_num) ce_dtl_utl[link_num-1] | ||
259 | struct ce_dtl_utl_reg { | ||
260 | /* DTL */ | ||
261 | u64 ce_dtl_dtdr_credit_limit; /* 0x00y000 */ | ||
262 | u64 ce_dtl_dtdr_credit_force; /* 0x00y008 */ | ||
263 | u64 ce_dtl_dyn_credit_status; /* 0x00y010 */ | ||
264 | u64 ce_dtl_dtl_last_credit_stat; /* 0x00y018 */ | ||
265 | u64 ce_dtl_dtl_ctrl; /* 0x00y020 */ | ||
266 | u64 ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */ | ||
267 | u64 ce_dtl_debug_sel; /* 0x00y050 */ | ||
268 | u64 ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */ | ||
269 | |||
270 | /* UTL */ | ||
271 | u64 ce_utl_utl_ctrl; /* 0x00z000 */ | ||
272 | u64 ce_utl_debug_sel; /* 0x00z008 */ | ||
273 | u64 ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */ | ||
274 | } ce_dtl_utl[2]; | ||
275 | |||
276 | u64 ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */ | ||
277 | |||
278 | /* | ||
279 | * URE: Upstream Request Engine | ||
280 | */ | ||
281 | u64 ce_ure_dyn_credit_status; /* 0x00B010 */ | ||
282 | u64 ce_ure_last_credit_status; /* 0x00B018 */ | ||
283 | u64 ce_ure_credit_limit; /* 0x00B020 */ | ||
284 | u64 ce_pad_00B028; /* 0x00B028 */ | ||
285 | u64 ce_ure_control; /* 0x00B030 */ | ||
286 | u64 ce_ure_status; /* 0x00B038 */ | ||
287 | u64 ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */ | ||
288 | u64 ce_ure_debug_sel; /* 0x00B050 */ | ||
289 | u64 ce_ure_pcie_debug_sel; /* 0x00B058 */ | ||
290 | u64 ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */ | ||
291 | u64 ce_ure_ssp_err_addr; /* 0x00B068 */ | ||
292 | u64 ce_ure_page_map; /* 0x00B070 */ | ||
293 | u64 ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */ | ||
294 | u64 ce_ure_pipe_sel1; /* 0x00B088 */ | ||
295 | u64 ce_ure_pipe_mask1; /* 0x00B090 */ | ||
296 | u64 ce_ure_pipe_sel2; /* 0x00B098 */ | ||
297 | u64 ce_ure_pipe_mask2; /* 0x00B0A0 */ | ||
298 | u64 ce_ure_pcie1_credits_sent; /* 0x00B0A8 */ | ||
299 | u64 ce_ure_pcie1_credits_used; /* 0x00B0B0 */ | ||
300 | u64 ce_ure_pcie1_credit_limit; /* 0x00B0B8 */ | ||
301 | u64 ce_ure_pcie2_credits_sent; /* 0x00B0C0 */ | ||
302 | u64 ce_ure_pcie2_credits_used; /* 0x00B0C8 */ | ||
303 | u64 ce_ure_pcie2_credit_limit; /* 0x00B0D0 */ | ||
304 | u64 ce_ure_pcie_force_credit; /* 0x00B0D8 */ | ||
305 | u64 ce_ure_rd_tnum_val; /* 0x00B0E0 */ | ||
306 | u64 ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */ | ||
307 | u64 ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */ | ||
308 | u64 ce_ure_rd_tnum_error; /* 0x00B0F8 */ | ||
309 | u64 ce_ure_rd_tnum_first_cl; /* 0x00B100 */ | ||
310 | u64 ce_ure_rd_tnum_link_buf; /* 0x00B108 */ | ||
311 | u64 ce_ure_wr_tnum_val; /* 0x00B110 */ | ||
312 | u64 ce_ure_sram_err_addr0; /* 0x00B118 */ | ||
313 | u64 ce_ure_sram_err_addr1; /* 0x00B120 */ | ||
314 | u64 ce_ure_sram_err_addr2; /* 0x00B128 */ | ||
315 | u64 ce_ure_sram_rd_addr0; /* 0x00B130 */ | ||
316 | u64 ce_ure_sram_rd_addr1; /* 0x00B138 */ | ||
317 | u64 ce_ure_sram_rd_addr2; /* 0x00B140 */ | ||
318 | u64 ce_ure_sram_wr_addr0; /* 0x00B148 */ | ||
319 | u64 ce_ure_sram_wr_addr1; /* 0x00B150 */ | ||
320 | u64 ce_ure_sram_wr_addr2; /* 0x00B158 */ | ||
321 | u64 ce_ure_buf_flush10; /* 0x00B160 */ | ||
322 | u64 ce_ure_buf_flush11; /* 0x00B168 */ | ||
323 | u64 ce_ure_buf_flush12; /* 0x00B170 */ | ||
324 | u64 ce_ure_buf_flush13; /* 0x00B178 */ | ||
325 | u64 ce_ure_buf_flush20; /* 0x00B180 */ | ||
326 | u64 ce_ure_buf_flush21; /* 0x00B188 */ | ||
327 | u64 ce_ure_buf_flush22; /* 0x00B190 */ | ||
328 | u64 ce_ure_buf_flush23; /* 0x00B198 */ | ||
329 | u64 ce_ure_pcie_control1; /* 0x00B1A0 */ | ||
330 | u64 ce_ure_pcie_control2; /* 0x00B1A8 */ | ||
331 | |||
332 | u64 ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */ | ||
333 | |||
334 | /* Upstream Data Buffer, Port1 */ | ||
335 | struct ce_ure_maint_ups_dat1_data { | ||
336 | u64 data63_0[512]; /* 0x00C000 -- 0x00CFF8 */ | ||
337 | u64 data127_64[512]; /* 0x00D000 -- 0x00DFF8 */ | ||
338 | u64 parity[512]; /* 0x00E000 -- 0x00EFF8 */ | ||
339 | } ce_ure_maint_ups_dat1; | ||
340 | |||
341 | /* Upstream Header Buffer, Port1 */ | ||
342 | struct ce_ure_maint_ups_hdr1_data { | ||
343 | u64 data63_0[512]; /* 0x00F000 -- 0x00FFF8 */ | ||
344 | u64 data127_64[512]; /* 0x010000 -- 0x010FF8 */ | ||
345 | u64 parity[512]; /* 0x011000 -- 0x011FF8 */ | ||
346 | } ce_ure_maint_ups_hdr1; | ||
347 | |||
348 | /* Upstream Data Buffer, Port2 */ | ||
349 | struct ce_ure_maint_ups_dat2_data { | ||
350 | u64 data63_0[512]; /* 0x012000 -- 0x012FF8 */ | ||
351 | u64 data127_64[512]; /* 0x013000 -- 0x013FF8 */ | ||
352 | u64 parity[512]; /* 0x014000 -- 0x014FF8 */ | ||
353 | } ce_ure_maint_ups_dat2; | ||
354 | |||
355 | /* Upstream Header Buffer, Port2 */ | ||
356 | struct ce_ure_maint_ups_hdr2_data { | ||
357 | u64 data63_0[512]; /* 0x015000 -- 0x015FF8 */ | ||
358 | u64 data127_64[512]; /* 0x016000 -- 0x016FF8 */ | ||
359 | u64 parity[512]; /* 0x017000 -- 0x017FF8 */ | ||
360 | } ce_ure_maint_ups_hdr2; | ||
361 | |||
362 | /* Downstream Data Buffer */ | ||
363 | struct ce_ure_maint_dns_dat_data { | ||
364 | u64 data63_0[512]; /* 0x018000 -- 0x018FF8 */ | ||
365 | u64 data127_64[512]; /* 0x019000 -- 0x019FF8 */ | ||
366 | u64 parity[512]; /* 0x01A000 -- 0x01AFF8 */ | ||
367 | } ce_ure_maint_dns_dat; | ||
368 | |||
369 | /* Downstream Header Buffer */ | ||
370 | struct ce_ure_maint_dns_hdr_data { | ||
371 | u64 data31_0[64]; /* 0x01B000 -- 0x01B1F8 */ | ||
372 | u64 data95_32[64]; /* 0x01B200 -- 0x01B3F8 */ | ||
373 | u64 parity[64]; /* 0x01B400 -- 0x01B5F8 */ | ||
374 | } ce_ure_maint_dns_hdr; | ||
375 | |||
376 | /* RCI Buffer Data */ | ||
377 | struct ce_ure_maint_rci_data { | ||
378 | u64 data41_0[64]; /* 0x01B600 -- 0x01B7F8 */ | ||
379 | u64 data69_42[64]; /* 0x01B800 -- 0x01B9F8 */ | ||
380 | } ce_ure_maint_rci; | ||
381 | |||
382 | /* Response Queue */ | ||
383 | u64 ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */ | ||
384 | |||
385 | u64 ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */ | ||
386 | |||
387 | /* Admin Build-a-Packet Buffer */ | ||
388 | struct ce_adm_maint_bap_buf_data { | ||
389 | u64 data63_0[258]; /* 0x024000 -- 0x024808 */ | ||
390 | u64 data127_64[258]; /* 0x024810 -- 0x025018 */ | ||
391 | u64 parity[258]; /* 0x025020 -- 0x025828 */ | ||
392 | } ce_adm_maint_bap_buf; | ||
393 | |||
394 | u64 ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */ | ||
395 | |||
396 | /* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */ | ||
397 | u64 ce_ure_ate40[TIOCE_NUM_M40_ATES]; | ||
398 | |||
399 | /* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */ | ||
400 | u64 ce_ure_ate3240[TIOCE_NUM_M3240_ATES]; | ||
401 | |||
402 | u64 ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */ | ||
403 | |||
404 | /* | ||
405 | * DRE: Down Stream Request Engine | ||
406 | */ | ||
407 | u64 ce_dre_dyn_credit_status1; /* 0x040010 */ | ||
408 | u64 ce_dre_dyn_credit_status2; /* 0x040018 */ | ||
409 | u64 ce_dre_last_credit_status1; /* 0x040020 */ | ||
410 | u64 ce_dre_last_credit_status2; /* 0x040028 */ | ||
411 | u64 ce_dre_credit_limit1; /* 0x040030 */ | ||
412 | u64 ce_dre_credit_limit2; /* 0x040038 */ | ||
413 | u64 ce_dre_force_credit1; /* 0x040040 */ | ||
414 | u64 ce_dre_force_credit2; /* 0x040048 */ | ||
415 | u64 ce_dre_debug_mux1; /* 0x040050 */ | ||
416 | u64 ce_dre_debug_mux2; /* 0x040058 */ | ||
417 | u64 ce_dre_ssp_err_cmd_wrd; /* 0x040060 */ | ||
418 | u64 ce_dre_ssp_err_addr; /* 0x040068 */ | ||
419 | u64 ce_dre_comp_err_cmd_wrd; /* 0x040070 */ | ||
420 | u64 ce_dre_comp_err_addr; /* 0x040078 */ | ||
421 | u64 ce_dre_req_status; /* 0x040080 */ | ||
422 | u64 ce_dre_config1; /* 0x040088 */ | ||
423 | u64 ce_dre_config2; /* 0x040090 */ | ||
424 | u64 ce_dre_config_req_status; /* 0x040098 */ | ||
425 | u64 ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */ | ||
426 | u64 ce_dre_dyn_fifo; /* 0x040100 */ | ||
427 | u64 ce_pad_040108[3]; /* 0x040108 -- 0x040118 */ | ||
428 | u64 ce_dre_last_fifo; /* 0x040120 */ | ||
429 | |||
430 | u64 ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */ | ||
431 | |||
432 | /* DRE Downstream Head Queue */ | ||
433 | struct ce_dre_maint_ds_head_queue { | ||
434 | u64 data63_0[32]; /* 0x040200 -- 0x0402F8 */ | ||
435 | u64 data127_64[32]; /* 0x040300 -- 0x0403F8 */ | ||
436 | u64 parity[32]; /* 0x040400 -- 0x0404F8 */ | ||
437 | } ce_dre_maint_ds_head_q; | ||
438 | |||
439 | u64 ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */ | ||
440 | |||
441 | /* DRE Downstream Data Queue */ | ||
442 | struct ce_dre_maint_ds_data_queue { | ||
443 | u64 data63_0[256]; /* 0x041000 -- 0x0417F8 */ | ||
444 | u64 ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */ | ||
445 | u64 data127_64[256]; /* 0x042000 -- 0x0427F8 */ | ||
446 | u64 ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */ | ||
447 | u64 parity[256]; /* 0x043000 -- 0x0437F8 */ | ||
448 | u64 ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */ | ||
449 | } ce_dre_maint_ds_data_q; | ||
450 | |||
451 | /* DRE URE Upstream Response Queue */ | ||
452 | struct ce_dre_maint_ure_us_rsp_queue { | ||
453 | u64 data63_0[8]; /* 0x044000 -- 0x044038 */ | ||
454 | u64 ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */ | ||
455 | u64 data127_64[8]; /* 0x044100 -- 0x044138 */ | ||
456 | u64 ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */ | ||
457 | u64 parity[8]; /* 0x044200 -- 0x044238 */ | ||
458 | u64 ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */ | ||
459 | } ce_dre_maint_ure_us_rsp_q; | ||
460 | |||
461 | u64 ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */ | ||
462 | |||
463 | u64 ce_end_of_struct; /* 0x044400 */ | ||
464 | } tioce_t; | ||
465 | |||
466 | /* ce_lsiX_gb_cfg1 register bit masks & shifts */ | ||
467 | #define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0 | ||
468 | #define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0) | ||
469 | #define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8 | ||
470 | #define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8) | ||
471 | #define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12 | ||
472 | #define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12) | ||
473 | #define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15 | ||
474 | #define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15) | ||
475 | #define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16 | ||
476 | #define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16) | ||
477 | #define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18 | ||
478 | #define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18) | ||
479 | #define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19 | ||
480 | #define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19) | ||
481 | #define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20 | ||
482 | #define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20) | ||
483 | #define CE_LSI_GB_CFG1_SLF_TS_SHFT 24 | ||
484 | #define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24) | ||
485 | |||
486 | /* ce_adm_int_mask/ce_adm_int_status register bit defines */ | ||
487 | #define CE_ADM_INT_CE_ERROR_SHFT 0 | ||
488 | #define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1 | ||
489 | #define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2 | ||
490 | #define CE_ADM_INT_PCIE_ERROR_SHFT 3 | ||
491 | #define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4 | ||
492 | #define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5 | ||
493 | #define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6 | ||
494 | #define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7 | ||
495 | #define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8 | ||
496 | #define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9 | ||
497 | #define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10 | ||
498 | #define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11 | ||
499 | #define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12 | ||
500 | #define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13 | ||
501 | #define CE_ADM_INT_PCIE_MSG_SHFT 14 /*see int_dest_14*/ | ||
502 | #define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14 | ||
503 | #define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15 | ||
504 | #define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16 | ||
505 | #define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17 | ||
506 | #define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22 | ||
507 | #define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23 | ||
508 | |||
509 | /* ce_adm_force_int register bit defines */ | ||
510 | #define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0 | ||
511 | #define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1 | ||
512 | #define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2 | ||
513 | #define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3 | ||
514 | #define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4 | ||
515 | #define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5 | ||
516 | #define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6 | ||
517 | #define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7 | ||
518 | #define CE_ADM_FORCE_INT_ALWAYS_SHFT 8 | ||
519 | |||
520 | /* ce_adm_int_dest register bit masks & shifts */ | ||
521 | #define INTR_VECTOR_SHFT 56 | ||
522 | |||
523 | /* ce_adm_error_mask and ce_adm_error_summary register bit masks */ | ||
524 | #define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0) | ||
525 | #define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1) | ||
526 | #define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2) | ||
527 | #define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3) | ||
528 | #define CE_ADM_ERR_SSP_SBE (0x1ULL << 4) | ||
529 | #define CE_ADM_ERR_SSP_MBE (0x1ULL << 5) | ||
530 | #define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6) | ||
531 | #define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7) | ||
532 | #define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8) | ||
533 | #define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9) | ||
534 | #define CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10) | ||
535 | #define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11) | ||
536 | #define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12) | ||
537 | #define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13) | ||
538 | #define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14) | ||
539 | #define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15) | ||
540 | #define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16) | ||
541 | #define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17) | ||
542 | #define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18) | ||
543 | #define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19) | ||
544 | #define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20) | ||
545 | #define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21) | ||
546 | #define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22) | ||
547 | #define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23) | ||
548 | #define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24) | ||
549 | #define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25) | ||
550 | #define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26) | ||
551 | #define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27) | ||
552 | #define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28) | ||
553 | #define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29) | ||
554 | #define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30) | ||
555 | #define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31) | ||
556 | #define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32) | ||
557 | #define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33) | ||
558 | #define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34) | ||
559 | #define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35) | ||
560 | #define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36) | ||
561 | #define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37) | ||
562 | #define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38) | ||
563 | #define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39) | ||
564 | #define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40) | ||
565 | #define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41) | ||
566 | #define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42) | ||
567 | #define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43) | ||
568 | #define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44) | ||
569 | #define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45) | ||
570 | #define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46) | ||
571 | #define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47) | ||
572 | #define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48) | ||
573 | #define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49) | ||
574 | #define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50) | ||
575 | #define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51) | ||
576 | #define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52) | ||
577 | #define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53) | ||
578 | #define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54) | ||
579 | #define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55) | ||
580 | #define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56) | ||
581 | #define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57) | ||
582 | #define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58) | ||
583 | #define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59) | ||
584 | #define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60) | ||
585 | #define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61) | ||
586 | |||
587 | /* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */ | ||
588 | #define FLUSH_SEL_PORT1_PIPE0_SHFT 0 | ||
589 | #define FLUSH_SEL_PORT1_PIPE1_SHFT 4 | ||
590 | #define FLUSH_SEL_PORT1_PIPE2_SHFT 8 | ||
591 | #define FLUSH_SEL_PORT1_PIPE3_SHFT 12 | ||
592 | #define FLUSH_SEL_PORT2_PIPE0_SHFT 16 | ||
593 | #define FLUSH_SEL_PORT2_PIPE1_SHFT 20 | ||
594 | #define FLUSH_SEL_PORT2_PIPE2_SHFT 24 | ||
595 | #define FLUSH_SEL_PORT2_PIPE3_SHFT 28 | ||
596 | |||
597 | /* ce_dre_config1 register bit masks and shifts */ | ||
598 | #define CE_DRE_RO_ENABLE (0x1ULL << 0) | ||
599 | #define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1) | ||
600 | #define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2) | ||
601 | #define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3) | ||
602 | #define CE_DRE_ADDR_MODE_SHFT 4 | ||
603 | |||
604 | /* ce_dre_config_req_status register bit masks */ | ||
605 | #define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0) | ||
606 | #define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3) | ||
607 | #define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4) | ||
608 | #define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5) | ||
609 | |||
610 | /* ce_ure_control register bit masks & shifts */ | ||
611 | #define CE_URE_RD_MRG_ENABLE (0x1ULL << 0) | ||
612 | #define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4) | ||
613 | #define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5) | ||
614 | #define CE_URE_WRT_MRG_TIMER_SHFT 12 | ||
615 | #define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT) | ||
616 | #define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \ | ||
617 | CE_URE_WRT_MRG_TIMER_SHFT) & \ | ||
618 | CE_URE_WRT_MRG_TIMER_MASK) | ||
619 | #define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24) | ||
620 | #define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32) | ||
621 | #define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33) | ||
622 | #define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34) | ||
623 | #define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35) | ||
624 | #define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36) | ||
625 | #define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37) | ||
626 | #define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38) | ||
627 | #define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39) | ||
628 | #define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40) | ||
629 | #define CE_URE_MALFORM_DISABLE (0x1ULL << 44) | ||
630 | #define CE_URE_UNSUP_DISABLE (0x1ULL << 45) | ||
631 | |||
632 | /* ce_ure_page_map register bit masks & shifts */ | ||
633 | #define CE_URE_ATE3240_ENABLE (0x1ULL << 0) | ||
634 | #define CE_URE_ATE40_ENABLE (0x1ULL << 1) | ||
635 | #define CE_URE_PAGESIZE_SHFT 4 | ||
636 | #define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT) | ||
637 | #define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT) | ||
638 | #define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT) | ||
639 | #define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT) | ||
640 | #define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT) | ||
641 | #define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT) | ||
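As a brief illustration of how these fields combine (a sketch, not taken from the removed provider code), enabling the 32/40-bit ATE map with a 16KB mapping granularity would look like:

u64 page_map = CE_URE_ATE3240_ENABLE | CE_URE_16K_PAGESIZE;
/* page_map is the value that would then be written to ce_ure_page_map. */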
642 | |||
643 | /* ce_ure_pipe_sel register bit masks & shifts */ | ||
644 | #define PKT_TRAFIC_SHRT 16 | ||
645 | #define BUS_SRC_ID_SHFT 8 | ||
646 | #define DEV_SRC_ID_SHFT 3 | ||
647 | #define FNC_SRC_ID_SHFT 0 | ||
648 | #define CE_URE_TC_MASK (0x07ULL << PKT_TRAFIC_SHRT) | ||
649 | #define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT) | ||
650 | #define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT) | ||
651 | #define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT) | ||
652 | #define CE_URE_PIPE_BUS(b) (((u64)(b) << BUS_SRC_ID_SHFT) & \ | ||
653 | CE_URE_BUS_MASK) | ||
654 | #define CE_URE_PIPE_DEV(d) (((u64)(d) << DEV_SRC_ID_SHFT) & \ | ||
655 | CE_URE_DEV_MASK) | ||
656 | #define CE_URE_PIPE_FNC(f) (((u64)(f) << FNC_SRC_ID_SHFT) & \ | ||
657 | CE_URE_FNC_MASK) | ||
658 | |||
659 | #define CE_URE_SEL1_SHFT 0 | ||
660 | #define CE_URE_SEL2_SHFT 20 | ||
661 | #define CE_URE_SEL3_SHFT 40 | ||
662 | #define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT) | ||
663 | #define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT) | ||
664 | #define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT) | ||
665 | |||
666 | |||
667 | /* ce_ure_pipe_mask register bit masks & shifts */ | ||
668 | #define CE_URE_MASK1_SHFT 0 | ||
669 | #define CE_URE_MASK2_SHFT 20 | ||
670 | #define CE_URE_MASK3_SHFT 40 | ||
671 | #define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT) | ||
672 | #define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT) | ||
673 | #define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT) | ||
674 | |||
675 | |||
676 | /* ce_ure_pcie_control1 register bit masks & shifts */ | ||
677 | #define CE_URE_SI (0x1ULL << 0) | ||
678 | #define CE_URE_ELAL_SHFT 4 | ||
679 | #define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT) | ||
680 | #define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \ | ||
681 | CE_URE_ELAL_MASK) | ||
682 | #define CE_URE_ELAL1_SHFT 8 | ||
683 | #define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT) | ||
684 | #define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \ | ||
685 | CE_URE_ELAL1_MASK) | ||
686 | #define CE_URE_SCC (0x1ULL << 12) | ||
687 | #define CE_URE_PN1_SHFT 16 | ||
688 | #define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT) | ||
689 | #define CE_URE_PN2_SHFT 24 | ||
690 | #define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT) | ||
691 | #define CE_URE_PN1_SET(n) (((u64)(n) << CE_URE_PN1_SHFT) & \ | ||
692 | CE_URE_PN1_MASK) | ||
693 | #define CE_URE_PN2_SET(n) (((u64)(n) << CE_URE_PN2_SHFT) & \ | ||
694 | CE_URE_PN2_MASK) | ||
695 | |||
696 | /* ce_ure_pcie_control2 register bit masks & shifts */ | ||
697 | #define CE_URE_ABP (0x1ULL << 0) | ||
698 | #define CE_URE_PCP (0x1ULL << 1) | ||
699 | #define CE_URE_MSP (0x1ULL << 2) | ||
700 | #define CE_URE_AIP (0x1ULL << 3) | ||
701 | #define CE_URE_PIP (0x1ULL << 4) | ||
702 | #define CE_URE_HPS (0x1ULL << 5) | ||
703 | #define CE_URE_HPC (0x1ULL << 6) | ||
704 | #define CE_URE_SPLV_SHFT 7 | ||
705 | #define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT) | ||
706 | #define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \ | ||
707 | CE_URE_SPLV_MASK) | ||
708 | #define CE_URE_SPLS_SHFT 15 | ||
709 | #define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT) | ||
710 | #define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \ | ||
711 | CE_URE_SPLS_MASK) | ||
712 | #define CE_URE_PSN1_SHFT 19 | ||
713 | #define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT) | ||
714 | #define CE_URE_PSN2_SHFT 32 | ||
715 | #define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT) | ||
716 | #define CE_URE_PSN1_SET(n) (((u64)(n) << CE_URE_PSN1_SHFT) & \ | ||
717 | CE_URE_PSN1_MASK) | ||
718 | #define CE_URE_PSN2_SET(n) (((u64)(n) << CE_URE_PSN2_SHFT) & \ | ||
719 | CE_URE_PSN2_MASK) | ||
720 | |||
721 | /* | ||
722 | * PIO address space ranges for CE | ||
723 | */ | ||
724 | |||
725 | /* Local CE Registers Space */ | ||
726 | #define CE_PIO_MMR 0x00000000 | ||
727 | #define CE_PIO_MMR_LEN 0x04000000 | ||
728 | |||
729 | /* PCI Compatible Config Space */ | ||
730 | #define CE_PIO_CONFIG_SPACE 0x04000000 | ||
731 | #define CE_PIO_CONFIG_SPACE_LEN 0x04000000 | ||
732 | |||
733 | /* PCI I/O Space Alias */ | ||
734 | #define CE_PIO_IO_SPACE_ALIAS 0x08000000 | ||
735 | #define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000 | ||
736 | |||
737 | /* PCI Enhanced Config Space */ | ||
738 | #define CE_PIO_E_CONFIG_SPACE 0x10000000 | ||
739 | #define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000 | ||
740 | |||
741 | /* PCI I/O Space */ | ||
742 | #define CE_PIO_IO_SPACE 0x100000000 | ||
743 | #define CE_PIO_IO_SPACE_LEN 0x100000000 | ||
744 | |||
745 | /* PCI MEM Space */ | ||
746 | #define CE_PIO_MEM_SPACE 0x200000000 | ||
747 | #define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE | ||
748 | |||
749 | |||
750 | /* | ||
751 | * CE PCI Enhanced Config Space shifts & masks | ||
752 | */ | ||
753 | #define CE_E_CONFIG_BUS_SHFT 20 | ||
754 | #define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT) | ||
755 | #define CE_E_CONFIG_DEVICE_SHFT 15 | ||
756 | #define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT) | ||
757 | #define CE_E_CONFIG_FUNC_SHFT 12 | ||
758 | #define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT) | ||
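These shifts follow the usual bus/device/function layout of enhanced configuration addressing. A small sketch of composing an offset from them; the helper ce_ecfg_offset() is illustrative and not part of the removed header:

static inline unsigned long
ce_ecfg_offset(unsigned int bus, unsigned int dev, unsigned int fn,
	       unsigned int reg)
{
	/* Place bus/device/function into their fields and add the
	 * register offset within the function's config space. */
	return ((bus << CE_E_CONFIG_BUS_SHFT) & CE_E_CONFIG_BUS_MASK) |
	       ((dev << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) |
	       ((fn << CE_E_CONFIG_FUNC_SHFT) & CE_E_CONFIG_FUNC_MASK) |
	       reg;
}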
759 | |||
760 | #endif /* __ASM_IA64_SN_TIOCE_H__ */ | ||
diff --git a/arch/ia64/include/asm/sn/tioce_provider.h b/arch/ia64/include/asm/sn/tioce_provider.h deleted file mode 100644 index 32c32f30b099..000000000000 --- a/arch/ia64/include/asm/sn/tioce_provider.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_CE_PROVIDER_H | ||
10 | #define _ASM_IA64_SN_CE_PROVIDER_H | ||
11 | |||
12 | #include <asm/sn/pcibus_provider_defs.h> | ||
13 | #include <asm/sn/tioce.h> | ||
14 | |||
15 | /* | ||
16 | * Common TIOCE structure shared between the prom and kernel | ||
17 | * | ||
18 | * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE | ||
19 | * PROM VERSION. | ||
20 | */ | ||
21 | struct tioce_common { | ||
22 | struct pcibus_bussoft ce_pcibus; /* common pciio header */ | ||
23 | |||
24 | u32 ce_rev; | ||
25 | u64 ce_kernel_private; | ||
26 | u64 ce_prom_private; | ||
27 | }; | ||
28 | |||
29 | struct tioce_kernel { | ||
30 | struct tioce_common *ce_common; | ||
31 | spinlock_t ce_lock; | ||
32 | struct list_head ce_dmamap_list; | ||
33 | |||
34 | u64 ce_ate40_shadow[TIOCE_NUM_M40_ATES]; | ||
35 | u64 ce_ate3240_shadow[TIOCE_NUM_M3240_ATES]; | ||
36 | u32 ce_ate3240_pagesize; | ||
37 | |||
38 | u8 ce_port1_secondary; | ||
39 | |||
40 | /* per-port resources */ | ||
41 | struct { | ||
42 | int dirmap_refcnt; | ||
43 | u64 dirmap_shadow; | ||
44 | } ce_port[TIOCE_NUM_PORTS]; | ||
45 | }; | ||
46 | |||
47 | struct tioce_dmamap { | ||
48 | struct list_head ce_dmamap_list; /* headed by tioce_kernel */ | ||
49 | u32 refcnt; | ||
50 | |||
51 | u64 nbytes; /* # bytes mapped */ | ||
52 | |||
53 | u64 ct_start; /* coretalk start address */ | ||
54 | u64 pci_start; /* bus start address */ | ||
55 | |||
56 | u64 __iomem *ate_hw;/* hw ptr of first ate in map */ | ||
57 | u64 *ate_shadow; /* shadow ptr of first ate */ | ||
58 | u16 ate_count; /* # ate's in the map */ | ||
59 | }; | ||
60 | |||
61 | extern int tioce_init_provider(void); | ||
62 | |||
63 | #endif /* _ASM_IA64_SN_CE_PROVIDER_H */ | ||
diff --git a/arch/ia64/include/asm/sn/tiocp.h b/arch/ia64/include/asm/sn/tiocp.h deleted file mode 100644 index e8ad0bb5b6c5..000000000000 --- a/arch/ia64/include/asm/sn/tiocp.h +++ /dev/null | |||
@@ -1,257 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_TIOCP_H | ||
9 | #define _ASM_IA64_SN_PCI_TIOCP_H | ||
10 | |||
11 | #define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL | ||
12 | #define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60) | ||
13 | #define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60) | ||
14 | |||
15 | |||
16 | /***************************************************************************** | ||
17 | *********************** TIOCP MMR structure mapping *************************** | ||
18 | *****************************************************************************/ | ||
19 | |||
20 | struct tiocp{ | ||
21 | |||
22 | /* 0x000000-0x00FFFF -- Local Registers */ | ||
23 | |||
24 | /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */ | ||
25 | u64 cp_id; /* 0x000000 */ | ||
26 | u64 cp_stat; /* 0x000008 */ | ||
27 | u64 cp_err_upper; /* 0x000010 */ | ||
28 | u64 cp_err_lower; /* 0x000018 */ | ||
29 | #define cp_err cp_err_lower | ||
30 | u64 cp_control; /* 0x000020 */ | ||
31 | u64 cp_req_timeout; /* 0x000028 */ | ||
32 | u64 cp_intr_upper; /* 0x000030 */ | ||
33 | u64 cp_intr_lower; /* 0x000038 */ | ||
34 | #define cp_intr cp_intr_lower | ||
35 | u64 cp_err_cmdword; /* 0x000040 */ | ||
36 | u64 _pad_000048; /* 0x000048 */ | ||
37 | u64 cp_tflush; /* 0x000050 */ | ||
38 | |||
39 | /* 0x000058-0x00007F -- Bridge-specific Configuration */ | ||
40 | u64 cp_aux_err; /* 0x000058 */ | ||
41 | u64 cp_resp_upper; /* 0x000060 */ | ||
42 | u64 cp_resp_lower; /* 0x000068 */ | ||
43 | #define cp_resp cp_resp_lower | ||
44 | u64 cp_tst_pin_ctrl; /* 0x000070 */ | ||
45 | u64 cp_addr_lkerr; /* 0x000078 */ | ||
46 | |||
47 | /* 0x000080-0x00008F -- PMU & MAP */ | ||
48 | u64 cp_dir_map; /* 0x000080 */ | ||
49 | u64 _pad_000088; /* 0x000088 */ | ||
50 | |||
51 | /* 0x000090-0x00009F -- SSRAM */ | ||
52 | u64 cp_map_fault; /* 0x000090 */ | ||
53 | u64 _pad_000098; /* 0x000098 */ | ||
54 | |||
55 | /* 0x0000A0-0x0000AF -- Arbitration */ | ||
56 | u64 cp_arb; /* 0x0000A0 */ | ||
57 | u64 _pad_0000A8; /* 0x0000A8 */ | ||
58 | |||
59 | /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ | ||
60 | u64 cp_ate_parity_err; /* 0x0000B0 */ | ||
61 | u64 _pad_0000B8; /* 0x0000B8 */ | ||
62 | |||
63 | /* 0x0000C0-0x0000FF -- PCI/GIO */ | ||
64 | u64 cp_bus_timeout; /* 0x0000C0 */ | ||
65 | u64 cp_pci_cfg; /* 0x0000C8 */ | ||
66 | u64 cp_pci_err_upper; /* 0x0000D0 */ | ||
67 | u64 cp_pci_err_lower; /* 0x0000D8 */ | ||
68 | #define cp_pci_err cp_pci_err_lower | ||
69 | u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */ | ||
70 | |||
71 | /* 0x000100-0x0001FF -- Interrupt */ | ||
72 | u64 cp_int_status; /* 0x000100 */ | ||
73 | u64 cp_int_enable; /* 0x000108 */ | ||
74 | u64 cp_int_rst_stat; /* 0x000110 */ | ||
75 | u64 cp_int_mode; /* 0x000118 */ | ||
76 | u64 cp_int_device; /* 0x000120 */ | ||
77 | u64 cp_int_host_err; /* 0x000128 */ | ||
78 | u64 cp_int_addr[8]; /* 0x0001{30,,,68} */ | ||
79 | u64 cp_err_int_view; /* 0x000170 */ | ||
80 | u64 cp_mult_int; /* 0x000178 */ | ||
81 | u64 cp_force_always[8]; /* 0x0001{80,,,B8} */ | ||
82 | u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */ | ||
83 | |||
84 | /* 0x000200-0x000298 -- Device */ | ||
85 | u64 cp_device[4]; /* 0x0002{00,,,18} */ | ||
86 | u64 _pad_000220[4]; /* 0x0002{20,,,38} */ | ||
87 | u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */ | ||
88 | u64 _pad_000260[4]; /* 0x0002{60,,,78} */ | ||
89 | u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */ | ||
90 | #define cp_even_resp cp_rrb_map[0] /* 0x000280 */ | ||
91 | #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */ | ||
92 | u64 cp_resp_status; /* 0x000290 */ | ||
93 | u64 cp_resp_clear; /* 0x000298 */ | ||
94 | |||
95 | u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */ | ||
96 | |||
97 | /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ | ||
98 | struct { | ||
99 | u64 upper; /* 0x0003{00,,,F0} */ | ||
100 | u64 lower; /* 0x0003{08,,,F8} */ | ||
101 | } cp_buf_addr_match[16]; | ||
102 | |||
103 | /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ | ||
104 | struct { | ||
105 | u64 flush_w_touch; /* 0x000{400,,,5C0} */ | ||
106 | u64 flush_wo_touch; /* 0x000{408,,,5C8} */ | ||
107 | u64 inflight; /* 0x000{410,,,5D0} */ | ||
108 | u64 prefetch; /* 0x000{418,,,5D8} */ | ||
109 | u64 total_pci_retry; /* 0x000{420,,,5E0} */ | ||
110 | u64 max_pci_retry; /* 0x000{428,,,5E8} */ | ||
111 | u64 max_latency; /* 0x000{430,,,5F0} */ | ||
112 | u64 clear_all; /* 0x000{438,,,5F8} */ | ||
113 | } cp_buf_count[8]; | ||
114 | |||
115 | |||
116 | /* 0x000600-0x0009FF -- PCI/X registers */ | ||
117 | u64 cp_pcix_bus_err_addr; /* 0x000600 */ | ||
118 | u64 cp_pcix_bus_err_attr; /* 0x000608 */ | ||
119 | u64 cp_pcix_bus_err_data; /* 0x000610 */ | ||
120 | u64 cp_pcix_pio_split_addr; /* 0x000618 */ | ||
121 | u64 cp_pcix_pio_split_attr; /* 0x000620 */ | ||
122 | u64 cp_pcix_dma_req_err_attr; /* 0x000628 */ | ||
123 | u64 cp_pcix_dma_req_err_addr; /* 0x000630 */ | ||
124 | u64 cp_pcix_timeout; /* 0x000638 */ | ||
125 | |||
126 | u64 _pad_000640[24]; /* 0x000{640,,,6F8} */ | ||
127 | |||
128 | /* 0x000700-0x000737 -- Debug Registers */ | ||
129 | u64 cp_ct_debug_ctl; /* 0x000700 */ | ||
130 | u64 cp_br_debug_ctl; /* 0x000708 */ | ||
131 | u64 cp_mux3_debug_ctl; /* 0x000710 */ | ||
132 | u64 cp_mux4_debug_ctl; /* 0x000718 */ | ||
133 | u64 cp_mux5_debug_ctl; /* 0x000720 */ | ||
134 | u64 cp_mux6_debug_ctl; /* 0x000728 */ | ||
135 | u64 cp_mux7_debug_ctl; /* 0x000730 */ | ||
136 | |||
137 | u64 _pad_000738[89]; /* 0x000{738,,,9F8} */ | ||
138 | |||
139 | /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ | ||
140 | struct { | ||
141 | u64 cp_buf_addr; /* 0x000{A00,,,AF0} */ | ||
142 | u64 cp_buf_attr; /* 0X000{A08,,,AF8} */ | ||
143 | } cp_pcix_read_buf_64[16]; | ||
144 | |||
145 | struct { | ||
146 | u64 cp_buf_addr; /* 0x000{B00,,,BE0} */ | ||
147 | u64 cp_buf_attr; /* 0x000{B08,,,BE8} */ | ||
148 | u64 cp_buf_valid; /* 0x000{B10,,,BF0} */ | ||
149 | u64 __pad1; /* 0x000{B18,,,BF8} */ | ||
150 | } cp_pcix_write_buf_64[8]; | ||
151 | |||
152 | /* End of Local Registers -- Start of Address Map space */ | ||
153 | |||
154 | char _pad_000c00[0x010000 - 0x000c00]; | ||
155 | |||
156 | /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */ | ||
157 | u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */ | ||
158 | |||
159 | char _pad_012000[0x14000 - 0x012000]; | ||
160 | |||
161 | /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */ | ||
162 | u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */ | ||
163 | |||
164 | char _pad_016000[0x18000 - 0x016000]; | ||
165 | |||
166 | /* 0x18000-0x197F8 -- TIOCP Write Request Ram */ | ||
167 | u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ | ||
168 | u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ | ||
169 | u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ | ||
170 | |||
171 | char _pad_019800[0x1C000 - 0x019800]; | ||
172 | |||
173 | /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */ | ||
174 | u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */ | ||
175 | u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */ | ||
176 | u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */ | ||
177 | |||
178 | char _pad_01F000[0x20000 - 0x01F000]; | ||
179 | |||
180 | /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */ | ||
181 | char _pad_020000[0x021000 - 0x20000]; | ||
182 | |||
183 | /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */ | ||
184 | union { | ||
185 | u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ | ||
186 | u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ | ||
187 | u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ | ||
188 | u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ | ||
189 | union { | ||
190 | u8 c[0x100 / 1]; | ||
191 | u16 s[0x100 / 2]; | ||
192 | u32 l[0x100 / 4]; | ||
193 | u64 d[0x100 / 8]; | ||
194 | } f[8]; | ||
195 | } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */ | ||
196 | |||
197 | /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ | ||
198 | union { | ||
199 | u8 c[0x1000 / 1]; /* 0x028000-0x029000 */ | ||
200 | u16 s[0x1000 / 2]; /* 0x028000-0x029000 */ | ||
201 | u32 l[0x1000 / 4]; /* 0x028000-0x029000 */ | ||
202 | u64 d[0x1000 / 8]; /* 0x028000-0x029000 */ | ||
203 | union { | ||
204 | u8 c[0x100 / 1]; | ||
205 | u16 s[0x100 / 2]; | ||
206 | u32 l[0x100 / 4]; | ||
207 | u64 d[0x100 / 8]; | ||
208 | } f[8]; | ||
209 | } cp_type1_cfg; /* 0x028000-0x029000 */ | ||
210 | |||
211 | char _pad_029000[0x030000-0x029000]; | ||
212 | |||
213 | /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ | ||
214 | union { | ||
215 | u8 c[8 / 1]; | ||
216 | u16 s[8 / 2]; | ||
217 | u32 l[8 / 4]; | ||
218 | u64 d[8 / 8]; | ||
219 | } cp_pci_iack; /* 0x030000-0x030007 */ | ||
220 | |||
221 | char _pad_030007[0x040000-0x030008]; | ||
222 | |||
223 | /* 0x040000-0x040007 -- PCIX Special Cycle */ | ||
224 | union { | ||
225 | u8 c[8 / 1]; | ||
226 | u16 s[8 / 2]; | ||
227 | u32 l[8 / 4]; | ||
228 | u64 d[8 / 8]; | ||
229 | } cp_pcix_cycle; /* 0x040000-0x040007 */ | ||
230 | |||
231 | char _pad_040007[0x200000-0x040008]; | ||
232 | |||
233 | /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */ | ||
234 | union { | ||
235 | u8 c[0x100000 / 1]; | ||
236 | u16 s[0x100000 / 2]; | ||
237 | u32 l[0x100000 / 4]; | ||
238 | u64 d[0x100000 / 8]; | ||
239 | } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */ | ||
240 | |||
241 | #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)] | ||
242 | |||
243 | char _pad_800000[0xA00000-0x800000]; | ||
244 | |||
245 | /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */ | ||
246 | union { | ||
247 | u8 c[0x100000 / 1]; | ||
248 | u16 s[0x100000 / 2]; | ||
249 | u32 l[0x100000 / 4]; | ||
250 | u64 d[0x100000 / 8]; | ||
251 | } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */ | ||
252 | |||
253 | #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)] | ||
254 | |||
255 | }; | ||
256 | |||
257 | #endif /* _ASM_IA64_SN_PCI_TIOCP_H */ | ||
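The cp_devio()/cp_devio_flush() macros above fold the four usable device windows onto the six raw device spaces. Spelled out below; the index mapping is derived mechanically from the macro, while the stated rationale is an assumption.

/* cp_devio(n) index mapping from ((n) < 2) ? (n*2) : (n+2):
 *   cp_devio(0) -> cp_devio_raw[0]
 *   cp_devio(1) -> cp_devio_raw[2]
 *   cp_devio(2) -> cp_devio_raw[4]
 *   cp_devio(3) -> cp_devio_raw[5]
 * presumably so that devices 0 and 1 can be given double-sized windows
 * spanning raw spaces 0-1 and 2-3, while devices 2 and 3 get single
 * 1MB raw spaces. */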
diff --git a/arch/ia64/include/asm/sn/tiocx.h b/arch/ia64/include/asm/sn/tiocx.h deleted file mode 100644 index d29728492f36..000000000000 --- a/arch/ia64/include/asm/sn/tiocx.h +++ /dev/null | |||
@@ -1,72 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_TIO_TIOCX_H | ||
10 | #define _ASM_IA64_SN_TIO_TIOCX_H | ||
11 | |||
12 | #ifdef __KERNEL__ | ||
13 | |||
14 | struct cx_id_s { | ||
15 | unsigned int part_num; | ||
16 | unsigned int mfg_num; | ||
17 | int nasid; | ||
18 | }; | ||
19 | |||
20 | struct cx_dev { | ||
21 | struct cx_id_s cx_id; | ||
22 | int bt; /* board/blade type */ | ||
23 | void *soft; /* driver specific */ | ||
24 | struct hubdev_info *hubdev; | ||
25 | struct device dev; | ||
26 | struct cx_drv *driver; | ||
27 | }; | ||
28 | |||
29 | struct cx_device_id { | ||
30 | unsigned int part_num; | ||
31 | unsigned int mfg_num; | ||
32 | }; | ||
33 | |||
34 | struct cx_drv { | ||
35 | char *name; | ||
36 | const struct cx_device_id *id_table; | ||
37 | struct device_driver driver; | ||
38 | int (*probe) (struct cx_dev * dev, const struct cx_device_id * id); | ||
39 | int (*remove) (struct cx_dev * dev); | ||
40 | }; | ||
41 | |||
42 | /* create DMA address by stripping AS bits */ | ||
43 | #define TIOCX_DMA_ADDR(a) (u64)((u64)(a) & 0xffffcfffffffffUL) | ||
44 | |||
45 | #define TIOCX_TO_TIOCX_DMA_ADDR(a) (u64)(((u64)(a) & 0xfffffffff) | \ | ||
46 | ((((u64)(a)) & 0xffffc000000000UL) <<2)) | ||
47 | |||
48 | #define TIO_CE_ASIC_PARTNUM 0xce00 | ||
49 | #define TIOCX_CORELET 3 | ||
50 | |||
51 | /* These are taken from tio_mmr_as.h */ | ||
52 | #define TIO_ICE_FRZ_CFG TIO_MMR_ADDR_MOD(0x00000000b0008100UL) | ||
53 | #define TIO_ICE_PMI_TX_CFG TIO_MMR_ADDR_MOD(0x00000000b000b100UL) | ||
54 | #define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL) | ||
55 | #define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL | ||
56 | |||
57 | #define to_cx_dev(n) container_of(n, struct cx_dev, dev) | ||
58 | #define to_cx_driver(drv) container_of(drv, struct cx_drv, driver) | ||
59 | |||
60 | extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int); | ||
61 | extern void tiocx_irq_free(struct sn_irq_info *); | ||
62 | extern int cx_device_unregister(struct cx_dev *); | ||
63 | extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int); | ||
64 | extern int cx_driver_unregister(struct cx_drv *); | ||
65 | extern int cx_driver_register(struct cx_drv *); | ||
66 | extern u64 tiocx_dma_addr(u64 addr); | ||
67 | extern u64 tiocx_swin_base(int nasid); | ||
68 | extern void tiocx_mmr_store(int nasid, u64 offset, u64 value); | ||
69 | extern u64 tiocx_mmr_load(int nasid, u64 offset); | ||
70 | |||
71 | #endif // __KERNEL__ | ||
72 | #endif // _ASM_IA64_SN_TIO_TIOCX_H | ||
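The TIOCX_DMA_ADDR macro in the header removed above builds a DMA address by masking off the address-space (AS) bits. A rough stand-alone sketch of that masking, with the mask value copied from the header and a made-up sample address (everything else in the snippet is illustrative, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mask copied from the removed tiocx.h: clears bits 36-37 and bits 56 and up. */
#define TIOCX_DMA_ADDR(a) ((uint64_t)((uint64_t)(a) & 0xffffcfffffffffUL))

int main(void)
{
        uint64_t addr = 0x0000003012345678ULL;  /* made-up address with AS bits set */

        printf("in:  0x%016" PRIx64 "\n", addr);
        printf("out: 0x%016" PRIx64 "\n", TIOCX_DMA_ADDR(addr));  /* 0x0000000012345678 */
        return 0;
}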
diff --git a/arch/ia64/include/asm/sn/types.h b/arch/ia64/include/asm/sn/types.h deleted file mode 100644 index 8e04ee211e59..000000000000 --- a/arch/ia64/include/asm/sn/types.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | * Copyright (C) 1999 by Ralf Baechle | ||
8 | */ | ||
9 | #ifndef _ASM_IA64_SN_TYPES_H | ||
10 | #define _ASM_IA64_SN_TYPES_H | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | |||
14 | typedef unsigned long cpuid_t; | ||
15 | typedef signed short nasid_t; /* node id in numa-as-id space */ | ||
16 | typedef signed char partid_t; /* partition ID type */ | ||
17 | typedef unsigned int moduleid_t; /* user-visible module number type */ | ||
18 | typedef unsigned int cmoduleid_t; /* kernel compact module id type */ | ||
19 | typedef unsigned char slotid_t; /* slot (blade) within module */ | ||
20 | typedef unsigned char slabid_t; /* slab (asic) within slot */ | ||
21 | typedef u64 nic_t; | ||
22 | typedef unsigned long iopaddr_t; | ||
23 | typedef unsigned long paddr_t; | ||
24 | typedef short cnodeid_t; | ||
25 | |||
26 | #endif /* _ASM_IA64_SN_TYPES_H */ | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index c597ab5275b8..a63e472f5317 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -96,8 +96,6 @@ acpi_get_sysname(void) | |||
96 | } else if (!strcmp(hdr->oem_id, "SGI")) { | 96 | } else if (!strcmp(hdr->oem_id, "SGI")) { |
97 | if (!strcmp(hdr->oem_table_id + 4, "UV")) | 97 | if (!strcmp(hdr->oem_table_id + 4, "UV")) |
98 | return "uv"; | 98 | return "uv"; |
99 | else | ||
100 | return "sn2"; | ||
101 | } | 99 | } |
102 | 100 | ||
103 | #ifdef CONFIG_INTEL_IOMMU | 101 | #ifdef CONFIG_INTEL_IOMMU |
@@ -407,7 +405,7 @@ get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) | |||
407 | int pxm; | 405 | int pxm; |
408 | 406 | ||
409 | pxm = pa->proximity_domain_lo; | 407 | pxm = pa->proximity_domain_lo; |
410 | if (ia64_platform_is("sn2") || acpi_srat_revision >= 2) | 408 | if (acpi_srat_revision >= 2) |
411 | pxm += pa->proximity_domain_hi[0] << 8; | 409 | pxm += pa->proximity_domain_hi[0] << 8; |
412 | return pxm; | 410 | return pxm; |
413 | } | 411 | } |
@@ -418,7 +416,7 @@ get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) | |||
418 | int pxm; | 416 | int pxm; |
419 | 417 | ||
420 | pxm = ma->proximity_domain; | 418 | pxm = ma->proximity_domain; |
421 | if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1) | 419 | if (acpi_srat_revision <= 1) |
422 | pxm &= 0xff; | 420 | pxm &= 0xff; |
423 | 421 | ||
424 | return pxm; | 422 | return pxm; |
@@ -710,9 +708,8 @@ int __init acpi_boot_init(void) | |||
710 | 708 | ||
711 | if (acpi_table_parse_madt | 709 | if (acpi_table_parse_madt |
712 | (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { | 710 | (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { |
713 | if (!ia64_platform_is("sn2")) | 711 | printk(KERN_ERR PREFIX |
714 | printk(KERN_ERR PREFIX | 712 | "Error parsing MADT - no IOSAPIC entries\n"); |
715 | "Error parsing MADT - no IOSAPIC entries\n"); | ||
716 | } | 713 | } |
717 | 714 | ||
718 | /* System-Level Interrupt Routing */ | 715 | /* System-Level Interrupt Routing */ |
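The SRAT code kept by this hunk widens the proximity domain only for revision-2+ tables. A minimal sketch of that arithmetic, using simplified stand-in structures (the field names mirror the ACPI CPU-affinity entry; the values are invented):

#include <stdio.h>

/* Simplified stand-in for the ACPI SRAT CPU-affinity entry used above. */
struct cpu_affinity {
        unsigned char proximity_domain_lo;
        unsigned char proximity_domain_hi[3];
};

static int srat_cpu_pxm(const struct cpu_affinity *pa, int srat_revision)
{
        int pxm = pa->proximity_domain_lo;

        /* Revision 2+ tables carry the upper 8 bits in proximity_domain_hi[0]. */
        if (srat_revision >= 2)
                pxm += pa->proximity_domain_hi[0] << 8;
        return pxm;
}

int main(void)
{
        struct cpu_affinity pa = { .proximity_domain_lo = 0x34,
                                   .proximity_domain_hi = { 0x12, 0, 0 } };

        printf("rev1 pxm = 0x%x\n", srat_cpu_pxm(&pa, 1));  /* 0x34   */
        printf("rev3 pxm = 0x%x\n", srat_cpu_pxm(&pa, 3));  /* 0x1234 */
        return 0;
}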
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 8ed81b252694..6d17d26caf98 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -73,17 +73,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | |||
73 | irq_redir[irq] = (char) (redir & 0xff); | 73 | irq_redir[irq] = (char) (redir & 0xff); |
74 | } | 74 | } |
75 | } | 75 | } |
76 | |||
77 | bool is_affinity_mask_valid(const struct cpumask *cpumask) | ||
78 | { | ||
79 | if (ia64_platform_is("sn2")) { | ||
80 | /* Only allow one CPU to be specified in the smp_affinity mask */ | ||
81 | if (cpumask_weight(cpumask) != 1) | ||
82 | return false; | ||
83 | } | ||
84 | return true; | ||
85 | } | ||
86 | |||
87 | #endif /* CONFIG_SMP */ | 76 | #endif /* CONFIG_SMP */ |
88 | 77 | ||
89 | int __init arch_early_irq_init(void) | 78 | int __init arch_early_irq_init(void) |
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index 9b2331ac10ce..17085a8078fe 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c | |||
@@ -110,13 +110,6 @@ check_versions (struct ia64_sal_systab *systab) | |||
110 | sal_revision = SAL_VERSION_CODE(2, 8); | 110 | sal_revision = SAL_VERSION_CODE(2, 8); |
111 | sal_version = SAL_VERSION_CODE(0, 0); | 111 | sal_version = SAL_VERSION_CODE(0, 0); |
112 | } | 112 | } |
113 | |||
114 | if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9))) | ||
115 | /* | ||
116 | * SGI Altix has hard-coded version 2.9 in their prom | ||
117 | * but they actually implement 3.2, so let's fix it here. | ||
118 | */ | ||
119 | sal_revision = SAL_VERSION_CODE(3, 2); | ||
120 | } | 113 | } |
121 | 114 | ||
122 | static void __init | 115 | static void __init |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index a0480a4e65c1..78d0d22dd17e 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -260,11 +260,11 @@ __initcall(register_memory); | |||
260 | * in kdump case. See the comment in sba_init() in sba_iommu.c. | 260 | * in kdump case. See the comment in sba_init() in sba_iommu.c. |
261 | * | 261 | * |
262 | * So, the only machvec that really supports loading the kdump kernel | 262 | * So, the only machvec that really supports loading the kdump kernel |
263 | * over 4 GB is "sn2". | 263 | * over 4 GB is "uv". |
264 | */ | 264 | */ |
265 | static int __init check_crashkernel_memory(unsigned long pbase, size_t size) | 265 | static int __init check_crashkernel_memory(unsigned long pbase, size_t size) |
266 | { | 266 | { |
267 | if (ia64_platform_is("sn2") || ia64_platform_is("uv")) | 267 | if (ia64_platform_is("uv")) |
268 | return 1; | 268 | return 1; |
269 | else | 269 | else |
270 | return pbase < (1UL << 32); | 270 | return pbase < (1UL << 32); |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index d0474a0c67db..df56f739dd11 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -57,7 +57,6 @@ | |||
57 | #include <asm/sal.h> | 57 | #include <asm/sal.h> |
58 | #include <asm/tlbflush.h> | 58 | #include <asm/tlbflush.h> |
59 | #include <asm/unistd.h> | 59 | #include <asm/unistd.h> |
60 | #include <asm/sn/arch.h> | ||
61 | 60 | ||
62 | #define SMP_DEBUG 0 | 61 | #define SMP_DEBUG 0 |
63 | 62 | ||
@@ -658,11 +657,6 @@ int __cpu_disable(void) | |||
658 | return (-EBUSY); | 657 | return (-EBUSY); |
659 | } | 658 | } |
660 | 659 | ||
661 | if (ia64_platform_is("sn2")) { | ||
662 | if (!sn_cpu_disable_allowed(cpu)) | ||
663 | return -EBUSY; | ||
664 | } | ||
665 | |||
666 | set_cpu_online(cpu, false); | 660 | set_cpu_online(cpu, false); |
667 | 661 | ||
668 | if (migrate_platform_irqs(cpu)) { | 662 | if (migrate_platform_irqs(cpu)) { |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 16c6d377c502..3776ef225125 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | #include <linux/atomic.h> | 25 | #include <linux/atomic.h> |
26 | #include <asm/tlbflush.h> | 26 | #include <asm/tlbflush.h> |
27 | #include <asm/sn/arch.h> | ||
28 | 27 | ||
29 | 28 | ||
30 | extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); | 29 | extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *); |
@@ -129,10 +128,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
129 | 128 | ||
130 | preempt_disable(); | 129 | preempt_disable(); |
131 | 130 | ||
132 | if (ia64_platform_is("sn2")) | 131 | flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); |
133 | sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE); | ||
134 | else | ||
135 | flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); | ||
136 | 132 | ||
137 | /* flush the just introduced uncached translation from the TLB */ | 133 | /* flush the just introduced uncached translation from the TLB */ |
138 | local_flush_tlb_all(); | 134 | local_flush_tlb_all(); |
diff --git a/arch/ia64/sn/Makefile b/arch/ia64/sn/Makefile deleted file mode 100644 index 79a7df02e812..000000000000 --- a/arch/ia64/sn/Makefile +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | # arch/ia64/sn/Makefile | ||
2 | # | ||
3 | # This file is subject to the terms and conditions of the GNU General Public | ||
4 | # License. See the file "COPYING" in the main directory of this archive | ||
5 | # for more details. | ||
6 | # | ||
7 | # Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved. | ||
8 | # | ||
9 | # Makefile for the sn ia64 subplatform | ||
10 | # | ||
11 | |||
12 | obj-y += kernel/ pci/ | ||
diff --git a/arch/ia64/sn/include/ioerror.h b/arch/ia64/sn/include/ioerror.h deleted file mode 100644 index e68f2b0789a7..000000000000 --- a/arch/ia64/sn/include/ioerror.h +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_IOERROR_H | ||
9 | #define _ASM_IA64_SN_IOERROR_H | ||
10 | |||
11 | /* | ||
12 | * IO error structure. | ||
13 | * | ||
14 | * This structure would expand to hold the information retrieved from | ||
15 | * all IO related error registers. | ||
16 | * | ||
17 | * This structure is defined to hold all system specific | ||
18 | * information related to a single error. | ||
19 | * | ||
20 | * This serves a couple of purposes. | ||
21 | * - Error handling often involves translating one form of address to another. | ||
22 | * So, instead of having different data structures at each level, | ||
23 | * we have a single structure, and the appropriate fields get filled in | ||
24 | * at each layer. | ||
25 | * - This provides a way to dump all error related information in any layer | ||
26 | * of error handling (debugging aid). | ||
27 | * | ||
28 | * A second possibility is to allow each layer to define its own error | ||
29 | * data structure, and fill in the proper fields. This has the advantage | ||
30 | * of isolating the layers. | ||
31 | * A big concern is the potential stack usage (and overflow), if each layer | ||
32 | * defines these structures on stack (assuming we don't want to do kmalloc). | ||
33 | * | ||
34 | * Any layer wishing to pass extra information to a layer next to it in | ||
35 | * error handling hierarchy, can do so as a separate parameter. | ||
36 | */ | ||
37 | |||
38 | typedef struct io_error_s { | ||
39 | /* Bit fields indicating which structure fields are valid */ | ||
40 | union { | ||
41 | struct { | ||
42 | unsigned ievb_errortype:1; | ||
43 | unsigned ievb_widgetnum:1; | ||
44 | unsigned ievb_widgetdev:1; | ||
45 | unsigned ievb_srccpu:1; | ||
46 | unsigned ievb_srcnode:1; | ||
47 | unsigned ievb_errnode:1; | ||
48 | unsigned ievb_sysioaddr:1; | ||
49 | unsigned ievb_xtalkaddr:1; | ||
50 | unsigned ievb_busspace:1; | ||
51 | unsigned ievb_busaddr:1; | ||
52 | unsigned ievb_vaddr:1; | ||
53 | unsigned ievb_memaddr:1; | ||
54 | unsigned ievb_epc:1; | ||
55 | unsigned ievb_ef:1; | ||
56 | unsigned ievb_tnum:1; | ||
57 | } iev_b; | ||
58 | unsigned iev_a; | ||
59 | } ie_v; | ||
60 | |||
61 | short ie_errortype; /* error type: extra info about error */ | ||
62 | short ie_widgetnum; /* Widget number that's in error */ | ||
63 | short ie_widgetdev; /* Device within widget in error */ | ||
64 | cpuid_t ie_srccpu; /* CPU on srcnode generating error */ | ||
65 | cnodeid_t ie_srcnode; /* Node which caused the error */ | ||
66 | cnodeid_t ie_errnode; /* Node where error was noticed */ | ||
67 | iopaddr_t ie_sysioaddr; /* Sys specific IO address */ | ||
68 | iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */ | ||
69 | iopaddr_t ie_busspace; /* Bus specific address space */ | ||
70 | iopaddr_t ie_busaddr; /* Bus specific address */ | ||
71 | caddr_t ie_vaddr; /* Virtual address of error */ | ||
72 | iopaddr_t ie_memaddr; /* Physical memory address */ | ||
73 | caddr_t ie_epc; /* pc when error reported */ | ||
74 | caddr_t ie_ef; /* eframe when error reported */ | ||
75 | short ie_tnum; /* Xtalk TNUM field */ | ||
76 | } ioerror_t; | ||
77 | |||
78 | #define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0) | ||
79 | #define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0) | ||
80 | |||
81 | #endif /* _ASM_IA64_SN_IOERROR_H */ | ||
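The IOERROR_INIT/IOERROR_SETVALUE macros of the removed header pair every value field with a valid bit via token pasting on the ie_/ievb_ prefixes. A cut-down, compilable sketch of that pattern, keeping only two of the fields so it builds without the SN type headers:

#include <stdio.h>

/* Trimmed-down ioerror_t: each value field has a matching valid bit,
 * and the macros below keep the two in sync (same pattern as above). */
typedef struct io_error_s {
        union {
                struct {
                        unsigned ievb_errortype:1;
                        unsigned ievb_widgetnum:1;
                } iev_b;
                unsigned iev_a;
        } ie_v;
        short ie_errortype;
        short ie_widgetnum;
} ioerror_t;

#define IOERROR_INIT(e)         do { (e)->ie_v.iev_a = 0; } while (0)
#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); \
                                     (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)

int main(void)
{
        ioerror_t err;

        IOERROR_INIT(&err);                    /* clears all valid bits      */
        IOERROR_SETVALUE(&err, widgetnum, 9);  /* sets field and valid bit   */

        printf("widgetnum=%d valid=%u errortype-valid=%u\n",
               err.ie_widgetnum, err.ie_v.iev_b.ievb_widgetnum,
               err.ie_v.iev_b.ievb_errortype);
        return 0;
}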
diff --git a/arch/ia64/sn/include/tio.h b/arch/ia64/sn/include/tio.h deleted file mode 100644 index 6b2e7b75eb19..000000000000 --- a/arch/ia64/sn/include/tio.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_IA64_SN_TIO_H | ||
10 | #define _ASM_IA64_SN_TIO_H | ||
11 | |||
12 | #define TIO_MMR_ADDR_MOD | ||
13 | |||
14 | #define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80) | ||
15 | |||
16 | #define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */ | ||
17 | #define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin)) | ||
18 | |||
19 | #define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */ | ||
20 | #define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1) | ||
21 | #define TIO_ITTE_OFFSET_SHIFT 0 | ||
22 | |||
23 | #define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */ | ||
24 | #define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1) | ||
25 | #define TIO_ITTE_WIDGET_SHIFT 12 | ||
26 | #define TIO_ITTE_VALID_MASK 0x1 | ||
27 | #define TIO_ITTE_VALID_SHIFT 16 | ||
28 | |||
29 | #define TIO_ITTE_WIDGET(itte) \ | ||
30 | (((itte) >> TIO_ITTE_WIDGET_SHIFT) & TIO_ITTE_WIDGET_MASK) | ||
31 | #define TIO_ITTE_VALID(itte) \ | ||
32 | (((itte) >> TIO_ITTE_VALID_SHIFT) & TIO_ITTE_VALID_MASK) | ||
33 | |||
34 | #define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \ | ||
35 | REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \ | ||
36 | (((((addr) >> TIO_BWIN_SIZE_BITS) & \ | ||
37 | TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \ | ||
38 | (((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \ | ||
39 | (( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT)) | ||
40 | |||
41 | #endif /* _ASM_IA64_SN_TIO_H */ | ||
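The ITTE accessors above are plain shift-and-mask operations on a packed translation-table entry. A stand-alone sketch that composes an entry from already-extracted field values (the address-to-offset conversion and the REMOTE_HUB_S store in TIO_ITTE_PUT are left out) and reads the fields back:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the removed tio.h. */
#define TIO_ITTE_OFFSET_SHIFT  0
#define TIO_ITTE_WIDGET_BITS   2
#define TIO_ITTE_WIDGET_MASK   ((1 << TIO_ITTE_WIDGET_BITS) - 1)
#define TIO_ITTE_WIDGET_SHIFT  12
#define TIO_ITTE_VALID_MASK    0x1
#define TIO_ITTE_VALID_SHIFT   16

#define TIO_ITTE_WIDGET(itte) \
        (((itte) >> TIO_ITTE_WIDGET_SHIFT) & TIO_ITTE_WIDGET_MASK)
#define TIO_ITTE_VALID(itte) \
        (((itte) >> TIO_ITTE_VALID_SHIFT) & TIO_ITTE_VALID_MASK)

int main(void)
{
        /* Compose an entry: offset 0x2a, widget 3, valid bit set. */
        uint64_t itte = (0x2aULL << TIO_ITTE_OFFSET_SHIFT) |
                        ((3ULL & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT) |
                        ((1ULL & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT);

        printf("widget=%llu valid=%llu\n",
               (unsigned long long)TIO_ITTE_WIDGET(itte),
               (unsigned long long)TIO_ITTE_VALID(itte));
        return 0;
}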
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h deleted file mode 100644 index 8182583c762c..000000000000 --- a/arch/ia64/sn/include/xtalk/hubdev.h +++ /dev/null | |||
@@ -1,91 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H | ||
9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H | ||
10 | |||
11 | #include "xtalk/xwidgetdev.h" | ||
12 | |||
13 | #define HUB_WIDGET_ID_MAX 0xf | ||
14 | #define DEV_PER_WIDGET (2*2*8) | ||
15 | #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ | ||
16 | #define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1) | ||
17 | #define IIO_ITTE_WIDGET_SHIFT 8 | ||
18 | |||
19 | #define IIO_ITTE_WIDGET(itte) \ | ||
20 | (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK) | ||
21 | |||
22 | /* | ||
23 | * Use the top big window as a surrogate for the first small window | ||
24 | */ | ||
25 | #define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW | ||
26 | #define IIO_NUM_ITTES 7 | ||
27 | #define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) | ||
28 | |||
29 | /* This struct is shared between the PROM and the kernel. | ||
30 | * Changes to this struct will require corresponding changes to the kernel. | ||
31 | */ | ||
32 | struct sn_flush_device_common { | ||
33 | int sfdl_bus; | ||
34 | int sfdl_slot; | ||
35 | int sfdl_pin; | ||
36 | struct common_bar_list { | ||
37 | unsigned long start; | ||
38 | unsigned long end; | ||
39 | } sfdl_bar_list[6]; | ||
40 | unsigned long sfdl_force_int_addr; | ||
41 | unsigned long sfdl_flush_value; | ||
42 | volatile unsigned long *sfdl_flush_addr; | ||
43 | u32 sfdl_persistent_busnum; | ||
44 | u32 sfdl_persistent_segment; | ||
45 | struct pcibus_info *sfdl_pcibus_info; | ||
46 | }; | ||
47 | |||
48 | /* This struct is kernel only and is not used by the PROM */ | ||
49 | struct sn_flush_device_kernel { | ||
50 | spinlock_t sfdl_flush_lock; | ||
51 | struct sn_flush_device_common *common; | ||
52 | }; | ||
53 | |||
54 | /* 01/16/06 This struct is the old PROM/kernel struct and needs to be included | ||
55 | * for older official PROMs to function on the new kernel base. This struct | ||
56 | * will be removed when the next official PROM release occurs. */ | ||
57 | |||
58 | struct sn_flush_device_war { | ||
59 | struct sn_flush_device_common common; | ||
60 | u32 filler; /* older PROMs expect the default size of a spinlock_t */ | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel. | ||
65 | */ | ||
66 | struct sn_flush_nasid_entry { | ||
67 | struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num | ||
68 | u64 iio_itte[8]; | ||
69 | }; | ||
70 | |||
71 | struct hubdev_info { | ||
72 | geoid_t hdi_geoid; | ||
73 | short hdi_nasid; | ||
74 | short hdi_peer_nasid; /* Dual Porting Peer */ | ||
75 | |||
76 | struct sn_flush_nasid_entry hdi_flush_nasid_list; | ||
77 | struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1]; | ||
78 | |||
79 | |||
80 | void *hdi_nodepda; | ||
81 | void *hdi_node_vertex; | ||
82 | u32 max_segment_number; | ||
83 | u32 max_pcibus_number; | ||
84 | }; | ||
85 | |||
86 | extern void hubdev_init_node(nodepda_t *, cnodeid_t); | ||
87 | extern void hub_error_init(struct hubdev_info *); | ||
88 | extern void ice_error_init(struct hubdev_info *); | ||
89 | |||
90 | |||
91 | #endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */ | ||
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h deleted file mode 100644 index 90f37a4133d0..000000000000 --- a/arch/ia64/sn/include/xtalk/xbow.h +++ /dev/null | |||
@@ -1,301 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights | ||
7 | * Reserved. | ||
8 | */ | ||
9 | #ifndef _ASM_IA64_SN_XTALK_XBOW_H | ||
10 | #define _ASM_IA64_SN_XTALK_XBOW_H | ||
11 | |||
12 | #define XBOW_PORT_8 0x8 | ||
13 | #define XBOW_PORT_C 0xc | ||
14 | #define XBOW_PORT_F 0xf | ||
15 | |||
16 | #define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */ | ||
17 | #define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */ | ||
18 | |||
19 | #define XBOW_CREDIT 4 | ||
20 | |||
21 | #define MAX_XBOW_NAME 16 | ||
22 | |||
23 | /* Register set for each xbow link */ | ||
24 | typedef volatile struct xb_linkregs_s { | ||
25 | /* | ||
26 | * we access these through synergy unswizzled space, so the address | ||
27 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) | ||
28 | * That's why we put the register first and filler second. | ||
29 | */ | ||
30 | u32 link_ibf; | ||
31 | u32 filler0; /* filler for proper alignment */ | ||
32 | u32 link_control; | ||
33 | u32 filler1; | ||
34 | u32 link_status; | ||
35 | u32 filler2; | ||
36 | u32 link_arb_upper; | ||
37 | u32 filler3; | ||
38 | u32 link_arb_lower; | ||
39 | u32 filler4; | ||
40 | u32 link_status_clr; | ||
41 | u32 filler5; | ||
42 | u32 link_reset; | ||
43 | u32 filler6; | ||
44 | u32 link_aux_status; | ||
45 | u32 filler7; | ||
46 | } xb_linkregs_t; | ||
47 | |||
48 | typedef volatile struct xbow_s { | ||
49 | /* standard widget configuration 0x000000-0x000057 */ | ||
50 | struct widget_cfg xb_widget; /* 0x000000 */ | ||
51 | |||
52 | /* helper fieldnames for accessing bridge widget */ | ||
53 | |||
54 | #define xb_wid_id xb_widget.w_id | ||
55 | #define xb_wid_stat xb_widget.w_status | ||
56 | #define xb_wid_err_upper xb_widget.w_err_upper_addr | ||
57 | #define xb_wid_err_lower xb_widget.w_err_lower_addr | ||
58 | #define xb_wid_control xb_widget.w_control | ||
59 | #define xb_wid_req_timeout xb_widget.w_req_timeout | ||
60 | #define xb_wid_int_upper xb_widget.w_intdest_upper_addr | ||
61 | #define xb_wid_int_lower xb_widget.w_intdest_lower_addr | ||
62 | #define xb_wid_err_cmdword xb_widget.w_err_cmd_word | ||
63 | #define xb_wid_llp xb_widget.w_llp_cfg | ||
64 | #define xb_wid_stat_clr xb_widget.w_tflush | ||
65 | |||
66 | /* | ||
67 | * we access these through synergy unswizzled space, so the address | ||
68 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) | ||
69 | * That's why we put the register first and filler second. | ||
70 | */ | ||
71 | /* xbow-specific widget configuration 0x000058-0x0000FF */ | ||
72 | u32 xb_wid_arb_reload; /* 0x00005C */ | ||
73 | u32 _pad_000058; | ||
74 | u32 xb_perf_ctr_a; /* 0x000064 */ | ||
75 | u32 _pad_000060; | ||
76 | u32 xb_perf_ctr_b; /* 0x00006c */ | ||
77 | u32 _pad_000068; | ||
78 | u32 xb_nic; /* 0x000074 */ | ||
79 | u32 _pad_000070; | ||
80 | |||
81 | /* Xbridge only */ | ||
82 | u32 xb_w0_rst_fnc; /* 0x00007C */ | ||
83 | u32 _pad_000078; | ||
84 | u32 xb_l8_rst_fnc; /* 0x000084 */ | ||
85 | u32 _pad_000080; | ||
86 | u32 xb_l9_rst_fnc; /* 0x00008c */ | ||
87 | u32 _pad_000088; | ||
88 | u32 xb_la_rst_fnc; /* 0x000094 */ | ||
89 | u32 _pad_000090; | ||
90 | u32 xb_lb_rst_fnc; /* 0x00009c */ | ||
91 | u32 _pad_000098; | ||
92 | u32 xb_lc_rst_fnc; /* 0x0000a4 */ | ||
93 | u32 _pad_0000a0; | ||
94 | u32 xb_ld_rst_fnc; /* 0x0000ac */ | ||
95 | u32 _pad_0000a8; | ||
96 | u32 xb_le_rst_fnc; /* 0x0000b4 */ | ||
97 | u32 _pad_0000b0; | ||
98 | u32 xb_lf_rst_fnc; /* 0x0000bc */ | ||
99 | u32 _pad_0000b8; | ||
100 | u32 xb_lock; /* 0x0000c4 */ | ||
101 | u32 _pad_0000c0; | ||
102 | u32 xb_lock_clr; /* 0x0000cc */ | ||
103 | u32 _pad_0000c8; | ||
104 | /* end of Xbridge only */ | ||
105 | u32 _pad_0000d0[12]; | ||
106 | |||
107 | /* Link Specific Registers, port 8..15 0x000100-0x000300 */ | ||
108 | xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS]; | ||
109 | } xbow_t; | ||
110 | |||
111 | #define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)] | ||
112 | |||
113 | #define XB_FLAGS_EXISTS 0x1 /* device exists */ | ||
114 | #define XB_FLAGS_MASTER 0x2 | ||
115 | #define XB_FLAGS_SLAVE 0x0 | ||
116 | #define XB_FLAGS_GBR 0x4 | ||
117 | #define XB_FLAGS_16BIT 0x8 | ||
118 | #define XB_FLAGS_8BIT 0x0 | ||
119 | |||
120 | /* is widget port number valid? (based on version 7.0 of xbow spec) */ | ||
121 | #define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F) | ||
122 | |||
123 | /* whether to use upper or lower arbitration register, given source widget id */ | ||
124 | #define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B) | ||
125 | #define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F) | ||
126 | |||
127 | /* offset of arbitration register, given source widget id */ | ||
128 | #define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24) | ||
129 | |||
130 | #define XBOW_WID_ID WIDGET_ID | ||
131 | #define XBOW_WID_STAT WIDGET_STATUS | ||
132 | #define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR | ||
133 | #define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR | ||
134 | #define XBOW_WID_CONTROL WIDGET_CONTROL | ||
135 | #define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT | ||
136 | #define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR | ||
137 | #define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR | ||
138 | #define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD | ||
139 | #define XBOW_WID_LLP WIDGET_LLP_CFG | ||
140 | #define XBOW_WID_STAT_CLR WIDGET_TFLUSH | ||
141 | #define XBOW_WID_ARB_RELOAD 0x5c | ||
142 | #define XBOW_WID_PERF_CTR_A 0x64 | ||
143 | #define XBOW_WID_PERF_CTR_B 0x6c | ||
144 | #define XBOW_WID_NIC 0x74 | ||
145 | |||
146 | /* Xbridge only */ | ||
147 | #define XBOW_W0_RST_FNC 0x00007C | ||
148 | #define XBOW_L8_RST_FNC 0x000084 | ||
149 | #define XBOW_L9_RST_FNC 0x00008c | ||
150 | #define XBOW_LA_RST_FNC 0x000094 | ||
151 | #define XBOW_LB_RST_FNC 0x00009c | ||
152 | #define XBOW_LC_RST_FNC 0x0000a4 | ||
153 | #define XBOW_LD_RST_FNC 0x0000ac | ||
154 | #define XBOW_LE_RST_FNC 0x0000b4 | ||
155 | #define XBOW_LF_RST_FNC 0x0000bc | ||
156 | #define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \ | ||
157 | (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \ | ||
158 | ((x) == 0) ? XBOW_W0_RST_FNC : 0 | ||
159 | #define XBOW_LOCK 0x0000c4 | ||
160 | #define XBOW_LOCK_CLR 0x0000cc | ||
161 | /* End of Xbridge only */ | ||
162 | |||
163 | /* used only in ide, but defined here within the reserved portion */ | ||
164 | /* of the widget0 address space (before 0xf4) */ | ||
165 | #define XBOW_WID_UNDEF 0xe4 | ||
166 | |||
167 | /* xbow link register set base, legal value for x is 0x8..0xf */ | ||
168 | #define XB_LINK_BASE 0x100 | ||
169 | #define XB_LINK_OFFSET 0x40 | ||
170 | #define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET) | ||
171 | |||
172 | #define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4) | ||
173 | #define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc) | ||
174 | #define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14) | ||
175 | #define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c) | ||
176 | #define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24) | ||
177 | #define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c) | ||
178 | #define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34) | ||
179 | #define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c) | ||
180 | |||
181 | /* link_control(x) */ | ||
182 | #define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */ | ||
183 | /* reserved: 0x40000000 */ | ||
184 | #define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */ | ||
185 | #define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer | ||
186 | level */ | ||
187 | #define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 | ||
188 | bit mode */ | ||
189 | #define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP | ||
190 | packet */ | ||
191 | #define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit | ||
192 | mask */ | ||
193 | #define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit | ||
194 | shift */ | ||
195 | #define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination | ||
196 | */ | ||
197 | #define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input | ||
198 | buffer */ | ||
199 | /* reserved: 0x0000fe00 */ | ||
200 | #define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */ | ||
201 | #define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */ | ||
202 | #define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */ | ||
203 | #define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */ | ||
204 | #define XB_CTRL_RCV_IE 0x00000010 /* receive */ | ||
205 | #define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */ | ||
206 | /* reserved: 0x00000004 */ | ||
207 | #define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request | ||
208 | timeout */ | ||
209 | #define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */ | ||
210 | |||
211 | /* link_status(x) */ | ||
212 | #define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE | ||
213 | /* reserved: 0x7ff80000 */ | ||
214 | #define XB_STAT_MULTI_ERR 0x00040000 /* multi error */ | ||
215 | #define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE | ||
216 | #define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE | ||
217 | #define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */ | ||
218 | #define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE | ||
219 | #define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE | ||
220 | #define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE | ||
221 | #define XB_STAT_RCV_ERR XB_CTRL_RCV_IE | ||
222 | #define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE | ||
223 | /* reserved: 0x00000004 */ | ||
224 | #define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE | ||
225 | #define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE | ||
226 | |||
227 | /* link_aux_status(x) */ | ||
228 | #define XB_AUX_STAT_RCV_CNT 0xff000000 | ||
229 | #define XB_AUX_STAT_XMT_CNT 0x00ff0000 | ||
230 | #define XB_AUX_STAT_TOUT_DST 0x0000ff00 | ||
231 | #define XB_AUX_LINKFAIL_RST_BAD 0x00000040 | ||
232 | #define XB_AUX_STAT_PRESENT 0x00000020 | ||
233 | #define XB_AUX_STAT_PORT_WIDTH 0x00000010 | ||
234 | /* reserved: 0x0000000f */ | ||
235 | |||
236 | /* | ||
237 | * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper | ||
238 | * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf | ||
239 | */ | ||
240 | #define XB_ARB_GBR_MSK 0x1f | ||
241 | #define XB_ARB_RR_MSK 0x7 | ||
242 | #define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8) | ||
243 | #define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5) | ||
244 | #define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK) | ||
245 | #define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK) | ||
246 | |||
247 | /* XBOW_WID_STAT */ | ||
248 | #define XB_WID_STAT_LINK_INTR_SHFT (24) | ||
249 | #define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT) | ||
250 | #define XB_WID_STAT_LINK_INTR(x) \ | ||
251 | (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT)) | ||
252 | #define XB_WID_STAT_WIDGET0_INTR 0x00800000 | ||
253 | #define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */ | ||
254 | #define XB_WID_STAT_REG_ACC_ERR 0x00000020 | ||
255 | #define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */ | ||
256 | #define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */ | ||
257 | #define XB_WID_STAT_XTALK_ERR 0x00000004 | ||
258 | #define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */ | ||
259 | #define XB_WID_STAT_MULTI_ERR 0x00000001 | ||
260 | |||
261 | #define XB_WID_STAT_SRCID_SHFT 6 | ||
262 | |||
263 | /* XBOW_WID_CONTROL */ | ||
264 | #define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR | ||
265 | #define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT | ||
266 | #define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT | ||
267 | #define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR | ||
268 | |||
269 | /* XBOW_WID_INT_UPPER */ | ||
270 | /* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */ | ||
271 | |||
272 | /* XBOW WIDGET part number, in the ID register */ | ||
273 | #define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */ | ||
274 | #define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */ | ||
275 | #define XBOW_WIDGET_MFGR_NUM 0x0 | ||
276 | #define XXBOW_WIDGET_MFGR_NUM 0x0 | ||
277 | #define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */ | ||
278 | |||
279 | #define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */ | ||
280 | #define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */ | ||
281 | #define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */ | ||
282 | #define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */ | ||
283 | #define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */ | ||
284 | |||
285 | #define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 ) | ||
286 | #define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 ) | ||
287 | |||
288 | /* XBOW_WID_ARB_RELOAD */ | ||
289 | #define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */ | ||
290 | |||
291 | #define IS_XBRIDGE_XBOW(wid) \ | ||
292 | (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \ | ||
293 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) | ||
294 | |||
295 | #define IS_PIC_XBOW(wid) \ | ||
296 | (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \ | ||
297 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) | ||
298 | |||
299 | #define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv) | ||
300 | |||
301 | #endif /* _ASM_IA64_SN_XTALK_XBOW_H */ | ||
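The link-register macros near the end of the removed xbow.h derive per-port register addresses with base-plus-stride arithmetic over ports 0x8..0xf. A minimal sketch of that computation with the constants copied from the header:

#include <stdio.h>

/* Link register layout copied from the removed xbow.h (ports 0x8..0xf). */
#define MAX_XBOW_PORTS   8
#define XB_LINK_BASE     0x100
#define XB_LINK_OFFSET   0x40
#define XB_LINK_REG_BASE(x) \
        (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
#define XB_LINK_CTRL(x)     (XB_LINK_REG_BASE(x) + 0xc)
#define XB_LINK_STATUS(x)   (XB_LINK_REG_BASE(x) + 0x14)

int main(void)
{
        /* Port 0x9 maps to slot 1, so its register block starts at 0x140. */
        printf("ctrl(0x9)   = 0x%x\n", XB_LINK_CTRL(0x9));    /* 0x14c */
        printf("status(0xf) = 0x%x\n", XB_LINK_STATUS(0xf));  /* 0x2d4 */
        return 0;
}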
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h deleted file mode 100644 index 2800eda0fd68..000000000000 --- a/arch/ia64/sn/include/xtalk/xwidgetdev.h +++ /dev/null | |||
@@ -1,70 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_XTALK_XWIDGET_H | ||
9 | #define _ASM_IA64_SN_XTALK_XWIDGET_H | ||
10 | |||
11 | /* WIDGET_ID */ | ||
12 | #define WIDGET_REV_NUM 0xf0000000 | ||
13 | #define WIDGET_PART_NUM 0x0ffff000 | ||
14 | #define WIDGET_MFG_NUM 0x00000ffe | ||
15 | #define WIDGET_REV_NUM_SHFT 28 | ||
16 | #define WIDGET_PART_NUM_SHFT 12 | ||
17 | #define WIDGET_MFG_NUM_SHFT 1 | ||
18 | |||
19 | #define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT) | ||
20 | #define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT) | ||
21 | #define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT) | ||
22 | #define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \ | ||
23 | XWIDGET_REV_NUM(widgetid)) | ||
24 | #define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf) | ||
25 | |||
26 | /* widget configuration registers */ | ||
27 | struct widget_cfg{ | ||
28 | u32 w_id; /* 0x04 */ | ||
29 | u32 w_pad_0; /* 0x00 */ | ||
30 | u32 w_status; /* 0x0c */ | ||
31 | u32 w_pad_1; /* 0x08 */ | ||
32 | u32 w_err_upper_addr; /* 0x14 */ | ||
33 | u32 w_pad_2; /* 0x10 */ | ||
34 | u32 w_err_lower_addr; /* 0x1c */ | ||
35 | u32 w_pad_3; /* 0x18 */ | ||
36 | u32 w_control; /* 0x24 */ | ||
37 | u32 w_pad_4; /* 0x20 */ | ||
38 | u32 w_req_timeout; /* 0x2c */ | ||
39 | u32 w_pad_5; /* 0x28 */ | ||
40 | u32 w_intdest_upper_addr; /* 0x34 */ | ||
41 | u32 w_pad_6; /* 0x30 */ | ||
42 | u32 w_intdest_lower_addr; /* 0x3c */ | ||
43 | u32 w_pad_7; /* 0x38 */ | ||
44 | u32 w_err_cmd_word; /* 0x44 */ | ||
45 | u32 w_pad_8; /* 0x40 */ | ||
46 | u32 w_llp_cfg; /* 0x4c */ | ||
47 | u32 w_pad_9; /* 0x48 */ | ||
48 | u32 w_tflush; /* 0x54 */ | ||
49 | u32 w_pad_10; /* 0x50 */ | ||
50 | }; | ||
51 | |||
52 | /* | ||
53 | * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec. | ||
54 | */ | ||
55 | struct xwidget_hwid{ | ||
56 | int mfg_num; | ||
57 | int rev_num; | ||
58 | int part_num; | ||
59 | }; | ||
60 | |||
61 | struct xwidget_info{ | ||
62 | |||
63 | struct xwidget_hwid xwi_hwid; /* Widget Identification */ | ||
64 | char xwi_masterxid; /* Hub's Widget Port Number */ | ||
65 | void *xwi_hubinfo; /* Hub's provider private info */ | ||
66 | u64 *xwi_hub_provider; /* prom provider functions */ | ||
67 | void *xwi_vertex; | ||
68 | }; | ||
69 | |||
70 | #endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */ | ||
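The WIDGET_ID accessors above decode the part, revision and manufacturer fields out of a single ID register value. A small stand-alone sketch with the masks copied from the header and a made-up register value (part 0xd000 matches the Xbridge part number defined in xbow.h):

#include <stdint.h>
#include <stdio.h>

/* WIDGET_ID field masks and shifts copied from the removed xwidgetdev.h. */
#define WIDGET_REV_NUM         0xf0000000
#define WIDGET_PART_NUM        0x0ffff000
#define WIDGET_MFG_NUM         0x00000ffe
#define WIDGET_REV_NUM_SHFT    28
#define WIDGET_PART_NUM_SHFT   12
#define WIDGET_MFG_NUM_SHFT    1

#define XWIDGET_PART_NUM(id) (((id) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
#define XWIDGET_REV_NUM(id)  (((id) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
#define XWIDGET_MFG_NUM(id)  (((id) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)

int main(void)
{
        /* Made-up ID register value: rev 2, part 0xd000 (Xbridge), mfg 0. */
        uint32_t wid = (2u << WIDGET_REV_NUM_SHFT) |
                       (0xd000u << WIDGET_PART_NUM_SHFT) |
                       (0u << WIDGET_MFG_NUM_SHFT);

        printf("part=0x%x rev=%u mfg=%u\n",
               (unsigned)XWIDGET_PART_NUM(wid),
               (unsigned)XWIDGET_REV_NUM(wid),
               (unsigned)XWIDGET_MFG_NUM(wid));
        return 0;
}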
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile deleted file mode 100644 index 2f580603370d..000000000000 --- a/arch/ia64/sn/kernel/Makefile +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | # arch/ia64/sn/kernel/Makefile | ||
2 | # | ||
3 | # This file is subject to the terms and conditions of the GNU General Public | ||
4 | # License. See the file "COPYING" in the main directory of this archive | ||
5 | # for more details. | ||
6 | # | ||
7 | # Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved. | ||
8 | # | ||
9 | |||
10 | ccflags-y := -I $(srctree)/arch/ia64/sn/include | ||
11 | |||
12 | obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ | ||
13 | huberror.o io_acpi_init.o io_common.o \ | ||
14 | io_init.o iomv.o klconflib.o pio_phys.o \ | ||
15 | sn2/ | ||
16 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | ||
17 | obj-$(CONFIG_PCI_MSI) += msi_sn.o | ||
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c deleted file mode 100644 index 9900e6d4add6..000000000000 --- a/arch/ia64/sn/kernel/bte.c +++ /dev/null | |||
@@ -1,475 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <asm/sn/nodepda.h> | ||
11 | #include <asm/sn/addrs.h> | ||
12 | #include <asm/sn/arch.h> | ||
13 | #include <asm/sn/sn_cpuid.h> | ||
14 | #include <asm/sn/pda.h> | ||
15 | #include <asm/sn/shubio.h> | ||
16 | #include <asm/nodedata.h> | ||
17 | #include <asm/delay.h> | ||
18 | |||
19 | #include <linux/memblock.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <asm/sn/bte.h> | ||
25 | |||
26 | #ifndef L1_CACHE_MASK | ||
27 | #define L1_CACHE_MASK (L1_CACHE_BYTES - 1) | ||
28 | #endif | ||
29 | |||
30 | /* two interfaces on two btes */ | ||
31 | #define MAX_INTERFACES_TO_TRY 4 | ||
32 | #define MAX_NODES_TO_TRY 2 | ||
33 | |||
34 | static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface) | ||
35 | { | ||
36 | nodepda_t *tmp_nodepda; | ||
37 | |||
38 | if (nasid_to_cnodeid(nasid) == -1) | ||
39 | return (struct bteinfo_s *)NULL; | ||
40 | |||
41 | tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid)); | ||
42 | return &tmp_nodepda->bte_if[interface]; | ||
43 | |||
44 | } | ||
45 | |||
46 | static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode) | ||
47 | { | ||
48 | if (is_shub2()) { | ||
49 | BTE_CTRL_STORE(bte, (IBLS_BUSY | ((len) | (mode) << 24))); | ||
50 | } else { | ||
51 | BTE_LNSTAT_STORE(bte, len); | ||
52 | BTE_CTRL_STORE(bte, mode); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | /************************************************************************ | ||
57 | * Block Transfer Engine copy related functions. | ||
58 | * | ||
59 | ***********************************************************************/ | ||
60 | |||
61 | /* | ||
62 | * bte_copy(src, dest, len, mode, notification) | ||
63 | * | ||
64 | * Use the block transfer engine to move kernel memory from src to dest | ||
65 | * using the assigned mode. | ||
66 | * | ||
67 | * Parameters: | ||
68 | * src - physical address of the transfer source. | ||
69 | * dest - physical address of the transfer destination. | ||
70 | * len - number of bytes to transfer from source to dest. | ||
71 | * mode - hardware defined. See reference information | ||
72 | * for IBCT0/1 in the SHUB Programmers Reference | ||
73 | * notification - kernel virtual address of the notification cache | ||
74 | * line. If NULL, the default is used and | ||
75 | * the bte_copy is synchronous. | ||
76 | * | ||
77 | * NOTE: This function requires src, dest, and len to | ||
78 | * be cacheline aligned. | ||
79 | */ | ||
80 | bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | ||
81 | { | ||
82 | u64 transfer_size; | ||
83 | u64 transfer_stat; | ||
84 | u64 notif_phys_addr; | ||
85 | struct bteinfo_s *bte; | ||
86 | bte_result_t bte_status; | ||
87 | unsigned long irq_flags; | ||
88 | unsigned long itc_end = 0; | ||
89 | int nasid_to_try[MAX_NODES_TO_TRY]; | ||
90 | int my_nasid = cpuid_to_nasid(raw_smp_processor_id()); | ||
91 | int bte_if_index, nasid_index; | ||
92 | int bte_first, btes_per_node = BTES_PER_NODE; | ||
93 | |||
94 | BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n", | ||
95 | src, dest, len, mode, notification)); | ||
96 | |||
97 | if (len == 0) { | ||
98 | return BTE_SUCCESS; | ||
99 | } | ||
100 | |||
101 | BUG_ON(len & L1_CACHE_MASK); | ||
102 | BUG_ON(src & L1_CACHE_MASK); | ||
103 | BUG_ON(dest & L1_CACHE_MASK); | ||
104 | BUG_ON(len > BTE_MAX_XFER); | ||
105 | |||
106 | /* | ||
107 | * Start with interface corresponding to cpu number | ||
108 | */ | ||
109 | bte_first = raw_smp_processor_id() % btes_per_node; | ||
110 | |||
111 | if (mode & BTE_USE_DEST) { | ||
112 | /* try remote then local */ | ||
113 | nasid_to_try[0] = NASID_GET(dest); | ||
114 | if (mode & BTE_USE_ANY) { | ||
115 | nasid_to_try[1] = my_nasid; | ||
116 | } else { | ||
117 | nasid_to_try[1] = 0; | ||
118 | } | ||
119 | } else { | ||
120 | /* try local then remote */ | ||
121 | nasid_to_try[0] = my_nasid; | ||
122 | if (mode & BTE_USE_ANY) { | ||
123 | nasid_to_try[1] = NASID_GET(dest); | ||
124 | } else { | ||
125 | nasid_to_try[1] = 0; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | retry_bteop: | ||
130 | do { | ||
131 | local_irq_save(irq_flags); | ||
132 | |||
133 | bte_if_index = bte_first; | ||
134 | nasid_index = 0; | ||
135 | |||
136 | /* Attempt to lock one of the BTE interfaces. */ | ||
137 | while (nasid_index < MAX_NODES_TO_TRY) { | ||
138 | bte = bte_if_on_node(nasid_to_try[nasid_index],bte_if_index); | ||
139 | |||
140 | if (bte == NULL) { | ||
141 | nasid_index++; | ||
142 | continue; | ||
143 | } | ||
144 | |||
145 | if (spin_trylock(&bte->spinlock)) { | ||
146 | if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) || | ||
147 | (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) { | ||
148 | /* Got the lock but BTE still busy */ | ||
149 | spin_unlock(&bte->spinlock); | ||
150 | } else { | ||
151 | /* we got the lock and it's not busy */ | ||
152 | break; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | bte_if_index = (bte_if_index + 1) % btes_per_node; /* Next interface */ | ||
157 | if (bte_if_index == bte_first) { | ||
158 | /* | ||
159 | * We've tried all interfaces on this node | ||
160 | */ | ||
161 | nasid_index++; | ||
162 | } | ||
163 | |||
164 | bte = NULL; | ||
165 | } | ||
166 | |||
167 | if (bte != NULL) { | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | local_irq_restore(irq_flags); | ||
172 | |||
173 | if (!(mode & BTE_WACQUIRE)) { | ||
174 | return BTEFAIL_NOTAVAIL; | ||
175 | } | ||
176 | } while (1); | ||
177 | |||
178 | if (notification == NULL) { | ||
179 | /* User does not want to be notified. */ | ||
180 | bte->most_rcnt_na = &bte->notify; | ||
181 | } else { | ||
182 | bte->most_rcnt_na = notification; | ||
183 | } | ||
184 | |||
185 | /* Calculate the number of cache lines to transfer. */ | ||
186 | transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK); | ||
187 | |||
188 | /* Initialize the notification to a known value. */ | ||
189 | *bte->most_rcnt_na = BTE_WORD_BUSY; | ||
190 | notif_phys_addr = (u64)bte->most_rcnt_na; | ||
191 | |||
192 | /* Set the source and destination registers */ | ||
193 | BTE_PRINTKV(("IBSA = 0x%lx)\n", src)); | ||
194 | BTE_SRC_STORE(bte, src); | ||
195 | BTE_PRINTKV(("IBDA = 0x%lx)\n", dest)); | ||
196 | BTE_DEST_STORE(bte, dest); | ||
197 | |||
198 | /* Set the notification register */ | ||
199 | BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr)); | ||
200 | BTE_NOTIF_STORE(bte, notif_phys_addr); | ||
201 | |||
202 | /* Initiate the transfer */ | ||
203 | BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); | ||
204 | bte_start_transfer(bte, transfer_size, BTE_VALID_MODE(mode)); | ||
205 | |||
206 | itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); | ||
207 | |||
208 | spin_unlock_irqrestore(&bte->spinlock, irq_flags); | ||
209 | |||
210 | if (notification != NULL) { | ||
211 | return BTE_SUCCESS; | ||
212 | } | ||
213 | |||
214 | while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) { | ||
215 | cpu_relax(); | ||
216 | if (ia64_get_itc() > itc_end) { | ||
217 | BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n", | ||
218 | NASID_GET(bte->bte_base_addr), bte->bte_num, | ||
219 | BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) ); | ||
220 | bte->bte_error_count++; | ||
221 | bte->bh_error = IBLS_ERROR; | ||
222 | bte_error_handler(NODEPDA(bte->bte_cnode)); | ||
223 | *bte->most_rcnt_na = BTE_WORD_AVAILABLE; | ||
224 | goto retry_bteop; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n", | ||
229 | BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na)); | ||
230 | |||
231 | if (transfer_stat & IBLS_ERROR) { | ||
232 | bte_status = BTE_GET_ERROR_STATUS(transfer_stat); | ||
233 | } else { | ||
234 | bte_status = BTE_SUCCESS; | ||
235 | } | ||
236 | *bte->most_rcnt_na = BTE_WORD_AVAILABLE; | ||
237 | |||
238 | BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n", | ||
239 | BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na)); | ||
240 | |||
241 | return bte_status; | ||
242 | } | ||
243 | |||
244 | EXPORT_SYMBOL(bte_copy); | ||
245 | |||
246 | /* | ||
247 | * bte_unaligned_copy(src, dest, len, mode) | ||
248 | * | ||
249 | * use the block transfer engine to move kernel | ||
250 | * memory from src to dest using the assigned mode. | ||
251 | * | ||
252 | * Parameters: | ||
253 | * src - physical address of the transfer source. | ||
254 | * dest - physical address of the transfer destination. | ||
255 | * len - number of bytes to transfer from source to dest. | ||
256 | * mode - hardware defined. See reference information | ||
257 | * for IBCT0/1 in the SGI documentation. | ||
258 | * | ||
259 | * NOTE: If the source, dest, and len are all cache line aligned, | ||
260 | * then it would be _FAR_ preferable to use bte_copy instead. | ||
261 | */ | ||
262 | bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode) | ||
263 | { | ||
264 | int destFirstCacheOffset; | ||
265 | u64 headBteSource; | ||
266 | u64 headBteLen; | ||
267 | u64 headBcopySrcOffset; | ||
268 | u64 headBcopyDest; | ||
269 | u64 headBcopyLen; | ||
270 | u64 footBteSource; | ||
271 | u64 footBteLen; | ||
272 | u64 footBcopyDest; | ||
273 | u64 footBcopyLen; | ||
274 | bte_result_t rv; | ||
275 | char *bteBlock, *bteBlock_unaligned; | ||
276 | |||
277 | if (len == 0) { | ||
278 | return BTE_SUCCESS; | ||
279 | } | ||
280 | |||
281 | /* temporary buffer used during unaligned transfers */ | ||
282 | bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL); | ||
283 | if (bteBlock_unaligned == NULL) { | ||
284 | return BTEFAIL_NOTAVAIL; | ||
285 | } | ||
286 | bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned); | ||
287 | |||
288 | headBcopySrcOffset = src & L1_CACHE_MASK; | ||
289 | destFirstCacheOffset = dest & L1_CACHE_MASK; | ||
290 | |||
291 | /* | ||
292 | * At this point, the transfer is broken into | ||
293 | * (up to) three sections. The first section is | ||
294 | * from the start address to the first physical | ||
295 | * cache line, the second is from the first physical | ||
296 | * cache line to the last complete cache line, | ||
297 | * and the third is from the last cache line to the | ||
298 | * end of the buffer. The first and third sections | ||
299 | * are handled by bte copying into a temporary buffer | ||
300 | * and then bcopy'ing the necessary section into the | ||
301 | * final location. The middle section is handled with | ||
302 | * a standard bte copy. | ||
303 | * | ||
304 | * One nasty exception to the above rule is when the | ||
305 | * source and destination are not symmetrically | ||
306 | * mis-aligned. If the source offset from the first | ||
307 | * cache line is different from the destination offset, | ||
308 | * we make the first section be the entire transfer | ||
309 | * and then bcopy the entire block into place. | ||
310 | */ | ||
311 | if (headBcopySrcOffset == destFirstCacheOffset) { | ||
312 | |||
313 | /* | ||
314 | * Both the source and destination are the same | ||
315 | * distance from a cache line boundary so we can | ||
316 | * use the bte to transfer the bulk of the | ||
317 | * data. | ||
318 | */ | ||
319 | headBteSource = src & ~L1_CACHE_MASK; | ||
320 | headBcopyDest = dest; | ||
321 | if (headBcopySrcOffset) { | ||
322 | headBcopyLen = | ||
323 | (len > | ||
324 | (L1_CACHE_BYTES - | ||
325 | headBcopySrcOffset) ? L1_CACHE_BYTES | ||
326 | - headBcopySrcOffset : len); | ||
327 | headBteLen = L1_CACHE_BYTES; | ||
328 | } else { | ||
329 | headBcopyLen = 0; | ||
330 | headBteLen = 0; | ||
331 | } | ||
332 | |||
333 | if (len > headBcopyLen) { | ||
334 | footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK; | ||
335 | footBteLen = L1_CACHE_BYTES; | ||
336 | |||
337 | footBteSource = src + len - footBcopyLen; | ||
338 | footBcopyDest = dest + len - footBcopyLen; | ||
339 | |||
340 | if (footBcopyDest == (headBcopyDest + headBcopyLen)) { | ||
341 | /* | ||
342 | * We have two contiguous bcopy | ||
343 | * blocks. Merge them. | ||
344 | */ | ||
345 | headBcopyLen += footBcopyLen; | ||
346 | headBteLen += footBteLen; | ||
347 | } else if (footBcopyLen > 0) { | ||
348 | rv = bte_copy(footBteSource, | ||
349 | ia64_tpa((unsigned long)bteBlock), | ||
350 | footBteLen, mode, NULL); | ||
351 | if (rv != BTE_SUCCESS) { | ||
352 | kfree(bteBlock_unaligned); | ||
353 | return rv; | ||
354 | } | ||
355 | |||
356 | memcpy(__va(footBcopyDest), | ||
357 | (char *)bteBlock, footBcopyLen); | ||
358 | } | ||
359 | } else { | ||
360 | footBcopyLen = 0; | ||
361 | footBteLen = 0; | ||
362 | } | ||
363 | |||
364 | if (len > (headBcopyLen + footBcopyLen)) { | ||
365 | /* now transfer the middle. */ | ||
366 | rv = bte_copy((src + headBcopyLen), | ||
367 | (dest + | ||
368 | headBcopyLen), | ||
369 | (len - headBcopyLen - | ||
370 | footBcopyLen), mode, NULL); | ||
371 | if (rv != BTE_SUCCESS) { | ||
372 | kfree(bteBlock_unaligned); | ||
373 | return rv; | ||
374 | } | ||
375 | |||
376 | } | ||
377 | } else { | ||
378 | |||
379 | /* | ||
380 | * The transfer is not symmetric, we will | ||
381 | * allocate a buffer large enough for all the | ||
382 | * data, bte_copy into that buffer and then | ||
383 | * bcopy to the destination. | ||
384 | */ | ||
385 | |||
386 | headBcopySrcOffset = src & L1_CACHE_MASK; | ||
387 | headBcopyDest = dest; | ||
388 | headBcopyLen = len; | ||
389 | |||
390 | headBteSource = src - headBcopySrcOffset; | ||
391 | /* Add the leading and trailing bytes from source */ | ||
392 | headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset); | ||
393 | } | ||
394 | |||
395 | if (headBcopyLen > 0) { | ||
396 | rv = bte_copy(headBteSource, | ||
397 | ia64_tpa((unsigned long)bteBlock), headBteLen, | ||
398 | mode, NULL); | ||
399 | if (rv != BTE_SUCCESS) { | ||
400 | kfree(bteBlock_unaligned); | ||
401 | return rv; | ||
402 | } | ||
403 | |||
404 | memcpy(__va(headBcopyDest), ((char *)bteBlock + | ||
405 | headBcopySrcOffset), headBcopyLen); | ||
406 | } | ||
407 | kfree(bteBlock_unaligned); | ||
408 | return BTE_SUCCESS; | ||
409 | } | ||
410 | |||
411 | EXPORT_SYMBOL(bte_unaligned_copy); | ||
412 | |||
413 | /************************************************************************ | ||
414 | * Block Transfer Engine initialization functions. | ||
415 | * | ||
416 | ***********************************************************************/ | ||
417 | static void bte_recovery_timeout(struct timer_list *t) | ||
418 | { | ||
419 | struct nodepda_s *nodepda = from_timer(nodepda, t, bte_recovery_timer); | ||
420 | |||
421 | bte_error_handler(nodepda); | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * bte_init_node(nodepda, cnode) | ||
426 | * | ||
427 | * Initialize the nodepda structure with BTE base addresses and | ||
428 | * spinlocks. | ||
429 | */ | ||
430 | void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode) | ||
431 | { | ||
432 | int i; | ||
433 | |||
434 | /* | ||
435 | * Indicate that all the block transfer engines on this node | ||
436 | * are available. | ||
437 | */ | ||
438 | |||
439 | /* | ||
440 | * Allocate one bte_recover_t structure per node. It holds | ||
441 | * the recovery lock for node. All the bte interface structures | ||
442 | * will point at this one bte_recover structure to get the lock. | ||
443 | */ | ||
444 | spin_lock_init(&mynodepda->bte_recovery_lock); | ||
445 | timer_setup(&mynodepda->bte_recovery_timer, bte_recovery_timeout, 0); | ||
446 | |||
447 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
448 | u64 *base_addr; | ||
449 | |||
450 | /* Which link status register should we use? */ | ||
451 | base_addr = (u64 *) | ||
452 | REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i)); | ||
453 | mynodepda->bte_if[i].bte_base_addr = base_addr; | ||
454 | mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr); | ||
455 | mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr); | ||
456 | mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr); | ||
457 | mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr); | ||
458 | |||
459 | /* | ||
460 | * Initialize the notification and spinlock | ||
461 | * so the first transfer can occur. | ||
462 | */ | ||
463 | mynodepda->bte_if[i].most_rcnt_na = | ||
464 | &(mynodepda->bte_if[i].notify); | ||
465 | mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE; | ||
466 | spin_lock_init(&mynodepda->bte_if[i].spinlock); | ||
467 | |||
468 | mynodepda->bte_if[i].bte_cnode = cnode; | ||
469 | mynodepda->bte_if[i].bte_error_count = 0; | ||
470 | mynodepda->bte_if[i].bte_num = i; | ||
471 | mynodepda->bte_if[i].cleanup_active = 0; | ||
472 | mynodepda->bte_if[i].bh_error = 0; | ||
473 | } | ||
474 | |||
475 | } | ||
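The long comment in bte_unaligned_copy() describes splitting a misaligned transfer into a bcopy'd head, a cache-line-aligned middle moved directly by the BTE, and a bcopy'd tail. A user-space sketch of just that length arithmetic for the symmetric-offset case (a 128-byte ia64 cache line is assumed; nothing here touches the BTE hardware):

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES 128                    /* assumed ia64 cache line size */
#define L1_CACHE_MASK  (L1_CACHE_BYTES - 1)

/* Split a transfer the way bte_unaligned_copy() does when source and
 * destination share the same cache-line offset: a bcopy'd head up to the
 * first cache-line boundary, an aligned middle, and a bcopy'd tail. */
static void split_transfer(uint64_t src, uint64_t len)
{
        uint64_t off  = src & L1_CACHE_MASK;
        uint64_t head = off ? (len > L1_CACHE_BYTES - off ?
                               L1_CACHE_BYTES - off : len) : 0;
        uint64_t foot = (len - head) & L1_CACHE_MASK;
        uint64_t mid  = len - head - foot;

        printf("src=0x%llx len=%llu -> head=%llu mid=%llu foot=%llu\n",
               (unsigned long long)src, (unsigned long long)len,
               (unsigned long long)head, (unsigned long long)mid,
               (unsigned long long)foot);
}

int main(void)
{
        split_transfer(0x1030, 1000);   /* misaligned start: 80 + 896 + 24 */
        split_transfer(0x2000, 4096);   /* fully aligned: all middle       */
        return 0;
}

In the asymmetric case the function instead stages the whole transfer through the bounce buffer, as the else branch above shows.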
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c deleted file mode 100644 index d92786c09b34..000000000000 --- a/arch/ia64/sn/kernel/bte_error.c +++ /dev/null | |||
@@ -1,255 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <asm/sn/sn_sal.h> | ||
11 | #include "ioerror.h" | ||
12 | #include <asm/sn/addrs.h> | ||
13 | #include <asm/sn/shubio.h> | ||
14 | #include <asm/sn/geo.h> | ||
15 | #include "xtalk/xwidgetdev.h" | ||
16 | #include "xtalk/hubdev.h" | ||
17 | #include <asm/sn/bte.h> | ||
18 | #include <asm/param.h> | ||
19 | |||
20 | /* | ||
21 | * BTE error handling is done in two parts. The first part captures | ||
22 | * any CRB-related errors. Since there can be multiple CRBs per | ||
23 | * interface and multiple interfaces active, we need to wait until | ||
24 | * all active CRBs are completed; that is the first job of the | ||
25 | * second-part error handler. When all BTE-related CRBs have cleanly | ||
26 | * completed, it resets the interfaces and gets them ready for new | ||
27 | * transfers to be queued. | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * Wait until all BTE related CRBs are completed | ||
32 | * and then reset the interfaces. | ||
33 | */ | ||
34 | static int shub1_bte_error_handler(struct nodepda_s *err_nodepda) | ||
35 | { | ||
36 | struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; | ||
37 | nasid_t nasid; | ||
38 | int i; | ||
39 | int valid_crbs; | ||
40 | ii_imem_u_t imem; /* II IMEM Register */ | ||
41 | ii_icrb0_d_u_t icrbd; /* II CRB Register D */ | ||
42 | ii_ibcr_u_t ibcr; | ||
43 | ii_icmr_u_t icmr; | ||
44 | ii_ieclr_u_t ieclr; | ||
45 | |||
46 | BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda, | ||
47 | smp_processor_id())); | ||
48 | |||
49 | if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && | ||
50 | (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { | ||
51 | BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, | ||
52 | smp_processor_id())); | ||
53 | return 1; | ||
54 | } | ||
55 | |||
56 | /* Determine information about our hub */ | ||
57 | nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); | ||
58 | |||
59 | /* | ||
60 | * A BTE transfer can use multiple CRBs. We need to make sure | ||
61 | * that all the BTE CRBs are complete (or timed out) before | ||
62 | * attempting to clean up the error. Resetting the BTE while | ||
63 | * there are still BTE CRBs active will hang the BTE. | ||
64 | * We should look at all the CRBs to see if they are allocated | ||
65 | * to the BTE and see if they are still active. When none | ||
66 | * are active, we can continue with the cleanup. | ||
67 | * | ||
68 | * We also want to make sure that the local NI port is up. | ||
69 | * When a router resets, the NI port can go down while it | ||
70 | * goes through the LLP handshake, but it then comes back up. | ||
71 | */ | ||
72 | icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR); | ||
73 | if (icmr.ii_icmr_fld_s.i_crb_mark != 0) { | ||
74 | /* | ||
75 | * There are errors which still need to be cleaned up by | ||
76 | * hubiio_crb_error_handler | ||
77 | */ | ||
78 | mod_timer(recovery_timer, jiffies + (HZ * 5)); | ||
79 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, | ||
80 | smp_processor_id())); | ||
81 | return 1; | ||
82 | } | ||
83 | if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { | ||
84 | |||
85 | valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld; | ||
86 | |||
87 | for (i = 0; i < IIO_NUM_CRBS; i++) { | ||
88 | if (!((1 << i) & valid_crbs)) { | ||
89 | /* This crb was not marked as valid, ignore */ | ||
90 | continue; | ||
91 | } | ||
92 | icrbd.ii_icrb0_d_regval = | ||
93 | REMOTE_HUB_L(nasid, IIO_ICRB_D(i)); | ||
94 | if (icrbd.d_bteop) { | ||
95 | mod_timer(recovery_timer, jiffies + (HZ * 5)); | ||
96 | BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", | ||
97 | err_nodepda, smp_processor_id(), | ||
98 | i)); | ||
99 | return 1; | ||
100 | } | ||
101 | } | ||
102 | } | ||
103 | |||
104 | BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id())); | ||
105 | /* Re-enable both bte interfaces */ | ||
106 | imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM); | ||
107 | imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1; | ||
108 | REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval); | ||
109 | |||
110 | /* Clear BTE0/1 error bits */ | ||
111 | ieclr.ii_ieclr_regval = 0; | ||
112 | if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS) | ||
113 | ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1; | ||
114 | if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS) | ||
115 | ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1; | ||
116 | REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval); | ||
117 | |||
118 | /* Reinitialize both BTE state machines. */ | ||
119 | ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR); | ||
120 | ibcr.ii_ibcr_fld_s.i_soft_reset = 1; | ||
121 | REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); | ||
122 | |||
123 | del_timer(recovery_timer); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Wait until all BTE related CRBs are completed | ||
129 | * and then reset the interfaces. | ||
130 | */ | ||
131 | static int shub2_bte_error_handler(struct nodepda_s *err_nodepda) | ||
132 | { | ||
133 | struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; | ||
134 | struct bteinfo_s *bte; | ||
135 | nasid_t nasid; | ||
136 | u64 status; | ||
137 | int i; | ||
138 | |||
139 | nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); | ||
140 | |||
141 | /* | ||
142 | * Verify that all the BTEs are complete | ||
143 | */ | ||
144 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
145 | bte = &err_nodepda->bte_if[i]; | ||
146 | status = BTE_LNSTAT_LOAD(bte); | ||
147 | if (status & IBLS_ERROR) { | ||
148 | bte->bh_error = BTE_SHUB2_ERROR(status); | ||
149 | continue; | ||
150 | } | ||
151 | if (!(status & IBLS_BUSY)) | ||
152 | continue; | ||
153 | mod_timer(recovery_timer, jiffies + (HZ * 5)); | ||
154 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, | ||
155 | smp_processor_id())); | ||
156 | return 1; | ||
157 | } | ||
158 | if (ia64_sn_bte_recovery(nasid)) | ||
159 | panic("bte_error_handler(): Fatal BTE Error"); | ||
160 | |||
161 | del_timer(recovery_timer); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Wait until all BTE related CRBs are completed | ||
167 | * and then reset the interfaces. | ||
168 | */ | ||
169 | void bte_error_handler(struct nodepda_s *err_nodepda) | ||
170 | { | ||
171 | spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; | ||
172 | int i; | ||
173 | unsigned long irq_flags; | ||
174 | volatile u64 *notify; | ||
175 | bte_result_t bh_error; | ||
176 | |||
177 | BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, | ||
178 | smp_processor_id())); | ||
179 | |||
180 | spin_lock_irqsave(recovery_lock, irq_flags); | ||
181 | |||
182 | /* | ||
183 | * Lock all interfaces on this node to prevent new transfers | ||
184 | * from being queued. | ||
185 | */ | ||
186 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
187 | if (err_nodepda->bte_if[i].cleanup_active) { | ||
188 | continue; | ||
189 | } | ||
190 | spin_lock(&err_nodepda->bte_if[i].spinlock); | ||
191 | BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda, | ||
192 | smp_processor_id(), i)); | ||
193 | err_nodepda->bte_if[i].cleanup_active = 1; | ||
194 | } | ||
195 | |||
196 | if (is_shub1()) { | ||
197 | if (shub1_bte_error_handler(err_nodepda)) { | ||
198 | spin_unlock_irqrestore(recovery_lock, irq_flags); | ||
199 | return; | ||
200 | } | ||
201 | } else { | ||
202 | if (shub2_bte_error_handler(err_nodepda)) { | ||
203 | spin_unlock_irqrestore(recovery_lock, irq_flags); | ||
204 | return; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
209 | bh_error = err_nodepda->bte_if[i].bh_error; | ||
210 | if (bh_error != BTE_SUCCESS) { | ||
211 | /* There is an error which needs to be notified */ | ||
212 | notify = err_nodepda->bte_if[i].most_rcnt_na; | ||
213 | BTE_PRINTK(("cnode %d bte %d error=0x%lx\n", | ||
214 | err_nodepda->bte_if[i].bte_cnode, | ||
215 | err_nodepda->bte_if[i].bte_num, | ||
216 | IBLS_ERROR | (u64) bh_error)); | ||
217 | *notify = IBLS_ERROR | bh_error; | ||
218 | err_nodepda->bte_if[i].bh_error = BTE_SUCCESS; | ||
219 | } | ||
220 | |||
221 | err_nodepda->bte_if[i].cleanup_active = 0; | ||
222 | BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda, | ||
223 | smp_processor_id(), i)); | ||
224 | spin_unlock(&err_nodepda->bte_if[i].spinlock); | ||
225 | } | ||
226 | |||
227 | spin_unlock_irqrestore(recovery_lock, irq_flags); | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * First part error handler. This is called whenever any error CRB interrupt | ||
232 | * is generated by the II. | ||
233 | */ | ||
234 | void | ||
235 | bte_crb_error_handler(cnodeid_t cnode, int btenum, | ||
236 | int crbnum, ioerror_t * ioe, int bteop) | ||
237 | { | ||
238 | struct bteinfo_s *bte; | ||
239 | |||
240 | |||
241 | bte = &(NODEPDA(cnode)->bte_if[btenum]); | ||
242 | |||
243 | /* | ||
244 | * The caller has already figured out the error type; we save it | ||
245 | * in the bte handle structure for the thread exercising the | ||
246 | * interface to consume. | ||
247 | */ | ||
248 | bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET; | ||
249 | bte->bte_error_count++; | ||
250 | |||
251 | BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n", | ||
252 | bte->bte_cnode, bte->bte_num, ioe->ie_errortype)); | ||
253 | bte_error_handler(NODEPDA(cnode)); | ||
254 | } | ||
255 | |||
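When recovery completes, bte_error_handler() above reports a failure to the waiting initiator by storing IBLS_ERROR | bh_error through most_rcnt_na. The sketch below shows how such a notification word could be decoded; bte_notify_to_result() is a hypothetical helper, the removed driver's transfer path (not shown here) did its own waiting and decoding.

	/* Hypothetical sketch: recover the bte_result_t that
	 * bte_error_handler() packed into the notification word. */
	static bte_result_t bte_notify_to_result(u64 notify_word)
	{
		if (notify_word & IBLS_ERROR)
			return (bte_result_t)(notify_word & ~IBLS_ERROR);
		return BTE_SUCCESS;
	}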
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c deleted file mode 100644 index 97fa56dddf50..000000000000 --- a/arch/ia64/sn/kernel/huberror.c +++ /dev/null | |||
@@ -1,220 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <asm/delay.h> | ||
12 | #include <asm/sn/sn_sal.h> | ||
13 | #include "ioerror.h" | ||
14 | #include <asm/sn/addrs.h> | ||
15 | #include <asm/sn/shubio.h> | ||
16 | #include <asm/sn/geo.h> | ||
17 | #include "xtalk/xwidgetdev.h" | ||
18 | #include "xtalk/hubdev.h" | ||
19 | #include <asm/sn/bte.h> | ||
20 | |||
21 | void hubiio_crb_error_handler(struct hubdev_info *hubdev_info); | ||
22 | extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *, | ||
23 | int); | ||
24 | static irqreturn_t hub_eint_handler(int irq, void *arg) | ||
25 | { | ||
26 | struct hubdev_info *hubdev_info; | ||
27 | struct ia64_sal_retval ret_stuff; | ||
28 | nasid_t nasid; | ||
29 | |||
30 | ret_stuff.status = 0; | ||
31 | ret_stuff.v0 = 0; | ||
32 | hubdev_info = (struct hubdev_info *)arg; | ||
33 | nasid = hubdev_info->hdi_nasid; | ||
34 | |||
35 | if (is_shub1()) { | ||
36 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, | ||
37 | (u64) nasid, 0, 0, 0, 0, 0, 0); | ||
38 | |||
39 | if ((int)ret_stuff.v0) | ||
40 | panic("%s: Fatal %s Error", __func__, | ||
41 | ((nasid & 1) ? "TIO" : "HUBII")); | ||
42 | |||
43 | if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ | ||
44 | (void)hubiio_crb_error_handler(hubdev_info); | ||
45 | } else | ||
46 | if (nasid & 1) { /* TIO errors */ | ||
47 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, | ||
48 | (u64) nasid, 0, 0, 0, 0, 0, 0); | ||
49 | |||
50 | if ((int)ret_stuff.v0) | ||
51 | panic("%s: Fatal TIO Error", __func__); | ||
52 | } else | ||
53 | bte_error_handler(NODEPDA(nasid_to_cnodeid(nasid))); | ||
54 | |||
55 | return IRQ_HANDLED; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Free the hub CRB "crbnum" which encountered an error. | ||
60 | * Assumption is, error handling was successfully done, | ||
61 | * and we now want to return the CRB back to Hub for normal usage. | ||
62 | * | ||
63 | * In order to free the CRB, all that's needed is to de-allocate it | ||
64 | * | ||
65 | * Assumption: | ||
66 | * No other processor is mucking around with the hub control register. | ||
67 | * So, the upper layer has to single-thread this. | ||
68 | */ | ||
69 | void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum) | ||
70 | { | ||
71 | ii_icrb0_b_u_t icrbb; | ||
72 | |||
73 | /* | ||
74 | * The hardware does NOT clear the mark bit, so it must get cleared | ||
75 | * here to be sure the error is not processed twice. | ||
76 | */ | ||
77 | icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid, | ||
78 | IIO_ICRB_B(crbnum)); | ||
79 | icrbb.b_mark = 0; | ||
80 | REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum), | ||
81 | icrbb.ii_icrb0_b_regval); | ||
82 | /* | ||
83 | * Deallocate the register and wait till the hub indicates it's done. | ||
84 | */ | ||
85 | REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum)); | ||
86 | while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND) | ||
87 | cpu_relax(); | ||
88 | |||
89 | } | ||
90 | |||
91 | /* | ||
92 | * hubiio_crb_error_handler | ||
93 | * | ||
94 | * This routine gets invoked when a hub gets an error | ||
95 | * interrupt. So, the routine is running in interrupt context | ||
96 | * at error interrupt level. | ||
97 | * Action: | ||
98 | * It's responsible for identifying ALL the CRBs that are marked | ||
99 | * with an error, and processing them. | ||
100 | * | ||
101 | * If a CRB is marked with an error, map it to the reason it | ||
102 | * caused the error, and invoke the appropriate error handler. | ||
103 | * | ||
104 | * XXX Be aware of the information in the context register. | ||
105 | * | ||
106 | * NOTE: | ||
107 | * Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt | ||
108 | * handler can be run on any node. (not necessarily the node | ||
109 | * corresponding to the hub that encountered error). | ||
110 | */ | ||
111 | |||
112 | void hubiio_crb_error_handler(struct hubdev_info *hubdev_info) | ||
113 | { | ||
114 | nasid_t nasid; | ||
115 | ii_icrb0_a_u_t icrba; /* II CRB Register A */ | ||
116 | ii_icrb0_b_u_t icrbb; /* II CRB Register B */ | ||
117 | ii_icrb0_c_u_t icrbc; /* II CRB Register C */ | ||
118 | ii_icrb0_d_u_t icrbd; /* II CRB Register D */ | ||
119 | ii_icrb0_e_u_t icrbe; /* II CRB Register E */ | ||
120 | int i; | ||
121 | int num_errors = 0; /* Num of errors handled */ | ||
122 | ioerror_t ioerror; | ||
123 | |||
124 | nasid = hubdev_info->hdi_nasid; | ||
125 | |||
126 | /* | ||
127 | * XXX - Add locking for any recovery actions | ||
128 | */ | ||
129 | /* | ||
130 | * Scan through all CRBs in the Hub, and handle the errors | ||
131 | * in any of the CRBs marked. | ||
132 | */ | ||
133 | for (i = 0; i < IIO_NUM_CRBS; i++) { | ||
134 | /* Check this crb entry to see if it is in error. */ | ||
135 | icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i)); | ||
136 | |||
137 | if (icrbb.b_mark == 0) { | ||
138 | continue; | ||
139 | } | ||
140 | |||
141 | icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i)); | ||
142 | |||
143 | IOERROR_INIT(&ioerror); | ||
144 | |||
145 | /* read other CRB error registers. */ | ||
146 | icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i)); | ||
147 | icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i)); | ||
148 | icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i)); | ||
149 | |||
150 | IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode); | ||
151 | |||
152 | /* Check if this error is due to a BTE operation, | ||
153 | * and handle it separately. | ||
154 | */ | ||
155 | if (icrbd.d_bteop || | ||
156 | ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 || | ||
157 | icrbb.b_initiator == IIO_ICRB_INIT_BTE1) && | ||
158 | (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE || | ||
159 | icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) { | ||
160 | |||
161 | int bte_num; | ||
162 | |||
163 | if (icrbd.d_bteop) | ||
164 | bte_num = icrbc.c_btenum; | ||
165 | else /* b_initiator bit 2 gives BTE number */ | ||
166 | bte_num = (icrbb.b_initiator & 0x4) >> 2; | ||
167 | |||
168 | hubiio_crb_free(hubdev_info, i); | ||
169 | |||
170 | bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num, | ||
171 | i, &ioerror, icrbd.d_bteop); | ||
172 | num_errors++; | ||
173 | continue; | ||
174 | } | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Function : hub_error_init | ||
180 | * Purpose : initialize the error handling requirements for a given hub. | ||
181 | * Parameters : hubdev_info, the hub device info structure. | ||
182 | * Assumptions : Called only once per hub, either by a local cpu or by a | ||
183 | * remote cpu when this hub is headless (cpuless). | ||
184 | * Returns : None | ||
185 | */ | ||
186 | void hub_error_init(struct hubdev_info *hubdev_info) | ||
187 | { | ||
188 | |||
189 | if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED, | ||
190 | "SN_hub_error", hubdev_info)) { | ||
191 | printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n", | ||
192 | hubdev_info); | ||
193 | return; | ||
194 | } | ||
195 | irq_set_handler(SGI_II_ERROR, handle_level_irq); | ||
196 | sn_set_err_irq_affinity(SGI_II_ERROR); | ||
197 | } | ||
198 | |||
199 | |||
200 | /* | ||
201 | * Function : ice_error_init | ||
202 | * Purpose : initialize the error handling requirements for a given tio. | ||
203 | * Parameters : hubdev_info, the hub device info structure for the tio. | ||
204 | * Assumptions : Called only once per tio. | ||
205 | * Returns : None | ||
206 | */ | ||
207 | void ice_error_init(struct hubdev_info *hubdev_info) | ||
208 | { | ||
209 | |||
210 | if (request_irq | ||
211 | (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error", | ||
212 | (void *)hubdev_info)) { | ||
213 | printk("ice_error_init: request_irq() error hubdev_info 0x%p\n", | ||
214 | hubdev_info); | ||
215 | return; | ||
216 | } | ||
217 | irq_set_handler(SGI_TIO_ERROR, handle_level_irq); | ||
218 | sn_set_err_irq_affinity(SGI_TIO_ERROR); | ||
219 | } | ||
220 | |||
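hubiio_crb_error_handler() above decides which BTE owns a faulting CRB in two different ways. The rule is small enough to factor out, as in the hypothetical helper below; crb_to_bte_num() is not a function from the removed file, it only restates the logic shown above.

	/* Hypothetical sketch of the ownership rule: BTE-initiated CRBs carry
	 * the engine number in CRB register C, otherwise bit 2 of the
	 * initiator field in CRB register B names BTE0 or BTE1. */
	static int crb_to_bte_num(ii_icrb0_b_u_t icrbb, ii_icrb0_c_u_t icrbc,
				  ii_icrb0_d_u_t icrbd)
	{
		if (icrbd.d_bteop)
			return icrbc.c_btenum;
		return (icrbb.b_initiator & 0x4) >> 2;
	}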
diff --git a/arch/ia64/sn/kernel/idle.c b/arch/ia64/sn/kernel/idle.c deleted file mode 100644 index 49d178f022b5..000000000000 --- a/arch/ia64/sn/kernel/idle.c +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <asm/sn/leds.h> | ||
10 | |||
11 | void snidle(int state) | ||
12 | { | ||
13 | if (state) { | ||
14 | if (pda->idle_flag == 0) { | ||
15 | /* | ||
16 | * Turn the activity LED off. | ||
17 | */ | ||
18 | set_led_bits(0, LED_CPU_ACTIVITY); | ||
19 | } | ||
20 | |||
21 | pda->idle_flag = 1; | ||
22 | } else { | ||
23 | /* | ||
24 | * Turn the activity LED on. | ||
25 | */ | ||
26 | set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY); | ||
27 | |||
28 | pda->idle_flag = 0; | ||
29 | } | ||
30 | } | ||
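snidle() drives the CPU activity LED through set_led_bits(), which, as used here, takes the new value first and the mask of bits to touch second. A hypothetical one-line wrapper makes that calling convention explicit:

	/* Hypothetical sketch, assuming the set_led_bits(value, mask)
	 * convention used by snidle() above. */
	static inline void sn_led_activity(int on)
	{
		set_led_bits(on ? LED_CPU_ACTIVITY : 0, LED_CPU_ACTIVITY);
	}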
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c deleted file mode 100644 index c31fe637b0b4..000000000000 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ /dev/null | |||
@@ -1,513 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <asm/sn/types.h> | ||
10 | #include <asm/sn/addrs.h> | ||
11 | #include <asm/sn/pcidev.h> | ||
12 | #include <asm/sn/pcibus_provider_defs.h> | ||
13 | #include <asm/sn/sn_sal.h> | ||
14 | #include "xtalk/hubdev.h" | ||
15 | #include <linux/acpi.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/export.h> | ||
18 | |||
19 | |||
20 | /* | ||
21 | * The code in this file will only be executed when running with | ||
22 | * a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1) | ||
23 | */ | ||
24 | |||
25 | |||
26 | /* | ||
27 | * This value must match the UUID the PROM uses | ||
28 | * (io/acpi/defblk.c) when building a vendor descriptor. | ||
29 | */ | ||
30 | struct acpi_vendor_uuid sn_uuid = { | ||
31 | .subtype = 0, | ||
32 | .data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11, | ||
33 | 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 }, | ||
34 | }; | ||
35 | |||
36 | struct sn_pcidev_match { | ||
37 | u8 bus; | ||
38 | unsigned int devfn; | ||
39 | acpi_handle handle; | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * Perform the early IO init in PROM. | ||
44 | */ | ||
45 | static long | ||
46 | sal_ioif_init(u64 *result) | ||
47 | { | ||
48 | struct ia64_sal_retval isrv = {0,0,0,0}; | ||
49 | |||
50 | SAL_CALL_NOLOCK(isrv, | ||
51 | SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0); | ||
52 | *result = isrv.v0; | ||
53 | return isrv.status; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * sn_acpi_hubdev_init() - This function is called by acpi_ns_get_device_callback() | ||
58 | * for all SGIHUB and SGITIO acpi devices defined in the | ||
59 | * DSDT. It obtains the hubdev_info pointer from the | ||
61 | * ACPI vendor resource, which the PROM set up, and sets up the | ||
61 | * hubdev_info in the pda. | ||
62 | */ | ||
63 | |||
64 | static acpi_status __init | ||
65 | sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret) | ||
66 | { | ||
67 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
68 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
69 | u64 addr; | ||
70 | struct hubdev_info *hubdev; | ||
71 | struct hubdev_info *hubdev_ptr; | ||
72 | int i; | ||
73 | u64 nasid; | ||
74 | struct acpi_resource *resource; | ||
75 | acpi_status status; | ||
76 | struct acpi_resource_vendor_typed *vendor; | ||
77 | extern void sn_common_hubdev_init(struct hubdev_info *); | ||
78 | |||
79 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | ||
80 | &sn_uuid, &buffer); | ||
81 | if (ACPI_FAILURE(status)) { | ||
82 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
83 | printk(KERN_ERR | ||
84 | "sn_acpi_hubdev_init: acpi_get_vendor_resource() " | ||
85 | "(0x%x) failed for: %s\n", status, | ||
86 | (char *)name_buffer.pointer); | ||
87 | kfree(name_buffer.pointer); | ||
88 | return AE_OK; /* Continue walking namespace */ | ||
89 | } | ||
90 | |||
91 | resource = buffer.pointer; | ||
92 | vendor = &resource->data.vendor_typed; | ||
93 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != | ||
94 | sizeof(struct hubdev_info *)) { | ||
95 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
96 | printk(KERN_ERR | ||
97 | "sn_acpi_hubdev_init: Invalid vendor data length: " | ||
98 | "%d for: %s\n", | ||
99 | vendor->byte_length, (char *)name_buffer.pointer); | ||
100 | kfree(name_buffer.pointer); | ||
101 | goto exit; | ||
102 | } | ||
103 | |||
104 | memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *)); | ||
105 | hubdev_ptr = __va((struct hubdev_info *) addr); | ||
106 | |||
107 | nasid = hubdev_ptr->hdi_nasid; | ||
108 | i = nasid_to_cnodeid(nasid); | ||
109 | hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); | ||
110 | *hubdev = *hubdev_ptr; | ||
111 | sn_common_hubdev_init(hubdev); | ||
112 | |||
113 | exit: | ||
114 | kfree(buffer.pointer); | ||
115 | return AE_OK; /* Continue walking namespace */ | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in | ||
120 | * the ACPI Vendor resource for this bus. | ||
121 | */ | ||
122 | static struct pcibus_bussoft * | ||
123 | sn_get_bussoft_ptr(struct pci_bus *bus) | ||
124 | { | ||
125 | u64 addr; | ||
126 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
127 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
128 | acpi_handle handle; | ||
129 | struct pcibus_bussoft *prom_bussoft_ptr; | ||
130 | struct acpi_resource *resource; | ||
131 | acpi_status status; | ||
132 | struct acpi_resource_vendor_typed *vendor; | ||
133 | |||
134 | |||
135 | handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion); | ||
136 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | ||
137 | &sn_uuid, &buffer); | ||
138 | if (ACPI_FAILURE(status)) { | ||
139 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
140 | printk(KERN_ERR "%s: " | ||
141 | "acpi_get_vendor_resource() failed (0x%x) for: %s\n", | ||
142 | __func__, status, (char *)name_buffer.pointer); | ||
143 | kfree(name_buffer.pointer); | ||
144 | return NULL; | ||
145 | } | ||
146 | resource = buffer.pointer; | ||
147 | vendor = &resource->data.vendor_typed; | ||
148 | |||
149 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != | ||
150 | sizeof(struct pcibus_bussoft *)) { | ||
151 | printk(KERN_ERR | ||
152 | "%s: Invalid vendor data length %d\n", | ||
153 | __func__, vendor->byte_length); | ||
154 | kfree(buffer.pointer); | ||
155 | return NULL; | ||
156 | } | ||
157 | memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *)); | ||
158 | prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr); | ||
159 | kfree(buffer.pointer); | ||
160 | |||
161 | return prom_bussoft_ptr; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info | ||
166 | * pointers from the vendor resource using the | ||
167 | * provided acpi handle, and copy the structures | ||
168 | * into the argument buffers. | ||
169 | */ | ||
170 | static int | ||
171 | sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | ||
172 | struct sn_irq_info **sn_irq_info) | ||
173 | { | ||
174 | u64 addr; | ||
175 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
176 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
177 | struct sn_irq_info *irq_info, *irq_info_prom; | ||
178 | struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr; | ||
179 | struct acpi_resource *resource; | ||
180 | int ret = 0; | ||
181 | acpi_status status; | ||
182 | struct acpi_resource_vendor_typed *vendor; | ||
183 | |||
184 | /* | ||
185 | * The pointer to this device's pcidev_info structure in | ||
186 | * the PROM is in the vendor resource. | ||
187 | */ | ||
188 | status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, | ||
189 | &sn_uuid, &buffer); | ||
190 | if (ACPI_FAILURE(status)) { | ||
191 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
192 | printk(KERN_ERR | ||
193 | "%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n", | ||
194 | __func__, status, (char *)name_buffer.pointer); | ||
195 | kfree(name_buffer.pointer); | ||
196 | return 1; | ||
197 | } | ||
198 | |||
199 | resource = buffer.pointer; | ||
200 | vendor = &resource->data.vendor_typed; | ||
201 | if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != | ||
202 | sizeof(struct pcidev_info *)) { | ||
203 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
204 | printk(KERN_ERR | ||
205 | "%s: Invalid vendor data length: %d for: %s\n", | ||
206 | __func__, vendor->byte_length, | ||
207 | (char *)name_buffer.pointer); | ||
208 | kfree(name_buffer.pointer); | ||
209 | ret = 1; | ||
210 | goto exit; | ||
211 | } | ||
212 | |||
213 | pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); | ||
214 | if (!pcidev_ptr) | ||
215 | panic("%s: Unable to alloc memory for pcidev_info", __func__); | ||
216 | |||
217 | memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *)); | ||
218 | pcidev_prom_ptr = __va(addr); | ||
219 | memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info)); | ||
220 | |||
221 | /* Get the IRQ info */ | ||
222 | irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | ||
223 | if (!irq_info) | ||
224 | panic("%s: Unable to alloc memory for sn_irq_info", __func__); | ||
225 | |||
226 | if (pcidev_ptr->pdi_sn_irq_info) { | ||
227 | irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info); | ||
228 | memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info)); | ||
229 | } | ||
230 | |||
231 | *pcidev_info = pcidev_ptr; | ||
232 | *sn_irq_info = irq_info; | ||
233 | |||
234 | exit: | ||
235 | kfree(buffer.pointer); | ||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | static unsigned int | ||
240 | get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | ||
241 | { | ||
242 | unsigned long long adr; | ||
243 | acpi_handle child; | ||
244 | unsigned int devfn; | ||
245 | int function; | ||
246 | acpi_handle parent; | ||
247 | int slot; | ||
248 | acpi_status status; | ||
249 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
250 | |||
251 | acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
252 | |||
253 | /* | ||
254 | * Do an upward search to find the root bus device, and | ||
255 | * obtain the host devfn from the previous child device. | ||
256 | */ | ||
257 | child = device_handle; | ||
258 | while (child) { | ||
259 | status = acpi_get_parent(child, &parent); | ||
260 | if (ACPI_FAILURE(status)) { | ||
261 | printk(KERN_ERR "%s: acpi_get_parent() failed " | ||
262 | "(0x%x) for: %s\n", __func__, status, | ||
263 | (char *)name_buffer.pointer); | ||
264 | panic("%s: Unable to find host devfn\n", __func__); | ||
265 | } | ||
266 | if (parent == rootbus_handle) | ||
267 | break; | ||
268 | child = parent; | ||
269 | } | ||
270 | if (!child) { | ||
271 | printk(KERN_ERR "%s: Unable to find root bus for: %s\n", | ||
272 | __func__, (char *)name_buffer.pointer); | ||
273 | BUG(); | ||
274 | } | ||
275 | |||
276 | status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); | ||
277 | if (ACPI_FAILURE(status)) { | ||
278 | printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n", | ||
279 | __func__, status, (char *)name_buffer.pointer); | ||
280 | panic("%s: Unable to find host devfn\n", __func__); | ||
281 | } | ||
282 | |||
283 | kfree(name_buffer.pointer); | ||
284 | |||
285 | slot = (adr >> 16) & 0xffff; | ||
286 | function = adr & 0xffff; | ||
287 | devfn = PCI_DEVFN(slot, function); | ||
288 | return devfn; | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * find_matching_device - Callback routine to find the ACPI device | ||
293 | * that matches up with our pci_dev device. | ||
294 | * Matching is done on bus number and devfn. | ||
295 | * To find the bus number for a particular | ||
296 | * ACPI device, we must look at the _BBN method | ||
297 | * of its parent. | ||
298 | */ | ||
299 | static acpi_status | ||
300 | find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) | ||
301 | { | ||
302 | unsigned long long bbn = -1; | ||
303 | unsigned long long adr; | ||
304 | acpi_handle parent = NULL; | ||
305 | acpi_status status; | ||
306 | unsigned int devfn; | ||
307 | int function; | ||
308 | int slot; | ||
309 | struct sn_pcidev_match *info = context; | ||
310 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
311 | |||
312 | status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, | ||
313 | &adr); | ||
314 | if (ACPI_SUCCESS(status)) { | ||
315 | status = acpi_get_parent(handle, &parent); | ||
316 | if (ACPI_FAILURE(status)) { | ||
317 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
318 | printk(KERN_ERR | ||
319 | "%s: acpi_get_parent() failed (0x%x) for: %s\n", | ||
320 | __func__, status, (char *)name_buffer.pointer); | ||
321 | kfree(name_buffer.pointer); | ||
322 | return AE_OK; | ||
323 | } | ||
324 | status = acpi_evaluate_integer(parent, METHOD_NAME__BBN, | ||
325 | NULL, &bbn); | ||
326 | if (ACPI_FAILURE(status)) { | ||
327 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
328 | printk(KERN_ERR | ||
329 | "%s: Failed to find _BBN in parent of: %s\n", | ||
330 | __func__, (char *)name_buffer.pointer); | ||
331 | kfree(name_buffer.pointer); | ||
332 | return AE_OK; | ||
333 | } | ||
334 | |||
335 | slot = (adr >> 16) & 0xffff; | ||
336 | function = adr & 0xffff; | ||
337 | devfn = PCI_DEVFN(slot, function); | ||
338 | if ((info->devfn == devfn) && (info->bus == bbn)) { | ||
339 | /* We have a match! */ | ||
340 | info->handle = handle; | ||
341 | return 1; | ||
342 | } | ||
343 | } | ||
344 | return AE_OK; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi | ||
349 | * device matching the specified pci_dev, | ||
350 | * and return the pcidev info and irq info. | ||
351 | */ | ||
352 | int | ||
353 | sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | ||
354 | struct sn_irq_info **sn_irq_info) | ||
355 | { | ||
356 | unsigned int host_devfn; | ||
357 | struct sn_pcidev_match pcidev_match; | ||
358 | acpi_handle rootbus_handle; | ||
359 | unsigned long long segment; | ||
360 | acpi_status status; | ||
361 | struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
362 | |||
363 | rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion); | ||
364 | status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL, | ||
365 | &segment); | ||
366 | if (ACPI_SUCCESS(status)) { | ||
367 | if (segment != pci_domain_nr(dev)) { | ||
368 | acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, | ||
369 | &name_buffer); | ||
370 | printk(KERN_ERR | ||
371 | "%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n", | ||
372 | __func__, segment, pci_domain_nr(dev), | ||
373 | (char *)name_buffer.pointer); | ||
374 | kfree(name_buffer.pointer); | ||
375 | return 1; | ||
376 | } | ||
377 | } else { | ||
378 | acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer); | ||
379 | printk(KERN_ERR "%s: Unable to get _SEG from: %s\n", | ||
380 | __func__, (char *)name_buffer.pointer); | ||
381 | kfree(name_buffer.pointer); | ||
382 | return 1; | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * We want to search all devices in this segment/domain | ||
387 | * of the ACPI namespace for the matching ACPI device, | ||
388 | * which holds the pcidev_info pointer in its vendor resource. | ||
389 | */ | ||
390 | pcidev_match.bus = dev->bus->number; | ||
391 | pcidev_match.devfn = dev->devfn; | ||
392 | pcidev_match.handle = NULL; | ||
393 | |||
394 | acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, | ||
395 | find_matching_device, NULL, &pcidev_match, NULL); | ||
396 | |||
397 | if (!pcidev_match.handle) { | ||
398 | printk(KERN_ERR | ||
399 | "%s: Could not find matching ACPI device for %s.\n", | ||
400 | __func__, pci_name(dev)); | ||
401 | return 1; | ||
402 | } | ||
403 | |||
404 | if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info)) | ||
405 | return 1; | ||
406 | |||
407 | /* Build up the pcidev_info.pdi_slot_host_handle */ | ||
408 | host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle); | ||
409 | (*pcidev_info)->pdi_slot_host_handle = | ||
410 | ((unsigned long) pci_domain_nr(dev) << 40) | | ||
411 | /* bus == 0 */ | ||
412 | host_devfn; | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | /* | ||
417 | * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info. | ||
418 | * Perform any SN specific slot fixup. | ||
419 | * At present there does not appear to be | ||
420 | * any generic way to handle a ROM image | ||
421 | * that has been shadowed by the PROM, so | ||
422 | * we pass a pointer to it within the | ||
423 | * pcidev_info structure. | ||
424 | */ | ||
425 | |||
426 | void | ||
427 | sn_acpi_slot_fixup(struct pci_dev *dev) | ||
428 | { | ||
429 | struct pcidev_info *pcidev_info = NULL; | ||
430 | struct sn_irq_info *sn_irq_info = NULL; | ||
431 | struct resource *res; | ||
432 | size_t size; | ||
433 | |||
434 | if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { | ||
435 | panic("%s: Failure obtaining pcidev_info for %s\n", | ||
436 | __func__, pci_name(dev)); | ||
437 | } | ||
438 | |||
439 | if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { | ||
440 | /* | ||
441 | * A valid ROM image exists and has been shadowed by the | ||
442 | * PROM. Setup the pci_dev ROM resource with the address | ||
443 | * of the shadowed copy, and the actual length of the ROM image. | ||
444 | */ | ||
445 | size = pci_resource_len(dev, PCI_ROM_RESOURCE); | ||
446 | |||
447 | res = &dev->resource[PCI_ROM_RESOURCE]; | ||
448 | |||
449 | pci_disable_rom(dev); | ||
450 | if (res->parent) | ||
451 | release_resource(res); | ||
452 | |||
453 | res->start = pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]; | ||
454 | res->end = res->start + size - 1; | ||
455 | res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW | | ||
456 | IORESOURCE_PCI_FIXED; | ||
457 | } | ||
458 | sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info); | ||
459 | } | ||
460 | EXPORT_SYMBOL(sn_acpi_slot_fixup); | ||
461 | |||
462 | |||
463 | /* | ||
464 | * sn_acpi_bus_fixup - Perform SN specific setup of software structs | ||
465 | * (pcibus_bussoft, pcidev_info) and hardware | ||
466 | * registers, for the specified bus and devices under it. | ||
467 | */ | ||
468 | void | ||
469 | sn_acpi_bus_fixup(struct pci_bus *bus) | ||
470 | { | ||
471 | struct pci_dev *pci_dev = NULL; | ||
472 | struct pcibus_bussoft *prom_bussoft_ptr; | ||
473 | |||
474 | if (!bus->parent) { /* If root bus */ | ||
475 | prom_bussoft_ptr = sn_get_bussoft_ptr(bus); | ||
476 | if (prom_bussoft_ptr == NULL) { | ||
477 | printk(KERN_ERR | ||
478 | "%s: 0x%04x:0x%02x Unable to " | ||
479 | "obtain prom_bussoft_ptr\n", | ||
480 | __func__, pci_domain_nr(bus), bus->number); | ||
481 | return; | ||
482 | } | ||
483 | sn_common_bus_fixup(bus, prom_bussoft_ptr); | ||
484 | } | ||
485 | list_for_each_entry(pci_dev, &bus->devices, bus_list) { | ||
486 | sn_acpi_slot_fixup(pci_dev); | ||
487 | } | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the | ||
492 | * nodes and root buses in the DSDT. As a result, bus scanning | ||
493 | * will be initiated by the Linux ACPI code. | ||
494 | */ | ||
495 | |||
496 | void __init | ||
497 | sn_io_acpi_init(void) | ||
498 | { | ||
499 | u64 result; | ||
500 | long status; | ||
501 | |||
502 | /* SN Altix does not follow the IOSAPIC IRQ routing model */ | ||
503 | acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; | ||
504 | |||
505 | /* Setup hubdev_info for all SGIHUB/SGITIO devices */ | ||
506 | acpi_get_devices("SGIHUB", sn_acpi_hubdev_init, NULL, NULL); | ||
507 | acpi_get_devices("SGITIO", sn_acpi_hubdev_init, NULL, NULL); | ||
508 | |||
509 | status = sal_ioif_init(&result); | ||
510 | if (status || result) | ||
511 | panic("sal_ioif_init failed: [%lx] %s\n", | ||
512 | status, ia64_sal_strerror(status)); | ||
513 | } | ||
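Three routines in this file (sn_acpi_hubdev_init(), sn_get_bussoft_ptr() and sn_extract_device_info()) repeat the same pattern: read the SN vendor resource, check that exactly one pointer follows the UUID, and convert that PROM physical address with __va(). A sketch of that common pattern, factored into a hypothetical helper that is not part of the removed file, would look like this:

	/* Hypothetical sketch of the shared pattern above: return the single
	 * PROM pointer stored after the UUID in the SN vendor resource, or
	 * NULL if the resource is missing or malformed. */
	static void *sn_acpi_vendor_ptr(acpi_handle handle)
	{
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_resource_vendor_typed *vendor;
		struct acpi_resource *resource;
		acpi_status status;
		void *ret = NULL;
		u64 addr;

		status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
						  &sn_uuid, &buffer);
		if (ACPI_FAILURE(status))
			return NULL;

		resource = buffer.pointer;
		vendor = &resource->data.vendor_typed;
		if (vendor->byte_length - sizeof(struct acpi_vendor_uuid) ==
		    sizeof(u64)) {
			memcpy(&addr, vendor->byte_data, sizeof(addr));
			ret = __va(addr);	/* PROM stores a physical address */
		}
		kfree(buffer.pointer);
		return ret;
	}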
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c deleted file mode 100644 index d46847323ef6..000000000000 --- a/arch/ia64/sn/kernel/io_common.c +++ /dev/null | |||
@@ -1,561 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/memblock.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <asm/sn/types.h> | ||
13 | #include <asm/sn/addrs.h> | ||
14 | #include <asm/sn/sn_feature_sets.h> | ||
15 | #include <asm/sn/geo.h> | ||
16 | #include <asm/sn/io.h> | ||
17 | #include <asm/sn/l1.h> | ||
18 | #include <asm/sn/module.h> | ||
19 | #include <asm/sn/pcibr_provider.h> | ||
20 | #include <asm/sn/pcibus_provider_defs.h> | ||
21 | #include <asm/sn/pcidev.h> | ||
22 | #include <asm/sn/simulator.h> | ||
23 | #include <asm/sn/sn_sal.h> | ||
24 | #include <asm/sn/tioca_provider.h> | ||
25 | #include <asm/sn/tioce_provider.h> | ||
26 | #include "xtalk/hubdev.h" | ||
27 | #include "xtalk/xwidgetdev.h" | ||
28 | #include <linux/acpi.h> | ||
29 | #include <asm/sn/sn2/sn_hwperf.h> | ||
30 | #include <asm/sn/acpi.h> | ||
31 | |||
32 | extern void sn_init_cpei_timer(void); | ||
33 | extern void register_sn_procfs(void); | ||
34 | extern void sn_io_acpi_init(void); | ||
35 | extern void sn_io_init(void); | ||
36 | |||
37 | |||
38 | static struct list_head sn_sysdata_list; | ||
39 | |||
40 | /* sysdata list struct */ | ||
41 | struct sysdata_el { | ||
42 | struct list_head entry; | ||
43 | void *sysdata; | ||
44 | }; | ||
45 | |||
46 | int sn_ioif_inited; /* SN I/O infrastructure initialized? */ | ||
47 | |||
48 | int sn_acpi_rev; /* SN ACPI revision */ | ||
49 | EXPORT_SYMBOL_GPL(sn_acpi_rev); | ||
50 | |||
51 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ | ||
52 | |||
53 | /* | ||
54 | * Hooks and struct for unsupported pci providers | ||
55 | */ | ||
56 | |||
57 | static dma_addr_t | ||
58 | sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static void | ||
64 | sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction) | ||
65 | { | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | static void * | ||
70 | sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller) | ||
71 | { | ||
72 | return NULL; | ||
73 | } | ||
74 | |||
75 | static struct sn_pcibus_provider sn_pci_default_provider = { | ||
76 | .dma_map = sn_default_pci_map, | ||
77 | .dma_map_consistent = sn_default_pci_map, | ||
78 | .dma_unmap = sn_default_pci_unmap, | ||
79 | .bus_fixup = sn_default_pci_bus_fixup, | ||
80 | }; | ||
81 | |||
82 | /* | ||
83 | * Retrieve the DMA Flush List given nasid, widget, and device. | ||
84 | * This list is needed to implement the WAR - Flush DMA data on PIO Reads. | ||
85 | */ | ||
86 | static inline u64 | ||
87 | sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, | ||
88 | u64 address) | ||
89 | { | ||
90 | struct ia64_sal_retval ret_stuff; | ||
91 | ret_stuff.status = 0; | ||
92 | ret_stuff.v0 = 0; | ||
93 | |||
94 | SAL_CALL_NOLOCK(ret_stuff, | ||
95 | (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST, | ||
96 | (u64) nasid, (u64) widget_num, | ||
97 | (u64) device_num, (u64) address, 0, 0, 0); | ||
98 | return ret_stuff.status; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified | ||
103 | * device. | ||
104 | */ | ||
105 | inline struct pcidev_info * | ||
106 | sn_pcidev_info_get(struct pci_dev *dev) | ||
107 | { | ||
108 | struct pcidev_info *pcidev; | ||
109 | |||
110 | list_for_each_entry(pcidev, | ||
111 | &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) { | ||
112 | if (pcidev->pdi_linux_pcidev == dev) | ||
113 | return pcidev; | ||
114 | } | ||
115 | return NULL; | ||
116 | } | ||
117 | |||
118 | /* Older PROM flush WAR | ||
119 | * | ||
120 | * 01/16/06 -- This WAR will be in place until a new official PROM is released. | ||
121 | * Additionally note that the struct sn_flush_device_war also has to be | ||
122 | * removed from arch/ia64/sn/include/xtalk/hubdev.h | ||
123 | */ | ||
124 | |||
125 | static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | ||
126 | struct sn_flush_device_common *common) | ||
127 | { | ||
128 | struct sn_flush_device_war *war_list; | ||
129 | struct sn_flush_device_war *dev_entry; | ||
130 | struct ia64_sal_retval isrv = {0,0,0,0}; | ||
131 | |||
132 | printk_once(KERN_WARNING | ||
133 | "PROM version < 4.50 -- implementing old PROM flush WAR\n"); | ||
134 | |||
135 | war_list = kcalloc(DEV_PER_WIDGET, sizeof(*war_list), GFP_KERNEL); | ||
136 | BUG_ON(!war_list); | ||
137 | |||
138 | SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, | ||
139 | nasid, widget, __pa(war_list), 0, 0, 0 ,0); | ||
140 | if (isrv.status) | ||
141 | panic("sn_device_fixup_war failed: %s\n", | ||
142 | ia64_sal_strerror(isrv.status)); | ||
143 | |||
144 | dev_entry = war_list + device; | ||
145 | memcpy(common,dev_entry, sizeof(*common)); | ||
146 | kfree(war_list); | ||
147 | |||
148 | return isrv.status; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * sn_common_hubdev_init() - This routine is called to initialize the HUB data | ||
153 | * structure for each node in the system. | ||
154 | */ | ||
155 | void __init | ||
156 | sn_common_hubdev_init(struct hubdev_info *hubdev) | ||
157 | { | ||
158 | |||
159 | struct sn_flush_device_kernel *sn_flush_device_kernel; | ||
160 | struct sn_flush_device_kernel *dev_entry; | ||
161 | s64 status; | ||
162 | int widget, device, size; | ||
163 | |||
164 | /* Attach the error interrupt handlers */ | ||
165 | if (hubdev->hdi_nasid & 1) /* If TIO */ | ||
166 | ice_error_init(hubdev); | ||
167 | else | ||
168 | hub_error_init(hubdev); | ||
169 | |||
170 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) | ||
171 | hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; | ||
172 | |||
173 | if (!hubdev->hdi_flush_nasid_list.widget_p) | ||
174 | return; | ||
175 | |||
176 | size = (HUB_WIDGET_ID_MAX + 1) * | ||
177 | sizeof(struct sn_flush_device_kernel *); | ||
178 | hubdev->hdi_flush_nasid_list.widget_p = | ||
179 | kzalloc(size, GFP_KERNEL); | ||
180 | BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); | ||
181 | |||
182 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { | ||
183 | size = DEV_PER_WIDGET * | ||
184 | sizeof(struct sn_flush_device_kernel); | ||
185 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); | ||
186 | BUG_ON(!sn_flush_device_kernel); | ||
187 | |||
188 | dev_entry = sn_flush_device_kernel; | ||
189 | for (device = 0; device < DEV_PER_WIDGET; | ||
190 | device++, dev_entry++) { | ||
191 | size = sizeof(struct sn_flush_device_common); | ||
192 | dev_entry->common = kzalloc(size, GFP_KERNEL); | ||
193 | BUG_ON(!dev_entry->common); | ||
194 | if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) | ||
195 | status = sal_get_device_dmaflush_list( | ||
196 | hubdev->hdi_nasid, widget, device, | ||
197 | (u64)(dev_entry->common)); | ||
198 | else | ||
199 | status = sn_device_fixup_war(hubdev->hdi_nasid, | ||
200 | widget, device, | ||
201 | dev_entry->common); | ||
202 | if (status != SALRET_OK) | ||
203 | panic("SAL call failed: %s\n", | ||
204 | ia64_sal_strerror(status)); | ||
205 | |||
206 | spin_lock_init(&dev_entry->sfdl_flush_lock); | ||
207 | } | ||
208 | |||
209 | if (sn_flush_device_kernel) | ||
210 | hubdev->hdi_flush_nasid_list.widget_p[widget] = | ||
211 | sn_flush_device_kernel; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | void sn_pci_unfixup_slot(struct pci_dev *dev) | ||
216 | { | ||
217 | struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; | ||
218 | |||
219 | sn_irq_unfixup(dev); | ||
220 | pci_dev_put(host_pci_dev); | ||
221 | pci_dev_put(dev); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * sn_pci_fixup_slot() | ||
226 | */ | ||
227 | void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info, | ||
228 | struct sn_irq_info *sn_irq_info) | ||
229 | { | ||
230 | int segment = pci_domain_nr(dev->bus); | ||
231 | struct pcibus_bussoft *bs; | ||
232 | struct pci_dev *host_pci_dev; | ||
233 | unsigned int bus_no, devfn; | ||
234 | |||
235 | pci_dev_get(dev); /* for the sysdata pointer */ | ||
236 | |||
237 | /* Add pcidev_info to list in pci_controller.platform_data */ | ||
238 | list_add_tail(&pcidev_info->pdi_list, | ||
239 | &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); | ||
240 | /* | ||
241 | * Using the PROMs values for the PCI host bus, get the Linux | ||
242 | * PCI host_pci_dev struct and set up host bus linkages | ||
243 | */ | ||
244 | |||
245 | bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; | ||
246 | devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff; | ||
247 | host_pci_dev = pci_get_domain_bus_and_slot(segment, bus_no, devfn); | ||
248 | |||
249 | pcidev_info->host_pci_dev = host_pci_dev; | ||
250 | pcidev_info->pdi_linux_pcidev = dev; | ||
251 | pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev); | ||
252 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | ||
253 | pcidev_info->pdi_pcibus_info = bs; | ||
254 | |||
255 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { | ||
256 | SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type]; | ||
257 | } else { | ||
258 | SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider; | ||
259 | } | ||
260 | |||
261 | /* Only set up IRQ stuff if this device has a host bus context */ | ||
262 | if (bs && sn_irq_info->irq_irq) { | ||
263 | pcidev_info->pdi_sn_irq_info = sn_irq_info; | ||
264 | dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq; | ||
265 | sn_irq_fixup(dev, sn_irq_info); | ||
266 | } else { | ||
267 | pcidev_info->pdi_sn_irq_info = NULL; | ||
268 | kfree(sn_irq_info); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * sn_common_bus_fixup - Perform platform specific bus fixup. | ||
274 | * Execute the ASIC specific fixup routine | ||
275 | * for this bus. | ||
276 | */ | ||
277 | void | ||
278 | sn_common_bus_fixup(struct pci_bus *bus, | ||
279 | struct pcibus_bussoft *prom_bussoft_ptr) | ||
280 | { | ||
281 | int cnode; | ||
282 | struct pci_controller *controller; | ||
283 | struct hubdev_info *hubdev_info; | ||
284 | int nasid; | ||
285 | void *provider_soft; | ||
286 | struct sn_pcibus_provider *provider; | ||
287 | struct sn_platform_data *sn_platform_data; | ||
288 | |||
289 | controller = PCI_CONTROLLER(bus); | ||
290 | /* | ||
291 | * Per-provider fixup. Copies the bus soft structure from prom | ||
292 | * to local area and links SN_PCIBUS_BUSSOFT(). | ||
293 | */ | ||
294 | |||
295 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { | ||
296 | printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d", | ||
297 | prom_bussoft_ptr->bs_asic_type); | ||
298 | return; | ||
299 | } | ||
300 | |||
301 | if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) | ||
302 | return; /* no further fixup necessary */ | ||
303 | |||
304 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; | ||
305 | if (provider == NULL) | ||
306 | panic("sn_common_bus_fixup: No provider registered for this asic type, %d", | ||
307 | prom_bussoft_ptr->bs_asic_type); | ||
308 | |||
309 | if (provider->bus_fixup) | ||
310 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, | ||
311 | controller); | ||
312 | else | ||
313 | provider_soft = NULL; | ||
314 | |||
315 | /* | ||
316 | * Generic bus fixup goes here. Don't reference prom_bussoft_ptr | ||
317 | * after this point. | ||
318 | */ | ||
319 | controller->platform_data = kzalloc(sizeof(struct sn_platform_data), | ||
320 | GFP_KERNEL); | ||
321 | BUG_ON(controller->platform_data == NULL); | ||
322 | sn_platform_data = | ||
323 | (struct sn_platform_data *) controller->platform_data; | ||
324 | sn_platform_data->provider_soft = provider_soft; | ||
325 | INIT_LIST_HEAD(&((struct sn_platform_data *) | ||
326 | controller->platform_data)->pcidev_info); | ||
327 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); | ||
328 | cnode = nasid_to_cnodeid(nasid); | ||
329 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | ||
330 | SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = | ||
331 | &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); | ||
332 | |||
333 | /* | ||
334 | * If the node information we obtained during the fixup phase is | ||
335 | * invalid then set controller->node to -1 (undetermined) | ||
336 | */ | ||
337 | if (controller->node >= num_online_nodes()) { | ||
338 | struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); | ||
339 | |||
340 | printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u " | ||
341 | "L_IO=%llx L_MEM=%llx BASE=%llx\n", | ||
342 | b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, | ||
343 | b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); | ||
344 | printk(KERN_WARNING "on node %d but only %d nodes online. " | ||
345 | "Association set to undetermined.\n", | ||
346 | controller->node, num_online_nodes()); | ||
347 | controller->node = -1; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | void sn_bus_store_sysdata(struct pci_dev *dev) | ||
352 | { | ||
353 | struct sysdata_el *element; | ||
354 | |||
355 | element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); | ||
356 | if (!element) { | ||
357 | dev_dbg(&dev->dev, "%s: out of memory!\n", __func__); | ||
358 | return; | ||
359 | } | ||
360 | element->sysdata = SN_PCIDEV_INFO(dev); | ||
361 | list_add(&element->entry, &sn_sysdata_list); | ||
362 | } | ||
363 | |||
364 | void sn_bus_free_sysdata(void) | ||
365 | { | ||
366 | struct sysdata_el *element; | ||
367 | struct list_head *list, *safe; | ||
368 | |||
369 | list_for_each_safe(list, safe, &sn_sysdata_list) { | ||
370 | element = list_entry(list, struct sysdata_el, entry); | ||
371 | list_del(&element->entry); | ||
372 | list_del(&(((struct pcidev_info *) | ||
373 | (element->sysdata))->pdi_list)); | ||
374 | kfree(element->sysdata); | ||
375 | kfree(element); | ||
376 | } | ||
377 | return; | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * hubdev_init_node() - Creates the HUB data structure and links it to its | ||
382 | * own NODE-specific data area. | ||
383 | */ | ||
384 | void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node) | ||
385 | { | ||
386 | struct hubdev_info *hubdev_info; | ||
387 | int size; | ||
388 | |||
389 | size = sizeof(struct hubdev_info); | ||
390 | |||
391 | if (node >= num_online_nodes()) /* Headless/memless IO nodes */ | ||
392 | node = 0; | ||
393 | |||
394 | hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, | ||
395 | SMP_CACHE_BYTES, | ||
396 | node); | ||
397 | if (!hubdev_info) | ||
398 | panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n", | ||
399 | __func__, size, SMP_CACHE_BYTES, node); | ||
400 | |||
401 | npda->pdinfo = (void *)hubdev_info; | ||
402 | } | ||
403 | |||
404 | geoid_t | ||
405 | cnodeid_get_geoid(cnodeid_t cnode) | ||
406 | { | ||
407 | struct hubdev_info *hubdev; | ||
408 | |||
409 | hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | ||
410 | return hubdev->hdi_geoid; | ||
411 | } | ||
412 | |||
413 | void sn_generate_path(struct pci_bus *pci_bus, char *address) | ||
414 | { | ||
415 | nasid_t nasid; | ||
416 | cnodeid_t cnode; | ||
417 | geoid_t geoid; | ||
418 | moduleid_t moduleid; | ||
419 | u16 bricktype; | ||
420 | |||
421 | nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); | ||
422 | cnode = nasid_to_cnodeid(nasid); | ||
423 | geoid = cnodeid_get_geoid(cnode); | ||
424 | moduleid = geo_module(geoid); | ||
425 | |||
426 | sprintf(address, "module_%c%c%c%c%.2d", | ||
427 | '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)), | ||
428 | '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)), | ||
429 | '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)), | ||
430 | MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid)); | ||
431 | |||
432 | /* Tollhouse requires slot id to be displayed */ | ||
433 | bricktype = MODULE_GET_BTYPE(moduleid); | ||
434 | if ((bricktype == L1_BRICKTYPE_191010) || | ||
435 | (bricktype == L1_BRICKTYPE_1932)) | ||
436 | sprintf(address + strlen(address), "^%d", | ||
437 | geo_slot(geoid)); | ||
438 | } | ||
439 | |||
440 | void sn_pci_fixup_bus(struct pci_bus *bus) | ||
441 | { | ||
442 | |||
443 | if (SN_ACPI_BASE_SUPPORT()) | ||
444 | sn_acpi_bus_fixup(bus); | ||
445 | else | ||
446 | sn_bus_fixup(bus); | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * sn_io_early_init - Perform early IO (and some non-IO) initialization. | ||
451 | * In particular, setup the sn_pci_provider[] array. | ||
452 | * This needs to be done prior to any bus scanning | ||
453 | * (acpi_scan_init()) in the ACPI case, as the SN | ||
454 | * bus fixup code will reference the array. | ||
455 | */ | ||
456 | static int __init | ||
457 | sn_io_early_init(void) | ||
458 | { | ||
459 | int i; | ||
460 | |||
461 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) | ||
462 | return 0; | ||
463 | |||
464 | /* we set the acpi revision to that of the DSDT table OEM rev. */ | ||
465 | { | ||
466 | struct acpi_table_header *header = NULL; | ||
467 | |||
468 | acpi_get_table(ACPI_SIG_DSDT, 1, &header); | ||
469 | BUG_ON(header == NULL); | ||
470 | sn_acpi_rev = header->oem_revision; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * prime sn_pci_provider[]. Individual provider init routines will | ||
475 | * override their respective default entries. | ||
476 | */ | ||
477 | |||
478 | for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++) | ||
479 | sn_pci_provider[i] = &sn_pci_default_provider; | ||
480 | |||
481 | pcibr_init_provider(); | ||
482 | tioca_init_provider(); | ||
483 | tioce_init_provider(); | ||
484 | |||
485 | sn_irq_lh_init(); | ||
486 | INIT_LIST_HEAD(&sn_sysdata_list); | ||
487 | sn_init_cpei_timer(); | ||
488 | |||
489 | #ifdef CONFIG_PROC_FS | ||
490 | register_sn_procfs(); | ||
491 | #endif | ||
492 | |||
493 | { | ||
494 | struct acpi_table_header *header; | ||
495 | (void)acpi_get_table(ACPI_SIG_DSDT, 1, &header); | ||
496 | printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", | ||
497 | header->oem_revision); | ||
498 | } | ||
499 | if (SN_ACPI_BASE_SUPPORT()) | ||
500 | sn_io_acpi_init(); | ||
501 | else | ||
502 | sn_io_init(); | ||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | arch_initcall(sn_io_early_init); | ||
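The provider-priming loop above fills every sn_pci_provider[] slot with the default provider before the ASIC-specific init routines override their own entries, so later lookups never dereference a NULL pointer. A minimal standalone sketch of the same pattern (the names and slot count below are illustrative, not the kernel's):

    #include <stdio.h>

    #define MAX_TYPES 4

    struct provider { const char *name; };

    static struct provider default_provider = { "default" };
    static struct provider pic_provider     = { "pic" };
    static struct provider *providers[MAX_TYPES];

    int main(void)
    {
        int i;

        /* prime every slot with the default ... */
        for (i = 0; i < MAX_TYPES; i++)
            providers[i] = &default_provider;

        /* ... then let a specific init routine override its own slot */
        providers[1] = &pic_provider;

        for (i = 0; i < MAX_TYPES; i++)
            printf("type %d -> %s\n", i, providers[i]->name);
        return 0;
    }
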
507 | |||
508 | /* | ||
509 | * sn_io_late_init() - Perform any final platform specific IO initialization. | ||
510 | */ | ||
511 | |||
512 | int __init | ||
513 | sn_io_late_init(void) | ||
514 | { | ||
515 | struct pci_bus *bus; | ||
516 | struct pcibus_bussoft *bussoft; | ||
517 | cnodeid_t cnode; | ||
518 | nasid_t nasid; | ||
519 | cnodeid_t near_cnode; | ||
520 | |||
521 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) | ||
522 | return 0; | ||
523 | |||
524 | /* | ||
525 | * Setup closest node in pci_controller->node for | ||
526 | * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using | ||
527 | * info from the PROM). | ||
528 | */ | ||
529 | bus = NULL; | ||
530 | while ((bus = pci_find_next_bus(bus)) != NULL) { | ||
531 | bussoft = SN_PCIBUS_BUSSOFT(bus); | ||
532 | nasid = NASID_GET(bussoft->bs_base); | ||
533 | cnode = nasid_to_cnodeid(nasid); | ||
534 | if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) || | ||
535 | (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE) || | ||
536 | (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC)) { | ||
537 | /* PCI Bridge: find nearest node with CPUs */ | ||
538 | int e = sn_hwperf_get_nearest_node(cnode, NULL, | ||
539 | &near_cnode); | ||
540 | if (e < 0) { | ||
541 | near_cnode = (cnodeid_t)-1; /* use any node */ | ||
542 | printk(KERN_WARNING "sn_io_late_init: failed " | ||
543 | "to find near node with CPUs for " | ||
544 | "node %d, err=%d\n", cnode, e); | ||
545 | } | ||
546 | PCI_CONTROLLER(bus)->node = near_cnode; | ||
547 | } | ||
548 | } | ||
549 | |||
550 | sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */ | ||
551 | |||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | fs_initcall(sn_io_late_init); | ||
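sn_io_late_init() records, for each PIC/TIOCP/TIOCE bus, the closest node that has CPUs in pci_controller->node, and falls back to -1 ("any node") when the lookup fails. A standalone sketch of that fallback pattern, with a stub standing in for sn_hwperf_get_nearest_node():

    #include <stdio.h>

    /* stub standing in for the SAL-backed nearest-node lookup */
    static int get_nearest_node(int node, int *near)
    {
        if (node < 0)
            return -1;      /* lookup failed */
        *near = node;       /* pretend the node itself is nearest */
        return 0;
    }

    int main(void)
    {
        int nodes[] = { 0, 2, -5 };     /* -5: simulate a failing lookup */
        int i;

        for (i = 0; i < 3; i++) {
            int near;

            if (get_nearest_node(nodes[i], &near) < 0)
                near = -1;              /* "any node" fallback */
            printf("bus on node %d -> controller node %d\n",
                   nodes[i], near);
        }
        return 0;
    }
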
556 | |||
557 | EXPORT_SYMBOL(sn_pci_unfixup_slot); | ||
558 | EXPORT_SYMBOL(sn_bus_store_sysdata); | ||
559 | EXPORT_SYMBOL(sn_bus_free_sysdata); | ||
560 | EXPORT_SYMBOL(sn_generate_path); | ||
561 | |||
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c deleted file mode 100644 index d63809a6adfa..000000000000 --- a/arch/ia64/sn/kernel/io_init.c +++ /dev/null | |||
@@ -1,308 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/slab.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <asm/sn/types.h> | ||
12 | #include <asm/sn/addrs.h> | ||
13 | #include <asm/sn/io.h> | ||
14 | #include <asm/sn/module.h> | ||
15 | #include <asm/sn/intr.h> | ||
16 | #include <asm/sn/pcibus_provider_defs.h> | ||
17 | #include <asm/sn/pcidev.h> | ||
18 | #include <asm/sn/sn_sal.h> | ||
19 | #include "xtalk/hubdev.h" | ||
20 | |||
21 | /* | ||
22 | * The code in this file will only be executed when running with | ||
23 | * a PROM that does _not_ have base ACPI IO support. | ||
24 | * (i.e., SN_ACPI_BASE_SUPPORT() == 0) | ||
25 | */ | ||
26 | |||
27 | static int max_segment_number; /* Default highest segment number */ | ||
28 | static int max_pcibus_number = 255; /* Default highest pci bus number */ | ||
29 | |||
30 | |||
31 | /* | ||
32 | * Retrieve the hub device info structure for the given nasid. | ||
33 | */ | ||
34 | static inline u64 sal_get_hubdev_info(u64 handle, u64 address) | ||
35 | { | ||
36 | struct ia64_sal_retval ret_stuff; | ||
37 | ret_stuff.status = 0; | ||
38 | ret_stuff.v0 = 0; | ||
39 | |||
40 | SAL_CALL_NOLOCK(ret_stuff, | ||
41 | (u64) SN_SAL_IOIF_GET_HUBDEV_INFO, | ||
42 | (u64) handle, (u64) address, 0, 0, 0, 0, 0); | ||
43 | return ret_stuff.v0; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Retrieve the pci bus information given the bus number. | ||
48 | */ | ||
49 | static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) | ||
50 | { | ||
51 | struct ia64_sal_retval ret_stuff; | ||
52 | ret_stuff.status = 0; | ||
53 | ret_stuff.v0 = 0; | ||
54 | |||
55 | SAL_CALL_NOLOCK(ret_stuff, | ||
56 | (u64) SN_SAL_IOIF_GET_PCIBUS_INFO, | ||
57 | (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0); | ||
58 | return ret_stuff.v0; | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * Retrieve the pci device information given the bus and device|function number. | ||
63 | */ | ||
64 | static inline u64 | ||
65 | sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, | ||
66 | u64 sn_irq_info) | ||
67 | { | ||
68 | struct ia64_sal_retval ret_stuff; | ||
69 | ret_stuff.status = 0; | ||
70 | ret_stuff.v0 = 0; | ||
71 | |||
72 | SAL_CALL_NOLOCK(ret_stuff, | ||
73 | (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, | ||
74 | (u64) segment, (u64) bus_number, (u64) devfn, | ||
75 | (u64) pci_dev, | ||
76 | sn_irq_info, 0, 0); | ||
77 | return ret_stuff.v0; | ||
78 | } | ||
79 | |||
80 | |||
81 | /* | ||
82 | * sn_fixup_ionodes() - This routine initializes the HUB data structure for | ||
83 | * each node in the system. This function is only | ||
84 | * executed when running with a non-ACPI capable PROM. | ||
85 | */ | ||
86 | static void __init sn_fixup_ionodes(void) | ||
87 | { | ||
88 | |||
89 | struct hubdev_info *hubdev; | ||
90 | u64 status; | ||
91 | u64 nasid; | ||
92 | int i; | ||
93 | extern void sn_common_hubdev_init(struct hubdev_info *); | ||
94 | |||
95 | /* | ||
96 | * Get SGI Specific HUB chipset information. | ||
97 | * Inform Prom that this kernel can support domain bus numbering. | ||
98 | */ | ||
99 | for (i = 0; i < num_cnodes; i++) { | ||
100 | hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); | ||
101 | nasid = cnodeid_to_nasid(i); | ||
102 | hubdev->max_segment_number = 0xffffffff; | ||
103 | hubdev->max_pcibus_number = 0xff; | ||
104 | status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev)); | ||
105 | if (status) | ||
106 | continue; | ||
107 | |||
108 | /* Save the largest Domain and pcibus numbers found. */ | ||
109 | if (hubdev->max_segment_number) { | ||
110 | /* | ||
111 | * Dealing with a Prom that supports segments. | ||
112 | */ | ||
113 | max_segment_number = hubdev->max_segment_number; | ||
114 | max_pcibus_number = hubdev->max_pcibus_number; | ||
115 | } | ||
116 | sn_common_hubdev_init(hubdev); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * sn_legacy_pci_window_fixup - Setup PCI resources for | ||
122 | * legacy IO and MEM space. This needs to | ||
123 | * be done here, as the PROM does not have | ||
124 | * ACPI support defining the root buses | ||
125 | * and their resources (_CRS). | ||
126 | */ | ||
127 | static void | ||
128 | sn_legacy_pci_window_fixup(struct resource *res, | ||
129 | u64 legacy_io, u64 legacy_mem) | ||
130 | { | ||
131 | res[0].name = "legacy_io"; | ||
132 | res[0].flags = IORESOURCE_IO; | ||
133 | res[0].start = legacy_io; | ||
134 | res[0].end = res[0].start + 0xffff; | ||
135 | res[0].parent = &ioport_resource; | ||
136 | res[1].name = "legacy_mem"; | ||
137 | res[1].flags = IORESOURCE_MEM; | ||
138 | res[1].start = legacy_mem; | ||
139 | res[1].end = res[1].start + (1024 * 1024) - 1; | ||
140 | res[1].parent = &iomem_resource; | ||
141 | } | ||
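The legacy windows set up above are fixed-size: 64 KiB of I/O port space and 1 MiB of memory space, each offset by a base address handed over by the PROM. A small worked example of the arithmetic (the base values below are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long legacy_io  = 0xc000000000000000ULL;  /* example base */
        unsigned long long legacy_mem = 0xc000000100000000ULL;  /* example base */

        /* I/O window: base .. base + 0xffff (64 KiB) */
        printf("io : %#llx - %#llx\n", legacy_io, legacy_io + 0xffff);
        /* MEM window: base .. base + 1 MiB - 1 */
        printf("mem: %#llx - %#llx\n", legacy_mem,
               legacy_mem + (1024 * 1024) - 1);
        return 0;
    }
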
142 | |||
143 | /* | ||
144 | * sn_io_slot_fixup() - We are not running with an ACPI capable PROM, | ||
145 | * and need to convert the pci_dev->resource | ||
146 | * 'start' and 'end' addresses to mapped addresses, | ||
147 | * and setup the pci_controller->window array entries. | ||
148 | */ | ||
149 | void | ||
150 | sn_io_slot_fixup(struct pci_dev *dev) | ||
151 | { | ||
152 | int idx; | ||
153 | struct resource *res; | ||
154 | unsigned long size; | ||
155 | struct pcidev_info *pcidev_info; | ||
156 | struct sn_irq_info *sn_irq_info; | ||
157 | int status; | ||
158 | |||
159 | pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); | ||
160 | if (!pcidev_info) | ||
161 | panic("%s: Unable to alloc memory for pcidev_info", __func__); | ||
162 | |||
163 | sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | ||
164 | if (!sn_irq_info) | ||
165 | panic("%s: Unable to alloc memory for sn_irq_info", __func__); | ||
166 | |||
167 | /* Call to retrieve pci device information needed by kernel. */ | ||
168 | status = sal_get_pcidev_info((u64) pci_domain_nr(dev), | ||
169 | (u64) dev->bus->number, | ||
170 | dev->devfn, | ||
171 | (u64) __pa(pcidev_info), | ||
172 | (u64) __pa(sn_irq_info)); | ||
173 | |||
174 | BUG_ON(status); /* Cannot get platform pci device information */ | ||
175 | |||
176 | |||
177 | /* Copy over PIO Mapped Addresses */ | ||
178 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { | ||
179 | if (!pcidev_info->pdi_pio_mapped_addr[idx]) | ||
180 | continue; | ||
181 | |||
182 | res = &dev->resource[idx]; | ||
183 | |||
184 | size = res->end - res->start; | ||
185 | if (size == 0) | ||
186 | continue; | ||
187 | |||
188 | res->start = pcidev_info->pdi_pio_mapped_addr[idx]; | ||
189 | res->end = res->start + size; | ||
190 | |||
191 | /* | ||
192 | * if it's already in the device structure, remove it before | ||
193 | * inserting | ||
194 | */ | ||
195 | if (res->parent && res->parent->child) | ||
196 | release_resource(res); | ||
197 | |||
198 | if (res->flags & IORESOURCE_IO) | ||
199 | insert_resource(&ioport_resource, res); | ||
200 | else | ||
201 | insert_resource(&iomem_resource, res); | ||
202 | /* | ||
203 | * If ROM, mark as shadowed in PROM. | ||
204 | */ | ||
205 | if (idx == PCI_ROM_RESOURCE) { | ||
206 | pci_disable_rom(dev); | ||
207 | res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW | | ||
208 | IORESOURCE_PCI_FIXED; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info); | ||
213 | } | ||
214 | EXPORT_SYMBOL(sn_io_slot_fixup); | ||
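The resource loop in sn_io_slot_fixup() rebases each BAR onto its PIO-mapped address while preserving its size. A standalone sketch of that rebasing step (the types and addresses here are illustrative):

    #include <stdio.h>

    struct res { unsigned long long start, end; };

    /* rebase a resource onto a new start address, preserving its size */
    static void rebase(struct res *r, unsigned long long mapped)
    {
        unsigned long long size = r->end - r->start;

        r->start = mapped;
        r->end   = r->start + size;
    }

    int main(void)
    {
        struct res bar = { 0x1000, 0x1fff };        /* example BAR, 4 KiB */

        rebase(&bar, 0xc000000020000000ULL);        /* example PIO address */
        printf("bar: %#llx - %#llx\n", bar.start, bar.end);
        return 0;
    }
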
215 | |||
216 | /* | ||
217 | * sn_pci_controller_fixup() - This routine sets up a bus's resources | ||
218 | * consistent with the Linux PCI abstraction layer. | ||
219 | */ | ||
220 | static void __init | ||
221 | sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | ||
222 | { | ||
223 | s64 status = 0; | ||
224 | struct pci_controller *controller; | ||
225 | struct pcibus_bussoft *prom_bussoft_ptr; | ||
226 | struct resource *res; | ||
227 | LIST_HEAD(resources); | ||
228 | |||
229 | status = sal_get_pcibus_info((u64) segment, (u64) busnum, | ||
230 | (u64) ia64_tpa(&prom_bussoft_ptr)); | ||
231 | if (status > 0) | ||
232 | return; /* bus # does not exist */ | ||
233 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | ||
234 | |||
235 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); | ||
236 | BUG_ON(!controller); | ||
237 | controller->segment = segment; | ||
238 | |||
239 | res = kcalloc(2, sizeof(struct resource), GFP_KERNEL); | ||
240 | BUG_ON(!res); | ||
241 | |||
242 | /* | ||
243 | * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup(). | ||
244 | * (platform_data will be overwritten later in sn_common_bus_fixup()) | ||
245 | */ | ||
246 | controller->platform_data = prom_bussoft_ptr; | ||
247 | |||
248 | sn_legacy_pci_window_fixup(res, | ||
249 | prom_bussoft_ptr->bs_legacy_io, | ||
250 | prom_bussoft_ptr->bs_legacy_mem); | ||
251 | pci_add_resource_offset(&resources, &res[0], | ||
252 | prom_bussoft_ptr->bs_legacy_io); | ||
253 | pci_add_resource_offset(&resources, &res[1], | ||
254 | prom_bussoft_ptr->bs_legacy_mem); | ||
255 | |||
256 | bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller, | ||
257 | &resources); | ||
258 | if (bus == NULL) { | ||
259 | kfree(res); | ||
260 | kfree(controller); | ||
261 | return; | ||
262 | } | ||
263 | pci_bus_add_devices(bus); | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * sn_bus_fixup | ||
268 | */ | ||
269 | void | ||
270 | sn_bus_fixup(struct pci_bus *bus) | ||
271 | { | ||
272 | struct pci_dev *pci_dev = NULL; | ||
273 | struct pcibus_bussoft *prom_bussoft_ptr; | ||
274 | |||
275 | if (!bus->parent) { /* If root bus */ | ||
276 | prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; | ||
277 | if (prom_bussoft_ptr == NULL) { | ||
278 | printk(KERN_ERR | ||
279 | "sn_bus_fixup: 0x%04x:0x%02x Unable to " | ||
280 | "obtain prom_bussoft_ptr\n", | ||
281 | pci_domain_nr(bus), bus->number); | ||
282 | return; | ||
283 | } | ||
284 | sn_common_bus_fixup(bus, prom_bussoft_ptr); | ||
285 | } | ||
286 | list_for_each_entry(pci_dev, &bus->devices, bus_list) { | ||
287 | sn_io_slot_fixup(pci_dev); | ||
288 | } | ||
289 | |||
290 | } | ||
291 | |||
292 | /* | ||
293 | * sn_io_init - PROM does not have ACPI support to define nodes or root buses, | ||
294 | * so we need to do things the hard way, including initiating the | ||
295 | * bus scanning ourselves. | ||
296 | */ | ||
297 | |||
298 | void __init sn_io_init(void) | ||
299 | { | ||
300 | int i, j; | ||
301 | |||
302 | sn_fixup_ionodes(); | ||
303 | |||
304 | /* busses are not known yet ... */ | ||
305 | for (i = 0; i <= max_segment_number; i++) | ||
306 | for (j = 0; j <= max_pcibus_number; j++) | ||
307 | sn_pci_controller_fixup(i, j, NULL); | ||
308 | } | ||
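Without ACPI-described root buses, sn_io_init() simply probes every (segment, bus) pair up to the limits reported by the PROM; sal_get_pcibus_info() returning a positive status is what prunes the buses that do not exist. A sketch of that brute-force scan with a stub probe:

    #include <stdio.h>

    /* stub: pretend only segment 0, buses 0-2 exist */
    static int bus_exists(int seg, int bus)
    {
        return seg == 0 && bus <= 2;
    }

    int main(void)
    {
        int max_segment = 1, max_bus = 4;   /* illustrative limits */
        int s, b;

        for (s = 0; s <= max_segment; s++)
            for (b = 0; b <= max_bus; b++)
                if (bus_exists(s, b))
                    printf("scan %04x:%02x\n", s, b);
        return 0;
    }
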
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c deleted file mode 100644 index 2b22a71663c1..000000000000 --- a/arch/ia64/sn/kernel/iomv.c +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/acpi.h> | ||
11 | #include <asm/io.h> | ||
12 | #include <asm/delay.h> | ||
13 | #include <asm/vga.h> | ||
14 | #include <asm/sn/nodepda.h> | ||
15 | #include <asm/sn/simulator.h> | ||
16 | #include <asm/sn/pda.h> | ||
17 | #include <asm/sn/sn_cpuid.h> | ||
18 | #include <asm/sn/shub_mmr.h> | ||
19 | #include <asm/sn/acpi.h> | ||
20 | |||
21 | #define IS_LEGACY_VGA_IOPORT(p) \ | ||
22 | (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df)) | ||
23 | |||
24 | /** | ||
25 | * sn_io_addr - convert an in/out port to an i/o address | ||
26 | * @port: port to convert | ||
27 | * | ||
28 | * Legacy in/out instructions are converted to ld/st instructions | ||
29 | * on IA64. This routine will convert a port number into a valid | ||
30 | * SN i/o address. Used by sn_in*() and sn_out*(). | ||
31 | */ | ||
32 | |||
33 | void *sn_io_addr(unsigned long port) | ||
34 | { | ||
35 | if (!IS_RUNNING_ON_SIMULATOR()) { | ||
36 | if (IS_LEGACY_VGA_IOPORT(port)) | ||
37 | return (__ia64_mk_io_addr(port)); | ||
38 | /* On sn2, legacy I/O ports don't point at anything */ | ||
39 | if (port < (64 * 1024)) | ||
40 | return NULL; | ||
41 | if (SN_ACPI_BASE_SUPPORT()) | ||
42 | return (__ia64_mk_io_addr(port)); | ||
43 | else | ||
44 | return ((void *)(port | __IA64_UNCACHED_OFFSET)); | ||
45 | } else { | ||
46 | /* but the simulator uses them... */ | ||
47 | unsigned long addr; | ||
48 | |||
49 | /* | ||
50 | * word align port, but need more than 10 bits | ||
51 | * for accessing registers in bedrock local block | ||
52 | * (so we don't do port&0xfff) | ||
53 | */ | ||
54 | addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12); | ||
55 | if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7) | ||
56 | addr |= port; | ||
57 | return (void *)addr; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | EXPORT_SYMBOL(sn_io_addr); | ||
62 | |||
63 | /** | ||
64 | * __sn_mmiowb - I/O space memory barrier | ||
65 | * | ||
66 | * See arch/ia64/include/asm/io.h and Documentation/driver-api/device-io.rst | ||
67 | * for details. | ||
68 | * | ||
69 | * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear. | ||
70 | * See PV 871084 for details about the WAR about zero value. | ||
71 | * | ||
72 | */ | ||
73 | void __sn_mmiowb(void) | ||
74 | { | ||
75 | volatile unsigned long *adr = pda->pio_write_status_addr; | ||
76 | unsigned long val = pda->pio_write_status_val; | ||
77 | |||
78 | while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val) | ||
79 | cpu_relax(); | ||
80 | } | ||
81 | |||
82 | EXPORT_SYMBOL(__sn_mmiowb); | ||
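On real hardware, sn_io_addr() above sorts a port into one of three cases: the legacy VGA ranges keep the generic ia64 port mapping, any other port below 64K maps to nothing, and larger values either get the generic mapping (when the PROM has base ACPI support) or are treated as uncached physical addresses. A compact standalone sketch of the non-ACPI classification:

    #include <stdio.h>

    #define IS_LEGACY_VGA(p) \
        (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))

    static const char *classify(unsigned long port)
    {
        if (IS_LEGACY_VGA(port))
            return "generic ia64 I/O mapping";
        if (port < 64 * 1024)
            return "unmapped (NULL)";
        return "uncached physical address";
    }

    int main(void)
    {
        unsigned long ports[] = { 0x3c0, 0x1f0, 0x100000 };
        int i;

        for (i = 0; i < 3; i++)
            printf("%#lx -> %s\n", ports[i], classify(ports[i]));
        return 0;
    }
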
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c deleted file mode 100644 index d9b576df4f82..000000000000 --- a/arch/ia64/sn/kernel/irq.c +++ /dev/null | |||
@@ -1,489 +0,0 @@ | |||
1 | /* | ||
2 | * Platform dependent support for SGI SN | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/irq.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/rculist.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <asm/sn/addrs.h> | ||
17 | #include <asm/sn/arch.h> | ||
18 | #include <asm/sn/intr.h> | ||
19 | #include <asm/sn/pcibr_provider.h> | ||
20 | #include <asm/sn/pcibus_provider_defs.h> | ||
21 | #include <asm/sn/pcidev.h> | ||
22 | #include <asm/sn/shub_mmr.h> | ||
23 | #include <asm/sn/sn_sal.h> | ||
24 | #include <asm/sn/sn_feature_sets.h> | ||
25 | |||
26 | static void register_intr_pda(struct sn_irq_info *sn_irq_info); | ||
27 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); | ||
28 | |||
29 | extern int sn_ioif_inited; | ||
30 | struct list_head **sn_irq_lh; | ||
31 | static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ | ||
32 | |||
33 | u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, | ||
34 | struct sn_irq_info *sn_irq_info, | ||
35 | int req_irq, nasid_t req_nasid, | ||
36 | int req_slice) | ||
37 | { | ||
38 | struct ia64_sal_retval ret_stuff; | ||
39 | ret_stuff.status = 0; | ||
40 | ret_stuff.v0 = 0; | ||
41 | |||
42 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, | ||
43 | (u64) SAL_INTR_ALLOC, (u64) local_nasid, | ||
44 | (u64) local_widget, __pa(sn_irq_info), (u64) req_irq, | ||
45 | (u64) req_nasid, (u64) req_slice); | ||
46 | |||
47 | return ret_stuff.status; | ||
48 | } | ||
49 | |||
50 | void sn_intr_free(nasid_t local_nasid, int local_widget, | ||
51 | struct sn_irq_info *sn_irq_info) | ||
52 | { | ||
53 | struct ia64_sal_retval ret_stuff; | ||
54 | ret_stuff.status = 0; | ||
55 | ret_stuff.v0 = 0; | ||
56 | |||
57 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, | ||
58 | (u64) SAL_INTR_FREE, (u64) local_nasid, | ||
59 | (u64) local_widget, (u64) sn_irq_info->irq_irq, | ||
60 | (u64) sn_irq_info->irq_cookie, 0, 0); | ||
61 | } | ||
62 | |||
63 | u64 sn_intr_redirect(nasid_t local_nasid, int local_widget, | ||
64 | struct sn_irq_info *sn_irq_info, | ||
65 | nasid_t req_nasid, int req_slice) | ||
66 | { | ||
67 | struct ia64_sal_retval ret_stuff; | ||
68 | ret_stuff.status = 0; | ||
69 | ret_stuff.v0 = 0; | ||
70 | |||
71 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, | ||
72 | (u64) SAL_INTR_REDIRECT, (u64) local_nasid, | ||
73 | (u64) local_widget, __pa(sn_irq_info), | ||
74 | (u64) req_nasid, (u64) req_slice, 0); | ||
75 | |||
76 | return ret_stuff.status; | ||
77 | } | ||
78 | |||
79 | static unsigned int sn_startup_irq(struct irq_data *data) | ||
80 | { | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void sn_shutdown_irq(struct irq_data *data) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | extern void ia64_mca_register_cpev(int); | ||
89 | |||
90 | static void sn_disable_irq(struct irq_data *data) | ||
91 | { | ||
92 | if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) | ||
93 | ia64_mca_register_cpev(0); | ||
94 | } | ||
95 | |||
96 | static void sn_enable_irq(struct irq_data *data) | ||
97 | { | ||
98 | if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) | ||
99 | ia64_mca_register_cpev(data->irq); | ||
100 | } | ||
101 | |||
102 | static void sn_ack_irq(struct irq_data *data) | ||
103 | { | ||
104 | u64 event_occurred, mask; | ||
105 | unsigned int irq = data->irq & 0xff; | ||
106 | |||
107 | event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); | ||
108 | mask = event_occurred & SH_ALL_INT_MASK; | ||
109 | HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); | ||
110 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); | ||
111 | |||
112 | irq_move_irq(data); | ||
113 | } | ||
114 | |||
115 | struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, | ||
116 | nasid_t nasid, int slice) | ||
117 | { | ||
118 | int vector; | ||
119 | int cpuid; | ||
120 | #ifdef CONFIG_SMP | ||
121 | int cpuphys; | ||
122 | #endif | ||
123 | int64_t bridge; | ||
124 | int local_widget, status; | ||
125 | nasid_t local_nasid; | ||
126 | struct sn_irq_info *new_irq_info; | ||
127 | struct sn_pcibus_provider *pci_provider; | ||
128 | |||
129 | bridge = (u64) sn_irq_info->irq_bridge; | ||
130 | if (!bridge) { | ||
131 | return NULL; /* irq is not a device interrupt */ | ||
132 | } | ||
133 | |||
134 | local_nasid = NASID_GET(bridge); | ||
135 | |||
136 | if (local_nasid & 1) | ||
137 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | ||
138 | else | ||
139 | local_widget = SWIN_WIDGETNUM(bridge); | ||
140 | vector = sn_irq_info->irq_irq; | ||
141 | |||
142 | /* Make use of SAL_INTR_REDIRECT if PROM supports it */ | ||
143 | status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice); | ||
144 | if (!status) { | ||
145 | new_irq_info = sn_irq_info; | ||
146 | goto finish_up; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * PROM does not support SAL_INTR_REDIRECT, or it failed. | ||
151 | * Revert to old method. | ||
152 | */ | ||
153 | new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info), | ||
154 | GFP_ATOMIC); | ||
155 | if (new_irq_info == NULL) | ||
156 | return NULL; | ||
157 | |||
158 | /* Free the old PROM new_irq_info structure */ | ||
159 | sn_intr_free(local_nasid, local_widget, new_irq_info); | ||
160 | unregister_intr_pda(new_irq_info); | ||
161 | |||
162 | /* allocate a new PROM new_irq_info struct */ | ||
163 | status = sn_intr_alloc(local_nasid, local_widget, | ||
164 | new_irq_info, vector, | ||
165 | nasid, slice); | ||
166 | |||
167 | /* SAL call failed */ | ||
168 | if (status) { | ||
169 | kfree(new_irq_info); | ||
170 | return NULL; | ||
171 | } | ||
172 | |||
173 | register_intr_pda(new_irq_info); | ||
174 | spin_lock(&sn_irq_info_lock); | ||
175 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | ||
176 | spin_unlock(&sn_irq_info_lock); | ||
177 | kfree_rcu(sn_irq_info, rcu); | ||
178 | |||
179 | |||
180 | finish_up: | ||
181 | /* Update the kernel's new_irq_info with new target info */ | ||
182 | cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, | ||
183 | new_irq_info->irq_slice); | ||
184 | new_irq_info->irq_cpuid = cpuid; | ||
185 | |||
186 | pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; | ||
187 | |||
188 | /* | ||
189 | * If this represents a line interrupt, target it. If it's | ||
190 | * an msi (irq_int_bit < 0), it's already targeted. | ||
191 | */ | ||
192 | if (new_irq_info->irq_int_bit >= 0 && | ||
193 | pci_provider && pci_provider->target_interrupt) | ||
194 | (pci_provider->target_interrupt)(new_irq_info); | ||
195 | |||
196 | #ifdef CONFIG_SMP | ||
197 | cpuphys = cpu_physical_id(cpuid); | ||
198 | set_irq_affinity_info((vector & 0xff), cpuphys, 0); | ||
199 | #endif | ||
200 | |||
201 | return new_irq_info; | ||
202 | } | ||
203 | |||
204 | static int sn_set_affinity_irq(struct irq_data *data, | ||
205 | const struct cpumask *mask, bool force) | ||
206 | { | ||
207 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; | ||
208 | unsigned int irq = data->irq; | ||
209 | nasid_t nasid; | ||
210 | int slice; | ||
211 | |||
212 | nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask)); | ||
213 | slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask)); | ||
214 | |||
215 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, | ||
216 | sn_irq_lh[irq], list) | ||
217 | (void)sn_retarget_vector(sn_irq_info, nasid, slice); | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | #ifdef CONFIG_SMP | ||
223 | void sn_set_err_irq_affinity(unsigned int irq) | ||
224 | { | ||
225 | /* | ||
226 | * On systems which support CPU disabling (SHub2), all error interrupts | ||
227 | * are targeted at the boot CPU. | ||
228 | */ | ||
229 | if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) | ||
230 | set_irq_affinity_info(irq, cpu_physical_id(0), 0); | ||
231 | } | ||
232 | #else | ||
233 | void sn_set_err_irq_affinity(unsigned int irq) { } | ||
234 | #endif | ||
235 | |||
236 | static void | ||
237 | sn_mask_irq(struct irq_data *data) | ||
238 | { | ||
239 | } | ||
240 | |||
241 | static void | ||
242 | sn_unmask_irq(struct irq_data *data) | ||
243 | { | ||
244 | } | ||
245 | |||
246 | struct irq_chip irq_type_sn = { | ||
247 | .name = "SN hub", | ||
248 | .irq_startup = sn_startup_irq, | ||
249 | .irq_shutdown = sn_shutdown_irq, | ||
250 | .irq_enable = sn_enable_irq, | ||
251 | .irq_disable = sn_disable_irq, | ||
252 | .irq_ack = sn_ack_irq, | ||
253 | .irq_mask = sn_mask_irq, | ||
254 | .irq_unmask = sn_unmask_irq, | ||
255 | .irq_set_affinity = sn_set_affinity_irq | ||
256 | }; | ||
257 | |||
258 | ia64_vector sn_irq_to_vector(int irq) | ||
259 | { | ||
260 | if (irq >= IA64_NUM_VECTORS) | ||
261 | return 0; | ||
262 | return (ia64_vector)irq; | ||
263 | } | ||
264 | |||
265 | unsigned int sn_local_vector_to_irq(u8 vector) | ||
266 | { | ||
267 | return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector)); | ||
268 | } | ||
269 | |||
270 | void sn_irq_init(void) | ||
271 | { | ||
272 | int i; | ||
273 | |||
274 | ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; | ||
275 | ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; | ||
276 | |||
277 | for (i = 0; i < NR_IRQS; i++) { | ||
278 | if (irq_get_chip(i) == &no_irq_chip) | ||
279 | irq_set_chip(i, &irq_type_sn); | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static void register_intr_pda(struct sn_irq_info *sn_irq_info) | ||
284 | { | ||
285 | int irq = sn_irq_info->irq_irq; | ||
286 | int cpu = sn_irq_info->irq_cpuid; | ||
287 | |||
288 | if (pdacpu(cpu)->sn_last_irq < irq) { | ||
289 | pdacpu(cpu)->sn_last_irq = irq; | ||
290 | } | ||
291 | |||
292 | if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) | ||
293 | pdacpu(cpu)->sn_first_irq = irq; | ||
294 | } | ||
295 | |||
296 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) | ||
297 | { | ||
298 | int irq = sn_irq_info->irq_irq; | ||
299 | int cpu = sn_irq_info->irq_cpuid; | ||
300 | struct sn_irq_info *tmp_irq_info; | ||
301 | int i, foundmatch; | ||
302 | |||
303 | rcu_read_lock(); | ||
304 | if (pdacpu(cpu)->sn_last_irq == irq) { | ||
305 | foundmatch = 0; | ||
306 | for (i = pdacpu(cpu)->sn_last_irq - 1; | ||
307 | i && !foundmatch; i--) { | ||
308 | list_for_each_entry_rcu(tmp_irq_info, | ||
309 | sn_irq_lh[i], | ||
310 | list) { | ||
311 | if (tmp_irq_info->irq_cpuid == cpu) { | ||
312 | foundmatch = 1; | ||
313 | break; | ||
314 | } | ||
315 | } | ||
316 | } | ||
317 | pdacpu(cpu)->sn_last_irq = i; | ||
318 | } | ||
319 | |||
320 | if (pdacpu(cpu)->sn_first_irq == irq) { | ||
321 | foundmatch = 0; | ||
322 | for (i = pdacpu(cpu)->sn_first_irq + 1; | ||
323 | i < NR_IRQS && !foundmatch; i++) { | ||
324 | list_for_each_entry_rcu(tmp_irq_info, | ||
325 | sn_irq_lh[i], | ||
326 | list) { | ||
327 | if (tmp_irq_info->irq_cpuid == cpu) { | ||
328 | foundmatch = 1; | ||
329 | break; | ||
330 | } | ||
331 | } | ||
332 | } | ||
333 | pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); | ||
334 | } | ||
335 | rcu_read_unlock(); | ||
336 | } | ||
337 | |||
338 | void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) | ||
339 | { | ||
340 | nasid_t nasid = sn_irq_info->irq_nasid; | ||
341 | int slice = sn_irq_info->irq_slice; | ||
342 | int cpu = nasid_slice_to_cpuid(nasid, slice); | ||
343 | #ifdef CONFIG_SMP | ||
344 | int cpuphys; | ||
345 | #endif | ||
346 | |||
347 | pci_dev_get(pci_dev); | ||
348 | sn_irq_info->irq_cpuid = cpu; | ||
349 | sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); | ||
350 | |||
351 | /* link it into the sn_irq[irq] list */ | ||
352 | spin_lock(&sn_irq_info_lock); | ||
353 | list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); | ||
354 | reserve_irq_vector(sn_irq_info->irq_irq); | ||
355 | if (sn_irq_info->irq_int_bit != -1) | ||
356 | irq_set_handler(sn_irq_info->irq_irq, handle_level_irq); | ||
357 | spin_unlock(&sn_irq_info_lock); | ||
358 | |||
359 | register_intr_pda(sn_irq_info); | ||
360 | #ifdef CONFIG_SMP | ||
361 | cpuphys = cpu_physical_id(cpu); | ||
362 | set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); | ||
363 | /* | ||
364 | * Affinity was set by the PROM, prevent it from | ||
365 | * being reset by the request_irq() path. | ||
366 | */ | ||
367 | irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq)); | ||
368 | #endif | ||
369 | } | ||
370 | |||
371 | void sn_irq_unfixup(struct pci_dev *pci_dev) | ||
372 | { | ||
373 | struct sn_irq_info *sn_irq_info; | ||
374 | |||
375 | /* Only cleanup IRQ stuff if this device has a host bus context */ | ||
376 | if (!SN_PCIDEV_BUSSOFT(pci_dev)) | ||
377 | return; | ||
378 | |||
379 | sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; | ||
380 | if (!sn_irq_info) | ||
381 | return; | ||
382 | if (!sn_irq_info->irq_irq) { | ||
383 | kfree(sn_irq_info); | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | unregister_intr_pda(sn_irq_info); | ||
388 | spin_lock(&sn_irq_info_lock); | ||
389 | list_del_rcu(&sn_irq_info->list); | ||
390 | spin_unlock(&sn_irq_info_lock); | ||
391 | if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) | ||
392 | free_irq_vector(sn_irq_info->irq_irq); | ||
393 | kfree_rcu(sn_irq_info, rcu); | ||
394 | pci_dev_put(pci_dev); | ||
395 | |||
396 | } | ||
397 | |||
398 | static inline void | ||
399 | sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) | ||
400 | { | ||
401 | struct sn_pcibus_provider *pci_provider; | ||
402 | |||
403 | pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; | ||
404 | |||
405 | /* Don't force an interrupt if the irq has been disabled */ | ||
406 | if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) && | ||
407 | pci_provider && pci_provider->force_interrupt) | ||
408 | (*pci_provider->force_interrupt)(sn_irq_info); | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * Check for lost interrupts. If the PIC int_status reg. says that | ||
413 | * an interrupt has been sent, but not handled, and the interrupt | ||
414 | * is not pending in either the cpu irr regs or in the soft irr regs, | ||
415 | * and the interrupt is not in service, then the interrupt may have | ||
416 | * been lost. Force an interrupt on that pin. It is possible that | ||
417 | * the interrupt is in flight, so we may generate a spurious interrupt, | ||
418 | * but we should never miss a real lost interrupt. | ||
419 | */ | ||
420 | static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | ||
421 | { | ||
422 | u64 regval; | ||
423 | struct pcidev_info *pcidev_info; | ||
424 | struct pcibus_info *pcibus_info; | ||
425 | |||
426 | /* | ||
427 | * Bridge types attached to TIO (anything but PIC) do not need this WAR | ||
428 | * since they do not target Shub II interrupt registers. If that | ||
429 | * ever changes, this check will need to be updated. | ||
430 | */ | ||
431 | if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) | ||
432 | return; | ||
433 | |||
434 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
435 | if (!pcidev_info) | ||
436 | return; | ||
437 | |||
438 | pcibus_info = | ||
439 | (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> | ||
440 | pdi_pcibus_info; | ||
441 | regval = pcireg_intr_status_get(pcibus_info); | ||
442 | |||
443 | if (!ia64_get_irr(irq_to_vector(irq))) { | ||
444 | if (!test_bit(irq, pda->sn_in_service_ivecs)) { | ||
445 | regval &= 0xff; | ||
446 | if (sn_irq_info->irq_int_bit & regval & | ||
447 | sn_irq_info->irq_last_intr) { | ||
448 | regval &= ~(sn_irq_info->irq_int_bit & regval); | ||
449 | sn_call_force_intr_provider(sn_irq_info); | ||
450 | } | ||
451 | } | ||
452 | } | ||
453 | sn_irq_info->irq_last_intr = regval; | ||
454 | } | ||
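sn_check_intr() only forces an interrupt when the bridge reports the pin asserted both on this scan and on the previous one, and then only after confirming it is neither pending in the IRR nor already in service. A standalone sketch of just the two-scan bit test (the IRR and in-service checks are omitted here):

    #include <stdio.h>

    /* decide whether a possibly lost interrupt should be re-forced */
    static int should_force(unsigned int int_bit, unsigned int status_now,
                            unsigned int status_last)
    {
        /* asserted on both this scan and the previous one */
        return (int_bit & status_now & status_last) != 0;
    }

    int main(void)
    {
        unsigned int bit = 0x4;

        printf("%d\n", should_force(bit, 0x4, 0x4));    /* 1: force it */
        printf("%d\n", should_force(bit, 0x4, 0x0));    /* 0: first sighting */
        return 0;
    }
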
455 | |||
456 | void sn_lb_int_war_check(void) | ||
457 | { | ||
458 | struct sn_irq_info *sn_irq_info; | ||
459 | int i; | ||
460 | |||
461 | if (!sn_ioif_inited || pda->sn_first_irq == 0) | ||
462 | return; | ||
463 | |||
464 | rcu_read_lock(); | ||
465 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { | ||
466 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { | ||
467 | sn_check_intr(i, sn_irq_info); | ||
468 | } | ||
469 | } | ||
470 | rcu_read_unlock(); | ||
471 | } | ||
472 | |||
473 | void __init sn_irq_lh_init(void) | ||
474 | { | ||
475 | int i; | ||
476 | |||
477 | sn_irq_lh = kmalloc_array(NR_IRQS, sizeof(struct list_head *), | ||
478 | GFP_KERNEL); | ||
479 | if (!sn_irq_lh) | ||
480 | panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); | ||
481 | |||
482 | for (i = 0; i < NR_IRQS; i++) { | ||
483 | sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); | ||
484 | if (!sn_irq_lh[i]) | ||
485 | panic("SN PCI INIT: Failed IRQ memory allocation\n"); | ||
486 | |||
487 | INIT_LIST_HEAD(sn_irq_lh[i]); | ||
488 | } | ||
489 | } | ||
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c deleted file mode 100644 index 87682b48ef83..000000000000 --- a/arch/ia64/sn/kernel/klconflib.c +++ /dev/null | |||
@@ -1,107 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/ctype.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <asm/sn/types.h> | ||
14 | #include <asm/sn/module.h> | ||
15 | #include <asm/sn/l1.h> | ||
16 | |||
17 | char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789..."; | ||
18 | /* | ||
19 | * Format a module id for printing. | ||
20 | * | ||
21 | * There are three possible formats: | ||
22 | * | ||
23 | * MODULE_FORMAT_BRIEF is the brief 6-character format, including | ||
24 | * the actual brick-type as recorded in the | ||
25 | * moduleid_t, eg. 002c15 for a C-brick, or | ||
26 | * 101#17 for a PX-brick. | ||
27 | * | ||
28 | * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15 | ||
30 | * or rack/101/bay/17 (note that the brick | ||
30 | * type does not appear in this format). | ||
31 | * | ||
32 | * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it | ||
33 | * ensures that the module id provided appears | ||
34 | * exactly as it would on the LCD display of | ||
35 | * the corresponding brick, eg. still 002c15 | ||
36 | * for a C-brick, but 101p17 for a PX-brick. | ||
37 | * | ||
38 | * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD) | ||
39 | * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was | ||
40 | * decided that all callers should assume the returned string should be what | ||
41 | * is displayed on the brick L1 LCD. | ||
42 | */ | ||
43 | void | ||
44 | format_module_id(char *buffer, moduleid_t m, int fmt) | ||
45 | { | ||
46 | int rack, position; | ||
47 | unsigned char brickchar; | ||
48 | |||
49 | rack = MODULE_GET_RACK(m); | ||
50 | brickchar = MODULE_GET_BTCHAR(m); | ||
51 | |||
52 | /* Be sure we use the same brick type character as displayed | ||
53 | * on the brick's LCD | ||
54 | */ | ||
55 | switch (brickchar) | ||
56 | { | ||
57 | case L1_BRICKTYPE_GA: | ||
58 | case L1_BRICKTYPE_OPUS_TIO: | ||
59 | brickchar = L1_BRICKTYPE_C; | ||
60 | break; | ||
61 | |||
62 | case L1_BRICKTYPE_PX: | ||
63 | case L1_BRICKTYPE_PE: | ||
64 | case L1_BRICKTYPE_PA: | ||
65 | case L1_BRICKTYPE_SA: /* we can move this to the "I's" later | ||
66 | * if that makes more sense | ||
67 | */ | ||
68 | brickchar = L1_BRICKTYPE_P; | ||
69 | break; | ||
70 | |||
71 | case L1_BRICKTYPE_IX: | ||
72 | case L1_BRICKTYPE_IA: | ||
73 | |||
74 | brickchar = L1_BRICKTYPE_I; | ||
75 | break; | ||
76 | } | ||
77 | |||
78 | position = MODULE_GET_BPOS(m); | ||
79 | |||
80 | if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) { | ||
81 | /* Brief module number format, eg. 002c15 */ | ||
82 | |||
83 | /* Decompress the rack number */ | ||
84 | *buffer++ = '0' + RACK_GET_CLASS(rack); | ||
85 | *buffer++ = '0' + RACK_GET_GROUP(rack); | ||
86 | *buffer++ = '0' + RACK_GET_NUM(rack); | ||
87 | |||
88 | /* Add the brick type */ | ||
89 | *buffer++ = brickchar; | ||
90 | } | ||
91 | else if (fmt == MODULE_FORMAT_LONG) { | ||
92 | /* Fuller hwgraph format, eg. rack/002/bay/15 */ | ||
93 | |||
94 | strcpy(buffer, "rack" "/"); buffer += strlen(buffer); | ||
95 | |||
96 | *buffer++ = '0' + RACK_GET_CLASS(rack); | ||
97 | *buffer++ = '0' + RACK_GET_GROUP(rack); | ||
98 | *buffer++ = '0' + RACK_GET_NUM(rack); | ||
99 | |||
100 | strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer); | ||
101 | } | ||
102 | |||
103 | /* Add the bay position, using at least two digits */ | ||
104 | if (position < 10) | ||
105 | *buffer++ = '0'; | ||
106 | sprintf(buffer, "%d", position); | ||
107 | } | ||
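format_module_id() emits either the 6-character L1-LCD form (eg. 002c15) or the hwgraph form (eg. rack/002/bay/15), decompressing the rack number into class/group/number digits in both cases. A standalone sketch that works from already-decoded fields (the MODULE_GET_* bit decoding is not reproduced here):

    #include <stdio.h>

    static void fmt_brief(char *buf, int class, int group, int num,
                          char brick, int bay)
    {
        sprintf(buf, "%d%d%d%c%02d", class, group, num, brick, bay);
    }

    static void fmt_long(char *buf, int class, int group, int num, int bay)
    {
        sprintf(buf, "rack/%d%d%d/bay/%02d", class, group, num, bay);
    }

    int main(void)
    {
        char buf[32];

        fmt_brief(buf, 0, 0, 2, 'c', 15);
        printf("%s\n", buf);            /* 002c15 */
        fmt_long(buf, 0, 0, 2, 15);
        printf("%s\n", buf);            /* rack/002/bay/15 */
        return 0;
    }
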
diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c deleted file mode 100644 index 02bb9155840c..000000000000 --- a/arch/ia64/sn/kernel/machvec.c +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #define MACHVEC_PLATFORM_NAME sn2 | ||
10 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h> | ||
11 | #include <asm/machvec_init.h> | ||
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c deleted file mode 100644 index bc3bd930c74c..000000000000 --- a/arch/ia64/sn/kernel/mca.c +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/timer.h> | ||
12 | #include <linux/vmalloc.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <asm/mca.h> | ||
15 | #include <asm/sal.h> | ||
16 | #include <asm/sn/sn_sal.h> | ||
17 | |||
18 | /* | ||
19 | * Interval for calling SAL to poll for errors that do NOT cause error | ||
20 | * interrupts. SAL will raise a CPEI if any errors are present that | ||
21 | * need to be logged. | ||
22 | */ | ||
23 | #define CPEI_INTERVAL (5*HZ) | ||
24 | |||
25 | struct timer_list sn_cpei_timer; | ||
26 | void sn_init_cpei_timer(void); | ||
27 | |||
28 | /* Printing oemdata from mca uses data that is not passed through SAL; it is | ||
29 | * global. Only one user at a time. | ||
30 | */ | ||
31 | static DEFINE_MUTEX(sn_oemdata_mutex); | ||
32 | static u8 **sn_oemdata; | ||
33 | static u64 *sn_oemdata_size, sn_oemdata_bufsize; | ||
34 | |||
35 | /* | ||
36 | * print_hook | ||
37 | * | ||
38 | * This function is the callback routine that SAL calls to log error | ||
39 | * info for platform errors. buf is appended to sn_oemdata, resizing as | ||
40 | * required. | ||
41 | * Note: this is a SAL to OS callback, running under the same rules as the SAL | ||
42 | * code. SAL calls are run with preempt disabled so this routine must not | ||
43 | * sleep. vmalloc can sleep so print_hook cannot resize the output buffer | ||
44 | * itself; instead it must set the required size and return to let the caller | ||
45 | * resize the buffer then redrive the SAL call. | ||
46 | */ | ||
47 | static int print_hook(const char *fmt, ...) | ||
48 | { | ||
49 | char buf[400]; | ||
50 | int len; | ||
51 | va_list args; | ||
52 | va_start(args, fmt); | ||
53 | vsnprintf(buf, sizeof(buf), fmt, args); | ||
54 | va_end(args); | ||
55 | len = strlen(buf); | ||
56 | if (*sn_oemdata_size + len <= sn_oemdata_bufsize) | ||
57 | memcpy(*sn_oemdata + *sn_oemdata_size, buf, len); | ||
58 | *sn_oemdata_size += len; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs) | ||
63 | { | ||
64 | /* | ||
65 | * this function's sole purpose is to call SAL when we receive | ||
66 | * a CE interrupt from SHUB or when the timer routine decides | ||
67 | * we need to call SAL to check for CEs. | ||
68 | */ | ||
69 | |||
70 | /* CALL SAL_LOG_CE */ | ||
71 | |||
72 | ia64_sn_plat_cpei_handler(); | ||
73 | } | ||
74 | |||
75 | static void sn_cpei_timer_handler(struct timer_list *unused) | ||
76 | { | ||
77 | sn_cpei_handler(-1, NULL, NULL); | ||
78 | mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL); | ||
79 | } | ||
80 | |||
81 | void sn_init_cpei_timer(void) | ||
82 | { | ||
83 | timer_setup(&sn_cpei_timer, sn_cpei_timer_handler, 0); | ||
84 | sn_cpei_timer.expires = jiffies + CPEI_INTERVAL; | ||
85 | add_timer(&sn_cpei_timer); | ||
86 | } | ||
87 | |||
88 | static int | ||
89 | sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, | ||
90 | u64 * oemdata_size) | ||
91 | { | ||
92 | mutex_lock(&sn_oemdata_mutex); | ||
93 | sn_oemdata = oemdata; | ||
94 | sn_oemdata_size = oemdata_size; | ||
95 | sn_oemdata_bufsize = 0; | ||
96 | *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */ | ||
97 | while (*sn_oemdata_size > sn_oemdata_bufsize) { | ||
98 | u8 *newbuf = vmalloc(*sn_oemdata_size); | ||
99 | if (!newbuf) { | ||
100 | mutex_unlock(&sn_oemdata_mutex); | ||
101 | printk(KERN_ERR "%s: unable to extend sn_oemdata\n", | ||
102 | __func__); | ||
103 | return 1; | ||
104 | } | ||
105 | vfree(*sn_oemdata); | ||
106 | *sn_oemdata = newbuf; | ||
107 | sn_oemdata_bufsize = *sn_oemdata_size; | ||
108 | *sn_oemdata_size = 0; | ||
109 | ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); | ||
110 | } | ||
111 | mutex_unlock(&sn_oemdata_mutex); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | /* Callback when userspace salinfo wants to decode oem data via the platform | ||
116 | * kernel and/or prom. | ||
117 | */ | ||
118 | int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size) | ||
119 | { | ||
120 | efi_guid_t guid = *(efi_guid_t *)sect_header; | ||
121 | int valid = 0; | ||
122 | *oemdata_size = 0; | ||
123 | vfree(*oemdata); | ||
124 | *oemdata = NULL; | ||
125 | if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) { | ||
126 | sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header; | ||
127 | valid = psei->valid.oem_data; | ||
128 | } else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) { | ||
129 | sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header; | ||
130 | valid = mdei->valid.oem_data; | ||
131 | } | ||
132 | if (valid) | ||
133 | return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size); | ||
134 | else | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int __init sn_salinfo_init(void) | ||
139 | { | ||
140 | if (ia64_platform_is("sn2")) | ||
141 | salinfo_platform_oemdata = &sn_salinfo_platform_oemdata; | ||
142 | return 0; | ||
143 | } | ||
144 | device_initcall(sn_salinfo_init); | ||
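The print_hook()/sn_platform_plat_specific_err_print() pair uses a measure-then-redrive pattern: because the callback runs with preemption disabled it never allocates, it only accumulates the size it would have needed, and the caller grows the buffer and reruns the decode until everything fits. A standalone sketch of that loop:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char  *out;
    static size_t out_size, out_cap;

    /* append if there is room; always account for the bytes needed */
    static void emit(const char *s)
    {
        size_t len = strlen(s);

        if (out_size + len <= out_cap)
            memcpy(out + out_size, s, len);
        out_size += len;
    }

    static void decode(void)            /* stand-in for the SAL decode call */
    {
        emit("error record line 1\n");
        emit("error record line 2\n");
    }

    int main(void)
    {
        out_size = 64;                  /* first guess at the required size */
        out_cap = 0;
        while (out_size > out_cap) {
            free(out);
            out = malloc(out_size);
            out_cap = out_size;
            out_size = 0;
            decode();                   /* redrive with the bigger buffer */
        }
        fwrite(out, 1, out_size, stdout);
        free(out);
        return 0;
    }
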
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c deleted file mode 100644 index fb25065b22c6..000000000000 --- a/arch/ia64/sn/kernel/msi_sn.c +++ /dev/null | |||
@@ -1,238 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/msi.h> | ||
14 | #include <linux/slab.h> | ||
15 | |||
16 | #include <asm/sn/addrs.h> | ||
17 | #include <asm/sn/intr.h> | ||
18 | #include <asm/sn/pcibus_provider_defs.h> | ||
19 | #include <asm/sn/pcidev.h> | ||
20 | #include <asm/sn/nodepda.h> | ||
21 | |||
22 | struct sn_msi_info { | ||
23 | u64 pci_addr; | ||
24 | struct sn_irq_info *sn_irq_info; | ||
25 | }; | ||
26 | |||
27 | static struct sn_msi_info sn_msi_info[NR_IRQS]; | ||
28 | |||
29 | static struct irq_chip sn_msi_chip; | ||
30 | |||
31 | void sn_teardown_msi_irq(unsigned int irq) | ||
32 | { | ||
33 | nasid_t nasid; | ||
34 | int widget; | ||
35 | struct pci_dev *pdev; | ||
36 | struct pcidev_info *sn_pdev; | ||
37 | struct sn_irq_info *sn_irq_info; | ||
38 | struct pcibus_bussoft *bussoft; | ||
39 | struct sn_pcibus_provider *provider; | ||
40 | |||
41 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | ||
42 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) | ||
43 | return; | ||
44 | |||
45 | sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
46 | pdev = sn_pdev->pdi_linux_pcidev; | ||
47 | provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
48 | |||
49 | (*provider->dma_unmap)(pdev, | ||
50 | sn_msi_info[irq].pci_addr, | ||
51 | PCI_DMA_FROMDEVICE); | ||
52 | sn_msi_info[irq].pci_addr = 0; | ||
53 | |||
54 | bussoft = SN_PCIDEV_BUSSOFT(pdev); | ||
55 | nasid = NASID_GET(bussoft->bs_base); | ||
56 | widget = (nasid & 1) ? | ||
57 | TIO_SWIN_WIDGETNUM(bussoft->bs_base) : | ||
58 | SWIN_WIDGETNUM(bussoft->bs_base); | ||
59 | |||
60 | sn_intr_free(nasid, widget, sn_irq_info); | ||
61 | sn_msi_info[irq].sn_irq_info = NULL; | ||
62 | |||
63 | destroy_irq(irq); | ||
64 | } | ||
65 | |||
66 | int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) | ||
67 | { | ||
68 | struct msi_msg msg; | ||
69 | int widget; | ||
70 | int status; | ||
71 | nasid_t nasid; | ||
72 | u64 bus_addr; | ||
73 | struct sn_irq_info *sn_irq_info; | ||
74 | struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); | ||
75 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
76 | int irq; | ||
77 | |||
78 | if (!entry->msi_attrib.is_64) | ||
79 | return -EINVAL; | ||
80 | |||
81 | if (bussoft == NULL) | ||
82 | return -EINVAL; | ||
83 | |||
84 | if (provider == NULL || provider->dma_map_consistent == NULL) | ||
85 | return -EINVAL; | ||
86 | |||
87 | irq = create_irq(); | ||
88 | if (irq < 0) | ||
89 | return irq; | ||
90 | |||
91 | /* | ||
92 | * Set up the vector plumbing. Let the prom (via sn_intr_alloc) | ||
93 | * decide which cpu to direct this msi at by default. | ||
94 | */ | ||
95 | |||
96 | nasid = NASID_GET(bussoft->bs_base); | ||
97 | widget = (nasid & 1) ? | ||
98 | TIO_SWIN_WIDGETNUM(bussoft->bs_base) : | ||
99 | SWIN_WIDGETNUM(bussoft->bs_base); | ||
100 | |||
101 | sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | ||
102 | if (! sn_irq_info) { | ||
103 | destroy_irq(irq); | ||
104 | return -ENOMEM; | ||
105 | } | ||
106 | |||
107 | status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1); | ||
108 | if (status) { | ||
109 | kfree(sn_irq_info); | ||
110 | destroy_irq(irq); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */ | ||
115 | sn_irq_fixup(pdev, sn_irq_info); | ||
116 | |||
117 | /* Prom probably should fill these in, but doesn't ... */ | ||
118 | sn_irq_info->irq_bridge_type = bussoft->bs_asic_type; | ||
119 | sn_irq_info->irq_bridge = (void *)bussoft->bs_base; | ||
120 | |||
121 | /* | ||
122 | * Map the xio address into bus space | ||
123 | */ | ||
124 | bus_addr = (*provider->dma_map_consistent)(pdev, | ||
125 | sn_irq_info->irq_xtalkaddr, | ||
126 | sizeof(sn_irq_info->irq_xtalkaddr), | ||
127 | SN_DMA_MSI|SN_DMA_ADDR_XIO); | ||
128 | if (! bus_addr) { | ||
129 | sn_intr_free(nasid, widget, sn_irq_info); | ||
130 | kfree(sn_irq_info); | ||
131 | destroy_irq(irq); | ||
132 | return -ENOMEM; | ||
133 | } | ||
134 | |||
135 | sn_msi_info[irq].sn_irq_info = sn_irq_info; | ||
136 | sn_msi_info[irq].pci_addr = bus_addr; | ||
137 | |||
138 | msg.address_hi = (u32)(bus_addr >> 32); | ||
139 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | ||
140 | |||
141 | /* | ||
142 | * In the SN platform, bit 16 is a "send vector" bit which | ||
143 | * must be present in order to move the vector through the system. | ||
144 | */ | ||
145 | msg.data = 0x100 + irq; | ||
146 | |||
147 | irq_set_msi_desc(irq, entry); | ||
148 | pci_write_msi_msg(irq, &msg); | ||
149 | irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | #ifdef CONFIG_SMP | ||
155 | static int sn_set_msi_irq_affinity(struct irq_data *data, | ||
156 | const struct cpumask *cpu_mask, bool force) | ||
157 | { | ||
158 | struct msi_msg msg; | ||
159 | int slice; | ||
160 | nasid_t nasid; | ||
161 | u64 bus_addr; | ||
162 | struct pci_dev *pdev; | ||
163 | struct pcidev_info *sn_pdev; | ||
164 | struct sn_irq_info *sn_irq_info; | ||
165 | struct sn_irq_info *new_irq_info; | ||
166 | struct sn_pcibus_provider *provider; | ||
167 | unsigned int cpu, irq = data->irq; | ||
168 | |||
169 | cpu = cpumask_first_and(cpu_mask, cpu_online_mask); | ||
170 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | ||
171 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) | ||
172 | return -1; | ||
173 | |||
174 | /* | ||
175 | * Release XIO resources for the old MSI PCI address | ||
176 | */ | ||
177 | |||
178 | __get_cached_msi_msg(irq_data_get_msi_desc(data), &msg); | ||
179 | sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
180 | pdev = sn_pdev->pdi_linux_pcidev; | ||
181 | provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
182 | |||
183 | bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo); | ||
184 | (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE); | ||
185 | sn_msi_info[irq].pci_addr = 0; | ||
186 | |||
187 | nasid = cpuid_to_nasid(cpu); | ||
188 | slice = cpuid_to_slice(cpu); | ||
189 | |||
190 | new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); | ||
191 | sn_msi_info[irq].sn_irq_info = new_irq_info; | ||
192 | if (new_irq_info == NULL) | ||
193 | return -1; | ||
194 | |||
195 | /* | ||
196 | * Map the xio address into bus space | ||
197 | */ | ||
198 | |||
199 | bus_addr = (*provider->dma_map_consistent)(pdev, | ||
200 | new_irq_info->irq_xtalkaddr, | ||
201 | sizeof(new_irq_info->irq_xtalkaddr), | ||
202 | SN_DMA_MSI|SN_DMA_ADDR_XIO); | ||
203 | |||
204 | sn_msi_info[irq].pci_addr = bus_addr; | ||
205 | msg.address_hi = (u32)(bus_addr >> 32); | ||
206 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | ||
207 | |||
208 | pci_write_msi_msg(irq, &msg); | ||
209 | cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | #endif /* CONFIG_SMP */ | ||
214 | |||
215 | static void sn_ack_msi_irq(struct irq_data *data) | ||
216 | { | ||
217 | irq_move_irq(data); | ||
218 | ia64_eoi(); | ||
219 | } | ||
220 | |||
221 | static int sn_msi_retrigger_irq(struct irq_data *data) | ||
222 | { | ||
223 | unsigned int vector = data->irq; | ||
224 | ia64_resend_irq(vector); | ||
225 | |||
226 | return 1; | ||
227 | } | ||
228 | |||
229 | static struct irq_chip sn_msi_chip = { | ||
230 | .name = "PCI-MSI", | ||
231 | .irq_mask = pci_msi_mask_irq, | ||
232 | .irq_unmask = pci_msi_unmask_irq, | ||
233 | .irq_ack = sn_ack_msi_irq, | ||
234 | #ifdef CONFIG_SMP | ||
235 | .irq_set_affinity = sn_set_msi_irq_affinity, | ||
236 | #endif | ||
237 | .irq_retrigger = sn_msi_retrigger_irq, | ||
238 | }; | ||
diff --git a/arch/ia64/sn/kernel/pio_phys.S b/arch/ia64/sn/kernel/pio_phys.S deleted file mode 100644 index 3c7d48d6ecb8..000000000000 --- a/arch/ia64/sn/kernel/pio_phys.S +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | * This file contains routines used to access MMR registers via | ||
9 | * uncached physical addresses. | ||
10 | * pio_phys_read_mmr - read an MMR | ||
11 | * pio_phys_write_mmr - write an MMR | ||
12 | * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 | ||
13 | * Second MMR will be skipped if address is NULL | ||
14 | * | ||
15 | * Addresses passed to these routines should be uncached physical addresses | ||
16 | * ie., 0x80000.... | ||
17 | */ | ||
18 | |||
19 | |||
20 | |||
21 | #include <asm/asmmacro.h> | ||
22 | #include <asm/page.h> | ||
23 | |||
24 | GLOBAL_ENTRY(pio_phys_read_mmr) | ||
25 | .prologue | ||
26 | .regstk 1,0,0,0 | ||
27 | .body | ||
28 | mov r2=psr | ||
29 | rsm psr.i | psr.dt | ||
30 | ;; | ||
31 | srlz.d | ||
32 | ld8.acq r8=[r32] | ||
33 | ;; | ||
34 | mov psr.l=r2;; | ||
35 | srlz.d | ||
36 | br.ret.sptk.many rp | ||
37 | END(pio_phys_read_mmr) | ||
38 | |||
39 | GLOBAL_ENTRY(pio_phys_write_mmr) | ||
40 | .prologue | ||
41 | .regstk 2,0,0,0 | ||
42 | .body | ||
43 | mov r2=psr | ||
44 | rsm psr.i | psr.dt | ||
45 | ;; | ||
46 | srlz.d | ||
47 | st8.rel [r32]=r33 | ||
48 | ;; | ||
49 | mov psr.l=r2;; | ||
50 | srlz.d | ||
51 | br.ret.sptk.many rp | ||
52 | END(pio_phys_write_mmr) | ||
53 | |||
54 | GLOBAL_ENTRY(pio_atomic_phys_write_mmrs) | ||
55 | .prologue | ||
56 | .regstk 4,0,0,0 | ||
57 | .body | ||
58 | mov r2=psr | ||
59 | cmp.ne p9,p0=r34,r0; | ||
60 | rsm psr.i | psr.dt | psr.ic | ||
61 | ;; | ||
62 | srlz.d | ||
63 | st8.rel [r32]=r33 | ||
64 | (p9) st8.rel [r34]=r35 | ||
65 | ;; | ||
66 | mov psr.l=r2;; | ||
67 | srlz.d | ||
68 | br.ret.sptk.many rp | ||
69 | END(pio_atomic_phys_write_mmrs) | ||
70 | |||
71 | |||
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c deleted file mode 100644 index e6a5049ef503..000000000000 --- a/arch/ia64/sn/kernel/setup.c +++ /dev/null | |||
@@ -1,786 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/kdev_t.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/screen_info.h> | ||
16 | #include <linux/console.h> | ||
17 | #include <linux/timex.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/serial.h> | ||
22 | #include <linux/irq.h> | ||
23 | #include <linux/memblock.h> | ||
24 | #include <linux/mmzone.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/acpi.h> | ||
27 | #include <linux/compiler.h> | ||
28 | #include <linux/root_dev.h> | ||
29 | #include <linux/nodemask.h> | ||
30 | #include <linux/pm.h> | ||
31 | #include <linux/efi.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/sal.h> | ||
35 | #include <asm/machvec.h> | ||
36 | #include <asm/processor.h> | ||
37 | #include <asm/vga.h> | ||
38 | #include <asm/setup.h> | ||
39 | #include <asm/sn/arch.h> | ||
40 | #include <asm/sn/addrs.h> | ||
41 | #include <asm/sn/pda.h> | ||
42 | #include <asm/sn/nodepda.h> | ||
43 | #include <asm/sn/sn_cpuid.h> | ||
44 | #include <asm/sn/simulator.h> | ||
45 | #include <asm/sn/leds.h> | ||
46 | #include <asm/sn/bte.h> | ||
47 | #include <asm/sn/shub_mmr.h> | ||
48 | #include <asm/sn/clksupport.h> | ||
49 | #include <asm/sn/sn_sal.h> | ||
50 | #include <asm/sn/geo.h> | ||
51 | #include <asm/sn/sn_feature_sets.h> | ||
52 | #include "xtalk/xwidgetdev.h" | ||
53 | #include "xtalk/hubdev.h" | ||
54 | #include <asm/sn/klconfig.h> | ||
55 | |||
56 | |||
57 | DEFINE_PER_CPU(struct pda_s, pda_percpu); | ||
58 | |||
59 | #define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */ | ||
60 | |||
61 | extern void bte_init_node(nodepda_t *, cnodeid_t); | ||
62 | |||
63 | extern void sn_timer_init(void); | ||
64 | extern unsigned long last_time_offset; | ||
65 | extern void (*ia64_mark_idle) (int); | ||
66 | extern void snidle(int); | ||
67 | |||
68 | unsigned long sn_rtc_cycles_per_second; | ||
69 | EXPORT_SYMBOL(sn_rtc_cycles_per_second); | ||
70 | |||
71 | DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | ||
72 | EXPORT_PER_CPU_SYMBOL(__sn_hub_info); | ||
73 | |||
74 | DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); | ||
75 | EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); | ||
76 | |||
77 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); | ||
78 | EXPORT_PER_CPU_SYMBOL(__sn_nodepda); | ||
79 | |||
80 | char sn_system_serial_number_string[128]; | ||
81 | EXPORT_SYMBOL(sn_system_serial_number_string); | ||
82 | u64 sn_partition_serial_number; | ||
83 | EXPORT_SYMBOL(sn_partition_serial_number); | ||
84 | u8 sn_partition_id; | ||
85 | EXPORT_SYMBOL(sn_partition_id); | ||
86 | u8 sn_system_size; | ||
87 | EXPORT_SYMBOL(sn_system_size); | ||
88 | u8 sn_sharing_domain_size; | ||
89 | EXPORT_SYMBOL(sn_sharing_domain_size); | ||
90 | u8 sn_coherency_id; | ||
91 | EXPORT_SYMBOL(sn_coherency_id); | ||
92 | u8 sn_region_size; | ||
93 | EXPORT_SYMBOL(sn_region_size); | ||
94 | int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */ | ||
95 | |||
96 | short physical_node_map[MAX_NUMALINK_NODES]; | ||
97 | static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS]; | ||
98 | |||
99 | EXPORT_SYMBOL(physical_node_map); | ||
100 | |||
101 | int num_cnodes; | ||
102 | |||
103 | static void sn_init_pdas(char **); | ||
104 | static void build_cnode_tables(void); | ||
105 | |||
106 | static nodepda_t *nodepdaindr[MAX_COMPACT_NODES]; | ||
107 | |||
108 | /* | ||
109 | * The format of "screen_info" is strange, and due to early i386-setup | ||
110 | * code. This is just enough to make the console code think we're on a | ||
111 | * VGA color display. | ||
112 | */ | ||
113 | struct screen_info sn_screen_info = { | ||
114 | .orig_x = 0, | ||
115 | .orig_y = 0, | ||
116 | .orig_video_mode = 3, | ||
117 | .orig_video_cols = 80, | ||
118 | .orig_video_ega_bx = 3, | ||
119 | .orig_video_lines = 25, | ||
120 | .orig_video_isVGA = 1, | ||
121 | .orig_video_points = 16 | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * This routine can only be used during init, since | ||
126 | * smp_boot_data is an init data structure. | ||
127 | * We have to use smp_boot_data.cpu_phys_id to find | ||
128 | * the physical id of the processor because the normal | ||
129 | * cpu_physical_id() relies on data structures that | ||
130 | * may not be initialized yet. | ||
131 | */ | ||
132 | |||
133 | static int __init pxm_to_nasid(int pxm) | ||
134 | { | ||
135 | int i; | ||
136 | int nid; | ||
137 | |||
138 | nid = pxm_to_node(pxm); | ||
139 | for (i = 0; i < num_node_memblks; i++) { | ||
140 | if (node_memblk[i].nid == nid) { | ||
141 | return NASID_GET(node_memblk[i].start_paddr); | ||
142 | } | ||
143 | } | ||
144 | return -1; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * early_sn_setup - early setup routine for SN platforms | ||
149 | * | ||
150 | * Sets up an initial console to aid debugging. Intended primarily | ||
151 | * for bringup. See start_kernel() in init/main.c. | ||
152 | */ | ||
153 | |||
154 | void __init early_sn_setup(void) | ||
155 | { | ||
156 | efi_system_table_t *efi_systab; | ||
157 | efi_config_table_t *config_tables; | ||
158 | struct ia64_sal_systab *sal_systab; | ||
159 | struct ia64_sal_desc_entry_point *ep; | ||
160 | char *p; | ||
161 | int i, j; | ||
162 | |||
163 | /* | ||
164 | * Parse enough of the SAL tables to locate the SAL entry point. Since console | ||
165 | * IO on SN2 is done via SAL calls, early_printk won't work without this. | ||
166 | * | ||
167 | * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c. | ||
168 | * Any changes to those files may have to be made here as well. | ||
169 | */ | ||
170 | efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab); | ||
171 | config_tables = __va(efi_systab->tables); | ||
172 | for (i = 0; i < efi_systab->nr_tables; i++) { | ||
173 | if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == | ||
174 | 0) { | ||
175 | sal_systab = __va(config_tables[i].table); | ||
176 | p = (char *)(sal_systab + 1); | ||
177 | for (j = 0; j < sal_systab->entry_count; j++) { | ||
178 | if (*p == SAL_DESC_ENTRY_POINT) { | ||
179 | ep = (struct ia64_sal_desc_entry_point | ||
180 | *)p; | ||
181 | ia64_sal_handler_init(__va | ||
182 | (ep->sal_proc), | ||
183 | __va(ep->gp)); | ||
184 | return; | ||
185 | } | ||
186 | p += SAL_DESC_SIZE(*p); | ||
187 | } | ||
188 | } | ||
189 | } | ||
190 | /* Uh-oh, SAL not available?? */ | ||
191 | printk(KERN_ERR "failed to find SAL entry point\n"); | ||
192 | } | ||
193 | |||
194 | extern int platform_intr_list[]; | ||
195 | static int shub_1_1_found; | ||
196 | |||
197 | /* | ||
198 | * sn_check_for_wars | ||
199 | * | ||
200 | * Set flag for enabling shub specific wars | ||
201 | */ | ||
202 | |||
203 | static inline int is_shub_1_1(int nasid) | ||
204 | { | ||
205 | unsigned long id; | ||
206 | int rev; | ||
207 | |||
208 | if (is_shub2()) | ||
209 | return 0; | ||
210 | id = REMOTE_HUB_L(nasid, SH1_SHUB_ID); | ||
211 | rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT; | ||
212 | return rev <= 2; | ||
213 | } | ||
214 | |||
215 | static void sn_check_for_wars(void) | ||
216 | { | ||
217 | int cnode; | ||
218 | |||
219 | if (is_shub2()) { | ||
220 | /* none yet */ | ||
221 | } else { | ||
222 | for_each_online_node(cnode) { | ||
223 | if (is_shub_1_1(cnodeid_to_nasid(cnode))) | ||
224 | shub_1_1_found = 1; | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Scan the EFI PCDP table (if it exists) for an acceptable VGA console | ||
231 | * output device. If one exists, pick it and set sn_legacy_{io,mem} to | ||
232 | * reflect the bus offsets needed to address it. | ||
233 | * | ||
234 | * Since PCDP support in SN is not present in the 2.4 kernel (or at least | ||
235 | * the one lbs is based on), just declare the needed structs here. | ||
236 | * | ||
237 | * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf | ||
238 | * | ||
239 | * Returns 0 if no acceptable vga is found, !0 otherwise. | ||
240 | * | ||
241 | * Note: This stuff is duped here because Altix requires the PCDP to | ||
242 | * locate a usable VGA device due to lack of proper ACPI support. Structures | ||
243 | * could be used from drivers/firmware/pcdp.h, but it was decided that moving | ||
244 | * this file to a more public location just for Altix use was undesirable. | ||
245 | */ | ||
246 | |||
247 | struct hcdp_uart_desc { | ||
248 | u8 pad[45]; | ||
249 | }; | ||
250 | |||
251 | struct pcdp { | ||
252 | u8 signature[4]; /* should be 'HCDP' */ | ||
253 | u32 length; | ||
254 | u8 rev; /* should be >=3 for pcdp, <3 for hcdp */ | ||
255 | u8 sum; | ||
256 | u8 oem_id[6]; | ||
257 | u64 oem_tableid; | ||
258 | u32 oem_rev; | ||
259 | u32 creator_id; | ||
260 | u32 creator_rev; | ||
261 | u32 num_type0; | ||
262 | struct hcdp_uart_desc uart[0]; /* num_type0 of these */ | ||
263 | /* pcdp descriptors follow */ | ||
264 | } __attribute__((packed)); | ||
265 | |||
266 | struct pcdp_device_desc { | ||
267 | u8 type; | ||
268 | u8 primary; | ||
269 | u16 length; | ||
270 | u16 index; | ||
271 | /* interconnect specific structure follows */ | ||
272 | /* device specific structure follows that */ | ||
273 | } __attribute__((packed)); | ||
274 | |||
275 | struct pcdp_interface_pci { | ||
276 | u8 type; /* 1 == pci */ | ||
277 | u8 reserved; | ||
278 | u16 length; | ||
279 | u8 segment; | ||
280 | u8 bus; | ||
281 | u8 dev; | ||
282 | u8 fun; | ||
283 | u16 devid; | ||
284 | u16 vendid; | ||
285 | u32 acpi_interrupt; | ||
286 | u64 mmio_tra; | ||
287 | u64 ioport_tra; | ||
288 | u8 flags; | ||
289 | u8 translation; | ||
290 | } __attribute__((packed)); | ||
291 | |||
292 | struct pcdp_vga_device { | ||
293 | u8 num_eas_desc; | ||
294 | /* ACPI Extended Address Space Desc follows */ | ||
295 | } __attribute__((packed)); | ||
296 | |||
297 | /* from pcdp_device_desc.primary */ | ||
298 | #define PCDP_PRIMARY_CONSOLE 0x01 | ||
299 | |||
300 | /* from pcdp_device_desc.type */ | ||
301 | #define PCDP_CONSOLE_INOUT 0x0 | ||
302 | #define PCDP_CONSOLE_DEBUG 0x1 | ||
303 | #define PCDP_CONSOLE_OUT 0x2 | ||
304 | #define PCDP_CONSOLE_IN 0x3 | ||
305 | #define PCDP_CONSOLE_TYPE_VGA 0x8 | ||
306 | |||
307 | #define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT) | ||
308 | |||
309 | /* from pcdp_interface_pci.type */ | ||
310 | #define PCDP_IF_PCI 1 | ||
311 | |||
312 | /* from pcdp_interface_pci.translation */ | ||
313 | #define PCDP_PCI_TRANS_IOPORT 0x02 | ||
314 | #define PCDP_PCI_TRANS_MMIO 0x01 | ||
315 | |||
316 | #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) | ||
317 | static void | ||
318 | sn_scan_pcdp(void) | ||
319 | { | ||
320 | u8 *bp; | ||
321 | struct pcdp *pcdp; | ||
322 | struct pcdp_device_desc device; | ||
323 | struct pcdp_interface_pci if_pci; | ||
324 | extern struct efi efi; | ||
325 | |||
326 | if (efi.hcdp == EFI_INVALID_TABLE_ADDR) | ||
327 | return; /* no hcdp/pcdp table */ | ||
328 | |||
329 | pcdp = __va(efi.hcdp); | ||
330 | |||
331 | if (pcdp->rev < 3) | ||
332 | return; /* only support PCDP (rev >= 3) */ | ||
333 | |||
334 | for (bp = (u8 *)&pcdp->uart[pcdp->num_type0]; | ||
335 | bp < (u8 *)pcdp + pcdp->length; | ||
336 | bp += device.length) { | ||
337 | memcpy(&device, bp, sizeof(device)); | ||
338 | if (! (device.primary & PCDP_PRIMARY_CONSOLE)) | ||
339 | continue; /* not primary console */ | ||
340 | |||
341 | if (device.type != PCDP_CONSOLE_VGA) | ||
342 | continue; /* not VGA descriptor */ | ||
343 | |||
344 | memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci)); | ||
345 | if (if_pci.type != PCDP_IF_PCI) | ||
346 | continue; /* not PCI interconnect */ | ||
347 | |||
348 | if (if_pci.translation & PCDP_PCI_TRANS_IOPORT) | ||
349 | vga_console_iobase = if_pci.ioport_tra; | ||
350 | |||
351 | if (if_pci.translation & PCDP_PCI_TRANS_MMIO) | ||
352 | vga_console_membase = | ||
353 | if_pci.mmio_tra | __IA64_UNCACHED_OFFSET; | ||
354 | |||
355 | break; /* once we find the primary, we're done */ | ||
356 | } | ||
357 | } | ||
358 | #endif | ||
359 | |||
360 | static unsigned long sn2_rtc_initial; | ||
361 | |||
362 | /** | ||
363 | * sn_setup - SN platform setup routine | ||
364 | * @cmdline_p: kernel command line | ||
365 | * | ||
366 | * Handles platform setup for SN machines. This includes determining | ||
367 | * the RTC frequency (via a SAL call), initializing secondary CPUs, and | ||
368 | * setting up per-node data areas. The console is also initialized here. | ||
369 | */ | ||
370 | void __init sn_setup(char **cmdline_p) | ||
371 | { | ||
372 | long status, ticks_per_sec, drift; | ||
373 | u32 version = sn_sal_rev(); | ||
374 | extern void sn_cpu_init(void); | ||
375 | |||
376 | sn2_rtc_initial = rtc_time(); | ||
377 | ia64_sn_plat_set_error_handling_features(); // obsolete | ||
378 | ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV); | ||
379 | ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES); | ||
380 | /* | ||
381 | * Note: The calls to notify the PROM of ACPI and PCI Segment | ||
382 | * support must be done prior to acpi_load_tables(), as | ||
383 | * an ACPI-capable PROM will rebuild the DSDT as a result | ||
384 | * of the call. | ||
385 | */ | ||
386 | ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE); | ||
387 | ia64_sn_set_os_feature(OSF_ACPI_ENABLE); | ||
388 | |||
389 | /* Load the new DSDT and SSDT tables into the global table list. */ | ||
390 | acpi_table_init(); | ||
391 | |||
392 | #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) | ||
393 | /* | ||
394 | * Handle SN vga console. | ||
395 | * | ||
396 | * SN systems do not have enough ACPI table information | ||
397 | * being passed from prom to identify VGA adapters and the legacy | ||
398 | * addresses to access them. Until that is done, SN systems rely | ||
399 | * on the PCDP table to identify the primary VGA console if one | ||
400 | * exists. | ||
401 | * | ||
402 | * However, kernel PCDP support is optional, and even if it is built | ||
403 | * into the kernel, it will not be used if the boot cmdline contains | ||
404 | * console= directives. | ||
405 | * | ||
406 | * So, to work around this mess, we duplicate some of the PCDP code | ||
407 | * here so that the primary VGA console (as defined by PCDP) will | ||
408 | * work on SN systems even if a different console (e.g. serial) is | ||
409 | * selected on the boot line (or CONFIG_EFI_PCDP is off). | ||
410 | */ | ||
411 | |||
412 | if (! vga_console_membase) | ||
413 | sn_scan_pcdp(); | ||
414 | |||
415 | /* | ||
416 | * Setup legacy IO space. | ||
417 | * vga_console_iobase maps to PCI IO Space address 0 on the | ||
418 | * bus containing the VGA console. | ||
419 | */ | ||
420 | if (vga_console_iobase) { | ||
421 | io_space[0].mmio_base = | ||
422 | (unsigned long) ioremap(vga_console_iobase, 0); | ||
423 | io_space[0].sparse = 0; | ||
424 | } | ||
425 | |||
426 | if (vga_console_membase) { | ||
427 | /* usable vga ... make tty0 the preferred default console */ | ||
428 | if (!strstr(*cmdline_p, "console=")) | ||
429 | add_preferred_console("tty", 0, NULL); | ||
430 | } else { | ||
431 | printk(KERN_DEBUG "SGI: Disabling VGA console\n"); | ||
432 | if (!strstr(*cmdline_p, "console=")) | ||
433 | add_preferred_console("ttySG", 0, NULL); | ||
434 | #ifdef CONFIG_DUMMY_CONSOLE | ||
435 | conswitchp = &dummy_con; | ||
436 | #else | ||
437 | conswitchp = NULL; | ||
438 | #endif /* CONFIG_DUMMY_CONSOLE */ | ||
439 | } | ||
440 | #endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */ | ||
441 | |||
442 | MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY; | ||
443 | |||
444 | /* | ||
445 | * Build the tables for managing cnodes. | ||
446 | */ | ||
447 | build_cnode_tables(); | ||
448 | |||
449 | status = | ||
450 | ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, | ||
451 | &drift); | ||
452 | if (status != 0 || ticks_per_sec < 100000) { | ||
453 | printk(KERN_WARNING | ||
454 | "unable to determine platform RTC clock frequency, guessing.\n"); | ||
455 | /* PROM gives wrong value for clock freq. so guess */ | ||
456 | sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; | ||
457 | } else | ||
458 | sn_rtc_cycles_per_second = ticks_per_sec; | ||
459 | |||
460 | platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR; | ||
461 | |||
462 | printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); | ||
463 | |||
464 | /* | ||
465 | * we set the default root device to /dev/hda | ||
466 | * to make simulation easy | ||
467 | */ | ||
468 | ROOT_DEV = Root_HDA1; | ||
469 | |||
470 | /* | ||
471 | * Create the PDAs and NODEPDAs for all the cpus. | ||
472 | */ | ||
473 | sn_init_pdas(cmdline_p); | ||
474 | |||
475 | ia64_mark_idle = &snidle; | ||
476 | |||
477 | /* | ||
478 | * For the bootcpu, we do this here. All other cpus will make the | ||
479 | * call as part of cpu_init in slave cpu initialization. | ||
480 | */ | ||
481 | sn_cpu_init(); | ||
482 | |||
483 | #ifdef CONFIG_SMP | ||
484 | init_smp_config(); | ||
485 | #endif | ||
486 | screen_info = sn_screen_info; | ||
487 | |||
488 | sn_timer_init(); | ||
489 | |||
490 | /* | ||
491 | * set pm_power_off to a SAL call to allow | ||
492 | * sn machines to power off. The SAL call can be replaced | ||
493 | * by an ACPI interface call when ACPI is fully implemented | ||
494 | * for sn. | ||
495 | */ | ||
496 | pm_power_off = ia64_sn_power_down; | ||
497 | current->thread.flags |= IA64_THREAD_MIGRATION; | ||
498 | } | ||
499 | |||
500 | /** | ||
501 | * sn_init_pdas - setup node data areas | ||
502 | * | ||
503 | * One time setup for Node Data Area. Called by sn_setup(). | ||
504 | */ | ||
505 | static void __init sn_init_pdas(char **cmdline_p) | ||
506 | { | ||
507 | cnodeid_t cnode; | ||
508 | |||
509 | /* | ||
510 | * Allocate & initialize the nodepda for each node. | ||
511 | */ | ||
512 | for_each_online_node(cnode) { | ||
513 | nodepdaindr[cnode] = | ||
514 | memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, | ||
515 | cnode); | ||
516 | if (!nodepdaindr[cnode]) | ||
517 | panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n", | ||
518 | __func__, sizeof(nodepda_t), SMP_CACHE_BYTES, | ||
519 | cnode); | ||
520 | memset(nodepdaindr[cnode]->phys_cpuid, -1, | ||
521 | sizeof(nodepdaindr[cnode]->phys_cpuid)); | ||
522 | spin_lock_init(&nodepdaindr[cnode]->ptc_lock); | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | * Allocate & initialize nodepda for TIOs. For now, put them on node 0. | ||
527 | */ | ||
528 | for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) { | ||
529 | nodepdaindr[cnode] = | ||
530 | memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0); | ||
531 | if (!nodepdaindr[cnode]) | ||
532 | panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n", | ||
533 | __func__, sizeof(nodepda_t), SMP_CACHE_BYTES, | ||
534 | cnode); | ||
535 | } | ||
536 | |||
537 | |||
538 | /* | ||
539 | * Now copy the array of nodepda pointers to each nodepda. | ||
540 | */ | ||
541 | for (cnode = 0; cnode < num_cnodes; cnode++) | ||
542 | memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, | ||
543 | sizeof(nodepdaindr)); | ||
544 | |||
545 | /* | ||
546 | * Set up IO related platform-dependent nodepda fields. | ||
547 | * The following routine actually sets up the hubinfo struct | ||
548 | * in nodepda. | ||
549 | */ | ||
550 | for_each_online_node(cnode) { | ||
551 | bte_init_node(nodepdaindr[cnode], cnode); | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * Initialize the per node hubdev. This includes IO Nodes and | ||
556 | * headless/memless nodes. | ||
557 | */ | ||
558 | for (cnode = 0; cnode < num_cnodes; cnode++) { | ||
559 | hubdev_init_node(nodepdaindr[cnode], cnode); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /** | ||
564 | * sn_cpu_init - initialize per-cpu data areas | ||
565 | * @cpuid: cpuid of the caller | ||
566 | * | ||
567 | * Called during cpu initialization on each cpu as it starts. | ||
568 | * Currently, initializes the per-cpu data area for SNIA. | ||
569 | * Also sets up a few fields in the nodepda. Also known as | ||
570 | * platform_cpu_init() by the ia64 machvec code. | ||
571 | */ | ||
572 | void sn_cpu_init(void) | ||
573 | { | ||
574 | int cpuid; | ||
575 | int cpuphyid; | ||
576 | int nasid; | ||
577 | int subnode; | ||
578 | int slice; | ||
579 | int cnode; | ||
580 | int i; | ||
581 | static int wars_have_been_checked, set_cpu0_number; | ||
582 | |||
583 | cpuid = smp_processor_id(); | ||
584 | if (cpuid == 0 && IS_MEDUSA()) { | ||
585 | if (ia64_sn_is_fake_prom()) | ||
586 | sn_prom_type = 2; | ||
587 | else | ||
588 | sn_prom_type = 1; | ||
589 | printk(KERN_INFO "Running on medusa with %s PROM\n", | ||
590 | (sn_prom_type == 1) ? "real" : "fake"); | ||
591 | } | ||
592 | |||
593 | memset(pda, 0, sizeof(*pda)); | ||
594 | if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, | ||
595 | &sn_hub_info->nasid_bitmask, | ||
596 | &sn_hub_info->nasid_shift, | ||
597 | &sn_system_size, &sn_sharing_domain_size, | ||
598 | &sn_partition_id, &sn_coherency_id, | ||
599 | &sn_region_size)) | ||
600 | BUG(); | ||
601 | sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2; | ||
602 | |||
603 | /* | ||
604 | * Don't check status. The SAL call is not supported on all PROMs | ||
605 | * but a failure is harmless. | ||
606 | * Architecturally, cpu_init is always called twice on cpu 0. We | ||
607 | * should set cpu_number on cpu 0 once. | ||
608 | */ | ||
609 | if (cpuid == 0) { | ||
610 | if (!set_cpu0_number) { | ||
611 | (void) ia64_sn_set_cpu_number(cpuid); | ||
612 | set_cpu0_number = 1; | ||
613 | } | ||
614 | } else | ||
615 | (void) ia64_sn_set_cpu_number(cpuid); | ||
616 | |||
617 | /* | ||
618 | * The boot cpu makes this call again after platform initialization is | ||
619 | * complete. | ||
620 | */ | ||
621 | if (nodepdaindr[0] == NULL) | ||
622 | return; | ||
623 | |||
624 | for (i = 0; i < MAX_PROM_FEATURE_SETS; i++) | ||
625 | if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0) | ||
626 | break; | ||
627 | |||
628 | cpuphyid = get_sapicid(); | ||
629 | |||
630 | if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice)) | ||
631 | BUG(); | ||
632 | |||
633 | for (i=0; i < MAX_NUMNODES; i++) { | ||
634 | if (nodepdaindr[i]) { | ||
635 | nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid; | ||
636 | nodepdaindr[i]->phys_cpuid[cpuid].slice = slice; | ||
637 | nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode; | ||
638 | } | ||
639 | } | ||
640 | |||
641 | cnode = nasid_to_cnodeid(nasid); | ||
642 | |||
643 | __this_cpu_write(__sn_nodepda, nodepdaindr[cnode]); | ||
644 | |||
645 | pda->led_address = | ||
646 | (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); | ||
647 | pda->led_state = LED_ALWAYS_SET; | ||
648 | pda->hb_count = HZ / 2; | ||
649 | pda->hb_state = 0; | ||
650 | pda->idle_flag = 0; | ||
651 | |||
652 | if (cpuid != 0) { | ||
653 | /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ | ||
654 | memcpy(sn_cnodeid_to_nasid, | ||
655 | (&per_cpu(__sn_cnodeid_to_nasid, 0)), | ||
656 | sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * Check for WARs. | ||
661 | * Only needs to be done once, on BSP. | ||
662 | * Has to be done after loop above, because it uses this cpu's | ||
663 | * sn_cnodeid_to_nasid table which was just initialized if this | ||
664 | * isn't cpu 0. | ||
665 | * Has to be done before assignment below. | ||
666 | */ | ||
667 | if (!wars_have_been_checked) { | ||
668 | sn_check_for_wars(); | ||
669 | wars_have_been_checked = 1; | ||
670 | } | ||
671 | sn_hub_info->shub_1_1_found = shub_1_1_found; | ||
672 | |||
673 | /* | ||
674 | * Set up addresses of PIO/MEM write status registers. | ||
675 | */ | ||
676 | { | ||
677 | u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; | ||
678 | u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2, | ||
679 | SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3}; | ||
680 | u64 *pio; | ||
681 | pio = is_shub1() ? pio1 : pio2; | ||
682 | pda->pio_write_status_addr = | ||
683 | (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]); | ||
684 | pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * WAR addresses for SHUB 1.x. | ||
689 | */ | ||
690 | if (local_node_data->active_cpu_count++ == 0 && is_shub1()) { | ||
691 | int buddy_nasid; | ||
692 | buddy_nasid = | ||
693 | cnodeid_to_nasid(numa_node_id() == | ||
694 | num_online_nodes() - 1 ? 0 : numa_node_id() + 1); | ||
695 | pda->pio_shub_war_cam_addr = | ||
696 | (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, | ||
697 | SH1_PI_CAM_CONTROL); | ||
698 | } | ||
699 | } | ||
700 | |||
701 | /* | ||
702 | * Build tables for converting between NASIDs and cnodes. | ||
703 | */ | ||
704 | static inline int __init board_needs_cnode(int type) | ||
705 | { | ||
706 | return (type == KLTYPE_SNIA || type == KLTYPE_TIO); | ||
707 | } | ||
708 | |||
709 | void __init build_cnode_tables(void) | ||
710 | { | ||
711 | int nasid; | ||
712 | int node; | ||
713 | lboard_t *brd; | ||
714 | |||
715 | memset(physical_node_map, -1, sizeof(physical_node_map)); | ||
716 | memset(sn_cnodeid_to_nasid, -1, | ||
717 | sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); | ||
718 | |||
719 | /* | ||
720 | * First populate the tables with C/M bricks. This ensures that | ||
721 | * cnode == node for all C & M bricks. | ||
722 | */ | ||
723 | for_each_online_node(node) { | ||
724 | nasid = pxm_to_nasid(node_to_pxm(node)); | ||
725 | sn_cnodeid_to_nasid[node] = nasid; | ||
726 | physical_node_map[nasid] = node; | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node | ||
731 | * limit on the number of nodes, we can't use the generic node numbers | ||
732 | * for this. Note that num_cnodes is incremented below as TIOs or | ||
733 | * headless/memoryless nodes are discovered. | ||
734 | */ | ||
735 | num_cnodes = num_online_nodes(); | ||
736 | |||
737 | /* fakeprom does not support klgraph */ | ||
738 | if (IS_RUNNING_ON_FAKE_PROM()) | ||
739 | return; | ||
740 | |||
741 | /* Find TIOs & headless/memoryless nodes and add them to the tables */ | ||
742 | for_each_online_node(node) { | ||
743 | kl_config_hdr_t *klgraph_header; | ||
744 | nasid = cnodeid_to_nasid(node); | ||
745 | klgraph_header = ia64_sn_get_klconfig_addr(nasid); | ||
746 | BUG_ON(klgraph_header == NULL); | ||
747 | brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); | ||
748 | while (brd) { | ||
749 | if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { | ||
750 | sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid; | ||
751 | physical_node_map[brd->brd_nasid] = num_cnodes++; | ||
752 | } | ||
753 | brd = find_lboard_next(brd); | ||
754 | } | ||
755 | } | ||
756 | } | ||
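As the comments above note, the first pass makes cnode == node for every C/M brick and the second pass appends TIOs and headless/memoryless nodes; either way the two tables built here remain inverses of each other, which is the property the cnodeid_to_nasid()/physical_node_map lookups used elsewhere in this file depend on. A hedged sketch of that invariant (illustrative only, not part of the patch):

	/* Illustrative only: check the cnode <-> NASID round trip. */
	int node, nasid;

	for (node = 0; node < num_cnodes; node++) {
		nasid = sn_cnodeid_to_nasid[node];	  /* cnode -> NASID       */
		BUG_ON(physical_node_map[nasid] != node); /* NASID -> cnode back  */
	}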
757 | |||
758 | int | ||
759 | nasid_slice_to_cpuid(int nasid, int slice) | ||
760 | { | ||
761 | long cpu; | ||
762 | |||
763 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | ||
764 | if (cpuid_to_nasid(cpu) == nasid && | ||
765 | cpuid_to_slice(cpu) == slice) | ||
766 | return cpu; | ||
767 | |||
768 | return -1; | ||
769 | } | ||
770 | |||
771 | int sn_prom_feature_available(int id) | ||
772 | { | ||
773 | if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS) | ||
774 | return 0; | ||
775 | return test_bit(id, sn_prom_features); | ||
776 | } | ||
777 | |||
778 | void | ||
779 | sn_kernel_launch_event(void) | ||
780 | { | ||
781 | /* ignore status until we understand possible failure, if any */ | ||
782 | if (ia64_sn_kernel_launch_event()) | ||
783 | printk(KERN_ERR "KEXEC is not supported in this PROM, Please update the PROM.\n"); | ||
784 | } | ||
785 | EXPORT_SYMBOL(sn_prom_feature_available); | ||
786 | |||
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile deleted file mode 100644 index 170bde4549da..000000000000 --- a/arch/ia64/sn/kernel/sn2/Makefile +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | # arch/ia64/sn/kernel/sn2/Makefile | ||
2 | # | ||
3 | # This file is subject to the terms and conditions of the GNU General Public | ||
4 | # License. See the file "COPYING" in the main directory of this archive | ||
5 | # for more details. | ||
6 | # | ||
7 | # Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved. | ||
8 | # | ||
9 | # sn2 specific kernel files | ||
10 | # | ||
11 | |||
12 | obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \ | ||
13 | prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o | ||
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c deleted file mode 100644 index 2862cb33026d..000000000000 --- a/arch/ia64/sn/kernel/sn2/cache.c +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/pgalloc.h> | ||
11 | #include <asm/sn/arch.h> | ||
12 | |||
13 | /** | ||
14 | * sn_flush_all_caches - flush a range of address from all caches (incl. L4) | ||
15 | * @flush_addr: identity mapped region 7 address to start flushing | ||
16 | * @bytes: number of bytes to flush | ||
17 | * | ||
18 | * Flush a range of addresses from all caches including L4. | ||
19 | * All addresses fully or partially contained within | ||
20 | * @flush_addr to @flush_addr + @bytes are flushed | ||
21 | * from all caches. | ||
22 | */ | ||
23 | void | ||
24 | sn_flush_all_caches(long flush_addr, long bytes) | ||
25 | { | ||
26 | unsigned long addr = flush_addr; | ||
27 | |||
28 | /* SHub1 requires a cached address */ | ||
29 | if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED)) | ||
30 | addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL); | ||
31 | |||
32 | flush_icache_range(addr, addr + bytes); | ||
33 | /* | ||
34 | * The last call may have returned before the caches | ||
35 | * were actually flushed, so we call it again to make | ||
36 | * sure. | ||
37 | */ | ||
38 | flush_icache_range(addr, addr + bytes); | ||
39 | mb(); | ||
40 | } | ||
41 | EXPORT_SYMBOL(sn_flush_all_caches); | ||
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c deleted file mode 100644 index a12c0586de38..000000000000 --- a/arch/ia64/sn/kernel/sn2/io.c +++ /dev/null | |||
@@ -1,101 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | * The generic kernel requires function pointers to these routines, so | ||
9 | * we wrap the inlines from asm/ia64/sn/sn2/io.h here. | ||
10 | */ | ||
11 | |||
12 | #include <asm/sn/io.h> | ||
13 | |||
14 | #ifdef CONFIG_IA64_GENERIC | ||
15 | |||
16 | #undef __sn_inb | ||
17 | #undef __sn_inw | ||
18 | #undef __sn_inl | ||
19 | #undef __sn_outb | ||
20 | #undef __sn_outw | ||
21 | #undef __sn_outl | ||
22 | #undef __sn_readb | ||
23 | #undef __sn_readw | ||
24 | #undef __sn_readl | ||
25 | #undef __sn_readq | ||
26 | #undef __sn_readb_relaxed | ||
27 | #undef __sn_readw_relaxed | ||
28 | #undef __sn_readl_relaxed | ||
29 | #undef __sn_readq_relaxed | ||
30 | |||
31 | unsigned int __sn_inb(unsigned long port) | ||
32 | { | ||
33 | return ___sn_inb(port); | ||
34 | } | ||
35 | |||
36 | unsigned int __sn_inw(unsigned long port) | ||
37 | { | ||
38 | return ___sn_inw(port); | ||
39 | } | ||
40 | |||
41 | unsigned int __sn_inl(unsigned long port) | ||
42 | { | ||
43 | return ___sn_inl(port); | ||
44 | } | ||
45 | |||
46 | void __sn_outb(unsigned char val, unsigned long port) | ||
47 | { | ||
48 | ___sn_outb(val, port); | ||
49 | } | ||
50 | |||
51 | void __sn_outw(unsigned short val, unsigned long port) | ||
52 | { | ||
53 | ___sn_outw(val, port); | ||
54 | } | ||
55 | |||
56 | void __sn_outl(unsigned int val, unsigned long port) | ||
57 | { | ||
58 | ___sn_outl(val, port); | ||
59 | } | ||
60 | |||
61 | unsigned char __sn_readb(void __iomem *addr) | ||
62 | { | ||
63 | return ___sn_readb(addr); | ||
64 | } | ||
65 | |||
66 | unsigned short __sn_readw(void __iomem *addr) | ||
67 | { | ||
68 | return ___sn_readw(addr); | ||
69 | } | ||
70 | |||
71 | unsigned int __sn_readl(void __iomem *addr) | ||
72 | { | ||
73 | return ___sn_readl(addr); | ||
74 | } | ||
75 | |||
76 | unsigned long __sn_readq(void __iomem *addr) | ||
77 | { | ||
78 | return ___sn_readq(addr); | ||
79 | } | ||
80 | |||
81 | unsigned char __sn_readb_relaxed(void __iomem *addr) | ||
82 | { | ||
83 | return ___sn_readb_relaxed(addr); | ||
84 | } | ||
85 | |||
86 | unsigned short __sn_readw_relaxed(void __iomem *addr) | ||
87 | { | ||
88 | return ___sn_readw_relaxed(addr); | ||
89 | } | ||
90 | |||
91 | unsigned int __sn_readl_relaxed(void __iomem *addr) | ||
92 | { | ||
93 | return ___sn_readl_relaxed(addr); | ||
94 | } | ||
95 | |||
96 | unsigned long __sn_readq_relaxed(void __iomem *addr) | ||
97 | { | ||
98 | return ___sn_readq_relaxed(addr); | ||
99 | } | ||
100 | |||
101 | #endif | ||
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c deleted file mode 100644 index e15457bf21ac..000000000000 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ /dev/null | |||
@@ -1,207 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | * | ||
8 | * Module to export the system's Firmware Interface Tables, including | ||
9 | * PROM revision numbers and banners, in /proc | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/proc_fs.h> | ||
14 | #include <linux/seq_file.h> | ||
15 | #include <linux/nodemask.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <asm/sn/sn_sal.h> | ||
18 | #include <asm/sn/sn_cpuid.h> | ||
19 | #include <asm/sn/addrs.h> | ||
20 | |||
21 | MODULE_DESCRIPTION("PROM version reporting for /proc"); | ||
22 | MODULE_AUTHOR("Chad Talbott"); | ||
23 | MODULE_LICENSE("GPL"); | ||
24 | |||
25 | /* Standard Intel FIT entry types */ | ||
26 | #define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */ | ||
27 | #define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */ | ||
28 | /* Entries 0x02 through 0x0D reserved by Intel */ | ||
29 | #define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */ | ||
30 | #define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */ | ||
31 | #define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */ | ||
32 | #define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */ | ||
33 | /* OEM-defined entries range from 0x10 to 0x7E. */ | ||
34 | #define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */ | ||
35 | #define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */ | ||
36 | #define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */ | ||
37 | #define FIT_ENTRY_EFI 0x1F /* EFI entry */ | ||
38 | #define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */ | ||
39 | #define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */ | ||
40 | |||
41 | #define FIT_MAJOR_SHIFT (32 + 8) | ||
42 | #define FIT_MAJOR_MASK ((1 << 8) - 1) | ||
43 | #define FIT_MINOR_SHIFT 32 | ||
44 | #define FIT_MINOR_MASK ((1 << 8) - 1) | ||
45 | |||
46 | #define FIT_MAJOR(q) \ | ||
47 | ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK) | ||
48 | #define FIT_MINOR(q) \ | ||
49 | ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK) | ||
50 | |||
51 | #define FIT_TYPE_SHIFT (32 + 16) | ||
52 | #define FIT_TYPE_MASK ((1 << 7) - 1) | ||
53 | |||
54 | #define FIT_TYPE(q) \ | ||
55 | ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK) | ||
56 | |||
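A hedged worked example of these field extractors (the 64-bit value is made up for illustration), consistent with how dump_fit_entry() and proc_version_show() below use them:

	/* Worked example (value is made up for illustration):
	 *	q = 0x0010043000000080
	 *	FIT_TYPE(q)  = (q >> 48) & 0x7f = 0x10   -> "SAL_A"
	 *	FIT_MAJOR(q) = (q >> 40) & 0xff = 0x04
	 *	FIT_MINOR(q) = (q >> 32) & 0xff = 0x30   -> printed as "4.30"
	 *	size         = (q & 0xffffff) * 16 = 0x80 * 16 = 2048 bytes
	 */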
57 | struct fit_type_map_t { | ||
58 | unsigned char type; | ||
59 | const char *name; | ||
60 | }; | ||
61 | |||
62 | static const struct fit_type_map_t fit_entry_types[] = { | ||
63 | {FIT_ENTRY_FIT_HEADER, "FIT Header"}, | ||
64 | {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"}, | ||
65 | {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"}, | ||
66 | {FIT_ENTRY_PAL_A, "PAL_A"}, | ||
67 | {FIT_ENTRY_PAL_B, "PAL_B"}, | ||
68 | {FIT_ENTRY_SAL_A, "SAL_A"}, | ||
69 | {FIT_ENTRY_SAL_B, "SAL_B"}, | ||
70 | {FIT_ENTRY_SALRUNTIME, "SAL runtime"}, | ||
71 | {FIT_ENTRY_EFI, "EFI"}, | ||
72 | {FIT_ENTRY_VMLINUX, "Embedded Linux"}, | ||
73 | {FIT_ENTRY_FPSWA, "Embedded FPSWA"}, | ||
74 | {FIT_ENTRY_UNUSED, "Unused"}, | ||
75 | {0xff, "Error"}, | ||
76 | }; | ||
77 | |||
78 | static const char *fit_type_name(unsigned char type) | ||
79 | { | ||
80 | struct fit_type_map_t const *mapp; | ||
81 | |||
82 | for (mapp = fit_entry_types; mapp->type != 0xff; mapp++) | ||
83 | if (type == mapp->type) | ||
84 | return mapp->name; | ||
85 | |||
86 | if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED)) | ||
87 | return "OEM type"; | ||
88 | if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A)) | ||
89 | return "Reserved"; | ||
90 | |||
91 | return "Unknown type"; | ||
92 | } | ||
93 | |||
94 | static int | ||
95 | get_fit_entry(unsigned long nasid, int index, unsigned long *fentry, | ||
96 | char *banner, int banlen) | ||
97 | { | ||
98 | return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen); | ||
99 | } | ||
100 | |||
101 | |||
102 | /* | ||
103 | * These two routines display the FIT table for each node. | ||
104 | */ | ||
105 | static void dump_fit_entry(struct seq_file *m, unsigned long *fentry) | ||
106 | { | ||
107 | unsigned type; | ||
108 | |||
109 | type = FIT_TYPE(fentry[1]); | ||
110 | seq_printf(m, "%02x %-25s %x.%02x %016lx %u\n", | ||
111 | type, | ||
112 | fit_type_name(type), | ||
113 | FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]), | ||
114 | fentry[0], | ||
115 | /* mult by sixteen to get size in bytes */ | ||
116 | (unsigned)(fentry[1] & 0xffffff) * 16); | ||
117 | } | ||
118 | |||
119 | |||
120 | /* | ||
121 | * We assume that the fit table will be small enough that we can print | ||
122 | * the whole thing into one page. (This is true for our default 16kB | ||
123 | * pages -- each entry is about 60 chars wide when printed.) I read | ||
124 | * somewhere that the maximum size of the FIT is 128 entries, so we're | ||
125 | * OK except for 4kB pages (and no one is going to do that on SN | ||
126 | * anyway). | ||
127 | */ | ||
128 | static int proc_fit_show(struct seq_file *m, void *v) | ||
129 | { | ||
130 | unsigned long nasid = (unsigned long)m->private; | ||
131 | unsigned long fentry[2]; | ||
132 | int index; | ||
133 | |||
134 | for (index=0;;index++) { | ||
135 | BUG_ON(index * 60 > PAGE_SIZE); | ||
136 | if (get_fit_entry(nasid, index, fentry, NULL, 0)) | ||
137 | break; | ||
138 | dump_fit_entry(m, fentry); | ||
139 | } | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int proc_version_show(struct seq_file *m, void *v) | ||
144 | { | ||
145 | unsigned long nasid = (unsigned long)m->private; | ||
146 | unsigned long fentry[2]; | ||
147 | char banner[128]; | ||
148 | int index; | ||
149 | |||
150 | for (index = 0; ; index++) { | ||
151 | if (get_fit_entry(nasid, index, fentry, banner, | ||
152 | sizeof(banner))) | ||
153 | return 0; | ||
154 | if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A) | ||
155 | break; | ||
156 | } | ||
157 | |||
158 | seq_printf(m, "%x.%02x\n", FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1])); | ||
159 | |||
160 | if (banner[0]) | ||
161 | seq_printf(m, "%s\n", banner); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /* module entry points */ | ||
166 | int __init prominfo_init(void); | ||
167 | void __exit prominfo_exit(void); | ||
168 | |||
169 | module_init(prominfo_init); | ||
170 | module_exit(prominfo_exit); | ||
171 | |||
172 | #define NODE_NAME_LEN 11 | ||
173 | |||
174 | int __init prominfo_init(void) | ||
175 | { | ||
176 | struct proc_dir_entry *sgi_prominfo_entry; | ||
177 | cnodeid_t cnodeid; | ||
178 | |||
179 | if (!ia64_platform_is("sn2")) | ||
180 | return 0; | ||
181 | |||
182 | sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); | ||
183 | if (!sgi_prominfo_entry) | ||
184 | return -ENOMEM; | ||
185 | |||
186 | for_each_online_node(cnodeid) { | ||
187 | struct proc_dir_entry *dir; | ||
188 | unsigned long nasid; | ||
189 | char name[NODE_NAME_LEN]; | ||
190 | |||
191 | sprintf(name, "node%d", cnodeid); | ||
192 | dir = proc_mkdir(name, sgi_prominfo_entry); | ||
193 | if (!dir) | ||
194 | continue; | ||
195 | nasid = cnodeid_to_nasid(cnodeid); | ||
196 | proc_create_single_data("fit", 0, dir, proc_fit_show, | ||
197 | (void *)nasid); | ||
198 | proc_create_single_data("version", 0, dir, proc_version_show, | ||
199 | (void *)nasid); | ||
200 | } | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | void __exit prominfo_exit(void) | ||
205 | { | ||
206 | remove_proc_subtree("sgi_prominfo", NULL); | ||
207 | } | ||
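For reference, the /proc layout this module creates (node numbers vary per system; sketch only, derived from prominfo_init() above):

	/*
	 *	/proc/sgi_prominfo/node<N>/fit      - one line per FIT entry (proc_fit_show)
	 *	/proc/sgi_prominfo/node<N>/version  - SAL_A revision plus PROM banner (proc_version_show)
	 */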
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S deleted file mode 100644 index bebbcc4f8dd4..000000000000 --- a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <asm/types.h> | ||
10 | #include <asm/sn/shub_mmr.h> | ||
11 | |||
12 | #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT | ||
13 | #define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK | ||
14 | #define ALIAS_OFFSET 8 | ||
15 | |||
16 | |||
17 | .global sn2_ptc_deadlock_recovery_core | ||
18 | .proc sn2_ptc_deadlock_recovery_core | ||
19 | |||
20 | sn2_ptc_deadlock_recovery_core: | ||
21 | .regstk 6,0,0,0 | ||
22 | |||
23 | ptc0 = in0 | ||
24 | data0 = in1 | ||
25 | ptc1 = in2 | ||
26 | data1 = in3 | ||
27 | piowc = in4 | ||
28 | zeroval = in5 | ||
29 | piowcphy = r30 | ||
30 | psrsave = r2 | ||
31 | scr1 = r16 | ||
32 | scr2 = r17 | ||
33 | mask = r18 | ||
34 | |||
35 | |||
36 | extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address | ||
37 | dep piowcphy=-1,piowcphy,63,1 | ||
38 | movl mask=WRITECOUNTMASK | ||
39 | mov r8=r0 | ||
40 | |||
41 | 1: | ||
42 | cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1) | ||
43 | // p8 = 1 if shub1, p9 = 1 if shub2 | ||
44 | |||
45 | add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register | ||
46 | mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR | ||
47 | (p8) st8.rel [scr2]=scr1;; | ||
48 | (p9) ld8.acq scr1=[scr2];; | ||
49 | |||
50 | 5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete. | ||
51 | hint @pause | ||
52 | and scr2=scr1,mask;; // mask of writecount bits | ||
53 | cmp.ne p6,p0=zeroval,scr2 | ||
54 | (p6) br.cond.sptk 5b | ||
55 | |||
56 | |||
57 | |||
58 | ////////////// BEGIN PHYSICAL MODE //////////////////// | ||
59 | mov psrsave=psr // Disable IC (no PMIs) | ||
60 | rsm psr.i | psr.dt | psr.ic;; | ||
61 | srlz.i;; | ||
62 | |||
63 | st8.rel [ptc0]=data0 // Write PTC0 & wait for completion. | ||
64 | |||
65 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. | ||
66 | hint @pause | ||
67 | and scr2=scr1,mask;; // mask of writecount bits | ||
68 | cmp.ne p6,p0=zeroval,scr2 | ||
69 | (p6) br.cond.sptk 5b;; | ||
70 | |||
71 | tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK | ||
72 | (p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1 | ||
73 | |||
74 | (p7) st8.rel [ptc1]=data1;; // Now write PTC1. | ||
75 | |||
76 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. | ||
77 | hint @pause | ||
78 | and scr2=scr1,mask;; // mask of writecount bits | ||
79 | cmp.ne p6,p0=zeroval,scr2 | ||
80 | (p6) br.cond.sptk 5b | ||
81 | |||
82 | tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK | ||
83 | |||
84 | mov psr.l=psrsave;; // Reenable IC | ||
85 | srlz.i;; | ||
86 | ////////////// END PHYSICAL MODE //////////////////// | ||
87 | |||
88 | (p8) add r8=1,r8 | ||
89 | (p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred. | ||
90 | |||
91 | br.ret.sptk rp | ||
92 | .endp sn2_ptc_deadlock_recovery_core | ||
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c deleted file mode 100644 index b510f4f17fd4..000000000000 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ /dev/null | |||
@@ -1,577 +0,0 @@ | |||
1 | /* | ||
2 | * SN2 Platform specific SMP Support | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/threads.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/mm_types.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/irq.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/bitops.h> | ||
23 | #include <linux/nodemask.h> | ||
24 | #include <linux/proc_fs.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | |||
27 | #include <asm/processor.h> | ||
28 | #include <asm/irq.h> | ||
29 | #include <asm/sal.h> | ||
30 | #include <asm/delay.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/smp.h> | ||
33 | #include <asm/tlb.h> | ||
34 | #include <asm/numa.h> | ||
35 | #include <asm/hw_irq.h> | ||
36 | #include <asm/current.h> | ||
37 | #include <asm/sn/sn_cpuid.h> | ||
38 | #include <asm/sn/sn_sal.h> | ||
39 | #include <asm/sn/addrs.h> | ||
40 | #include <asm/sn/shub_mmr.h> | ||
41 | #include <asm/sn/nodepda.h> | ||
42 | #include <asm/sn/rw_mmr.h> | ||
43 | #include <asm/sn/sn_feature_sets.h> | ||
44 | |||
45 | DEFINE_PER_CPU(struct ptc_stats, ptcstats); | ||
46 | DECLARE_PER_CPU(struct ptc_stats, ptcstats); | ||
47 | |||
48 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); | ||
49 | |||
50 | /* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */ | ||
51 | static int sn2_flush_opt = 0; | ||
52 | |||
53 | extern unsigned long | ||
54 | sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, | ||
55 | volatile unsigned long *, unsigned long, | ||
56 | volatile unsigned long *, unsigned long); | ||
57 | void | ||
58 | sn2_ptc_deadlock_recovery(nodemask_t, short, short, int, | ||
59 | volatile unsigned long *, unsigned long, | ||
60 | volatile unsigned long *, unsigned long); | ||
61 | |||
62 | /* | ||
63 | * Note: some of the following is captured here to make debugging easier | ||
64 | * (the macros make more sense if you see the debug patch - not posted) | ||
65 | */ | ||
66 | #define sn2_ptctest 0 | ||
67 | #define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0) | ||
68 | #define max_active_pio(sh1) ((sh1) ? 32 : 7) | ||
69 | #define reset_max_active_on_deadlock() 1 | ||
70 | #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) | ||
71 | |||
72 | struct ptc_stats { | ||
73 | unsigned long ptc_l; | ||
74 | unsigned long change_rid; | ||
75 | unsigned long shub_ptc_flushes; | ||
76 | unsigned long nodes_flushed; | ||
77 | unsigned long deadlocks; | ||
78 | unsigned long deadlocks2; | ||
79 | unsigned long lock_itc_clocks; | ||
80 | unsigned long shub_itc_clocks; | ||
81 | unsigned long shub_itc_clocks_max; | ||
82 | unsigned long shub_ptc_flushes_not_my_mm; | ||
83 | unsigned long shub_ipi_flushes; | ||
84 | unsigned long shub_ipi_flushes_itc_clocks; | ||
85 | }; | ||
86 | |||
87 | #define sn2_ptctest 0 | ||
88 | |||
89 | static inline unsigned long wait_piowc(void) | ||
90 | { | ||
91 | volatile unsigned long *piows; | ||
92 | unsigned long zeroval, ws; | ||
93 | |||
94 | piows = pda->pio_write_status_addr; | ||
95 | zeroval = pda->pio_write_status_val; | ||
96 | do { | ||
97 | cpu_relax(); | ||
98 | } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval); | ||
99 | return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * sn_migrate - SN-specific task migration actions | ||
104 | * @task: Task being migrated to new CPU | ||
105 | * | ||
106 | * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. | ||
107 | * Context switching user threads which have memory-mapped MMIO may cause | ||
108 | * PIOs to issue from separate CPUs, thus the PIO writes must be drained | ||
109 | * from the previous CPU's Shub before execution resumes on the new CPU. | ||
110 | */ | ||
111 | void sn_migrate(struct task_struct *task) | ||
112 | { | ||
113 | pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu); | ||
114 | volatile unsigned long *adr = last_pda->pio_write_status_addr; | ||
115 | unsigned long val = last_pda->pio_write_status_val; | ||
116 | |||
117 | /* Drain PIO writes from old CPU's Shub */ | ||
118 | while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) | ||
119 | != val)) | ||
120 | cpu_relax(); | ||
121 | } | ||
122 | |||
123 | static void | ||
124 | sn2_ipi_flush_all_tlb(struct mm_struct *mm) | ||
125 | { | ||
126 | unsigned long itc; | ||
127 | |||
128 | itc = ia64_get_itc(); | ||
129 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); | ||
130 | itc = ia64_get_itc() - itc; | ||
131 | __this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc); | ||
132 | __this_cpu_inc(ptcstats.shub_ipi_flushes); | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * sn2_global_tlb_purge - globally purge translation cache of virtual address range | ||
137 | * @mm: mm_struct containing virtual address range | ||
138 | * @start: start of virtual address range | ||
139 | * @end: end of virtual address range | ||
140 | * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc)) | ||
141 | * | ||
142 | * Purges the translation caches of all processors of the given virtual address | ||
143 | * range. | ||
144 | * | ||
145 | * Note: | ||
146 | * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context. | ||
147 | * - cpu_vm_mask is converted into a nodemask of the nodes containing the | ||
148 | * cpus in cpu_vm_mask. | ||
149 | * - if only one bit is set in cpu_vm_mask & it is the current cpu & the | ||
150 | * process is purging its own virtual address range, then only the | ||
151 | * local TLB needs to be flushed. This flushing can be done using | ||
152 | * ptc.l. This is the common case & avoids the global spinlock. | ||
153 | * - if multiple cpus have loaded the context, then flushing has to be | ||
154 | * done with ptc.g/MMRs under protection of the global ptc_lock. | ||
155 | */ | ||
156 | |||
157 | void | ||
158 | sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | ||
159 | unsigned long end, unsigned long nbits) | ||
160 | { | ||
161 | int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid; | ||
162 | int mymm = (mm == current->active_mm && mm == current->mm); | ||
163 | int use_cpu_ptcga; | ||
164 | volatile unsigned long *ptc0, *ptc1; | ||
165 | unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; | ||
166 | short nix; | ||
167 | nodemask_t nodes_flushed; | ||
168 | int active, max_active, deadlock, flush_opt = sn2_flush_opt; | ||
169 | |||
170 | if (flush_opt > 2) { | ||
171 | sn2_ipi_flush_all_tlb(mm); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | nodes_clear(nodes_flushed); | ||
176 | i = 0; | ||
177 | |||
178 | for_each_cpu(cpu, mm_cpumask(mm)) { | ||
179 | cnode = cpu_to_node(cpu); | ||
180 | node_set(cnode, nodes_flushed); | ||
181 | lcpu = cpu; | ||
182 | i++; | ||
183 | } | ||
184 | |||
185 | if (i == 0) | ||
186 | return; | ||
187 | |||
188 | preempt_disable(); | ||
189 | |||
190 | if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) { | ||
191 | do { | ||
192 | ia64_ptcl(start, nbits << 2); | ||
193 | start += (1UL << nbits); | ||
194 | } while (start < end); | ||
195 | ia64_srlz_i(); | ||
196 | __this_cpu_inc(ptcstats.ptc_l); | ||
197 | preempt_enable(); | ||
198 | return; | ||
199 | } | ||
200 | |||
201 | if (atomic_read(&mm->mm_users) == 1 && mymm) { | ||
202 | flush_tlb_mm(mm); | ||
203 | __this_cpu_inc(ptcstats.change_rid); | ||
204 | preempt_enable(); | ||
205 | return; | ||
206 | } | ||
207 | |||
208 | if (flush_opt == 2) { | ||
209 | sn2_ipi_flush_all_tlb(mm); | ||
210 | preempt_enable(); | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | itc = ia64_get_itc(); | ||
215 | nix = nodes_weight(nodes_flushed); | ||
216 | |||
217 | rr_value = (mm->context << 3) | REGION_NUMBER(start); | ||
218 | |||
219 | shub1 = is_shub1(); | ||
220 | if (shub1) { | ||
221 | data0 = (1UL << SH1_PTC_0_A_SHFT) | | ||
222 | (nbits << SH1_PTC_0_PS_SHFT) | | ||
223 | (rr_value << SH1_PTC_0_RID_SHFT) | | ||
224 | (1UL << SH1_PTC_0_START_SHFT); | ||
225 | ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0); | ||
226 | ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1); | ||
227 | } else { | ||
228 | data0 = (1UL << SH2_PTC_A_SHFT) | | ||
229 | (nbits << SH2_PTC_PS_SHFT) | | ||
230 | (1UL << SH2_PTC_START_SHFT); | ||
231 | ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + | ||
232 | (rr_value << SH2_PTC_RID_SHFT)); | ||
233 | ptc1 = NULL; | ||
234 | } | ||
235 | |||
236 | |||
237 | mynasid = get_nasid(); | ||
238 | use_cpu_ptcga = local_node_uses_ptc_ga(shub1); | ||
239 | max_active = max_active_pio(shub1); | ||
240 | |||
241 | itc = ia64_get_itc(); | ||
242 | spin_lock_irqsave(PTC_LOCK(shub1), flags); | ||
243 | itc2 = ia64_get_itc(); | ||
244 | |||
245 | __this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc); | ||
246 | __this_cpu_inc(ptcstats.shub_ptc_flushes); | ||
247 | __this_cpu_add(ptcstats.nodes_flushed, nix); | ||
248 | if (!mymm) | ||
249 | __this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm); | ||
250 | |||
251 | if (use_cpu_ptcga && !mymm) { | ||
252 | old_rr = ia64_get_rr(start); | ||
253 | ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8)); | ||
254 | ia64_srlz_d(); | ||
255 | } | ||
256 | |||
257 | wait_piowc(); | ||
258 | do { | ||
259 | if (shub1) | ||
260 | data1 = start | (1UL << SH1_PTC_1_START_SHFT); | ||
261 | else | ||
262 | data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); | ||
263 | deadlock = 0; | ||
264 | active = 0; | ||
265 | ibegin = 0; | ||
266 | i = 0; | ||
267 | for_each_node_mask(cnode, nodes_flushed) { | ||
268 | nasid = cnodeid_to_nasid(cnode); | ||
269 | if (use_cpu_ptcga && unlikely(nasid == mynasid)) { | ||
270 | ia64_ptcga(start, nbits << 2); | ||
271 | ia64_srlz_i(); | ||
272 | } else { | ||
273 | ptc0 = CHANGE_NASID(nasid, ptc0); | ||
274 | if (ptc1) | ||
275 | ptc1 = CHANGE_NASID(nasid, ptc1); | ||
276 | pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1); | ||
277 | active++; | ||
278 | } | ||
279 | if (active >= max_active || i == (nix - 1)) { | ||
280 | if ((deadlock = wait_piowc())) { | ||
281 | if (flush_opt == 1) | ||
282 | goto done; | ||
283 | sn2_ptc_deadlock_recovery(nodes_flushed, ibegin, i, mynasid, ptc0, data0, ptc1, data1); | ||
284 | if (reset_max_active_on_deadlock()) | ||
285 | max_active = 1; | ||
286 | } | ||
287 | active = 0; | ||
288 | ibegin = i + 1; | ||
289 | } | ||
290 | i++; | ||
291 | } | ||
292 | start += (1UL << nbits); | ||
293 | } while (start < end); | ||
294 | |||
295 | done: | ||
296 | itc2 = ia64_get_itc() - itc2; | ||
297 | __this_cpu_add(ptcstats.shub_itc_clocks, itc2); | ||
298 | if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max)) | ||
299 | __this_cpu_write(ptcstats.shub_itc_clocks_max, itc2); | ||
300 | |||
301 | if (old_rr) { | ||
302 | ia64_set_rr(start, old_rr); | ||
303 | ia64_srlz_d(); | ||
304 | } | ||
305 | |||
306 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); | ||
307 | |||
308 | if (flush_opt == 1 && deadlock) { | ||
309 | __this_cpu_inc(ptcstats.deadlocks); | ||
310 | sn2_ipi_flush_all_tlb(mm); | ||
311 | } | ||
312 | |||
313 | preempt_enable(); | ||
314 | } | ||
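The docblock above lays out the purge strategy in prose; a compressed restatement of the branches the function actually takes, in the order it checks them (illustrative comment only, not part of the patch):

	/*
	 * Summary of sn2_global_tlb_purge() (illustrative only):
	 *   sn2_flush_opt > 2          -> always sn2_ipi_flush_all_tlb()
	 *   single cpu, our own mm     -> ia64_ptcl() loop (no global lock)
	 *   single user, our own mm    -> flush_tlb_mm() (move to a new RID)
	 *   sn2_flush_opt == 2         -> sn2_ipi_flush_all_tlb()
	 *   otherwise                  -> SHUB PTC MMR writes to each node in
	 *                                 nodes_flushed under PTC_LOCK(); write
	 *                                 deadlocks detected by wait_piowc() are
	 *                                 handled by sn2_ptc_deadlock_recovery(),
	 *                                 or by an IPI flush when sn2_flush_opt == 1
	 */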
315 | |||
316 | /* | ||
317 | * sn2_ptc_deadlock_recovery | ||
318 | * | ||
319 | * Recover from PTC deadlock conditions. Recovery requires stepping through each | ||
320 | * TLB flush transaction. The recovery sequence is somewhat tricky & is | ||
321 | * coded in assembly language. | ||
322 | */ | ||
323 | |||
324 | void | ||
325 | sn2_ptc_deadlock_recovery(nodemask_t nodes, short ib, short ie, int mynasid, | ||
326 | volatile unsigned long *ptc0, unsigned long data0, | ||
327 | volatile unsigned long *ptc1, unsigned long data1) | ||
328 | { | ||
329 | short nasid, i; | ||
330 | int cnode; | ||
331 | unsigned long *piows, zeroval, n; | ||
332 | |||
333 | __this_cpu_inc(ptcstats.deadlocks); | ||
334 | |||
335 | piows = (unsigned long *) pda->pio_write_status_addr; | ||
336 | zeroval = pda->pio_write_status_val; | ||
337 | |||
338 | i = 0; | ||
339 | for_each_node_mask(cnode, nodes) { | ||
340 | if (i < ib) | ||
341 | goto next; | ||
342 | |||
343 | if (i > ie) | ||
344 | break; | ||
345 | |||
346 | nasid = cnodeid_to_nasid(cnode); | ||
347 | if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid) | ||
348 | goto next; | ||
349 | |||
350 | ptc0 = CHANGE_NASID(nasid, ptc0); | ||
351 | if (ptc1) | ||
352 | ptc1 = CHANGE_NASID(nasid, ptc1); | ||
353 | |||
354 | n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); | ||
355 | __this_cpu_add(ptcstats.deadlocks2, n); | ||
356 | next: | ||
357 | i++; | ||
358 | } | ||
359 | |||
360 | } | ||
361 | |||
362 | /** | ||
363 | * sn_send_IPI_phys - send an IPI to a Nasid and slice | ||
364 | * @nasid: nasid to receive the interrupt (may be outside partition) | ||
365 | * @physid: physical cpuid to receive the interrupt. | ||
366 | * @vector: command to send | ||
367 | * @delivery_mode: delivery mechanism | ||
368 | * | ||
369 | * Sends an IPI (interprocessor interrupt) to the processor specified by | ||
370 | * @physid | ||
371 | * | ||
372 | * @delivery_mode can be one of the following | ||
373 | * | ||
374 | * %IA64_IPI_DM_INT - pend an interrupt | ||
375 | * %IA64_IPI_DM_PMI - pend a PMI | ||
376 | * %IA64_IPI_DM_NMI - pend an NMI | ||
377 | * %IA64_IPI_DM_INIT - pend an INIT interrupt | ||
378 | */ | ||
379 | void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode) | ||
380 | { | ||
381 | long val; | ||
382 | unsigned long flags = 0; | ||
383 | volatile long *p; | ||
384 | |||
385 | p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT); | ||
386 | val = (1UL << SH_IPI_INT_SEND_SHFT) | | ||
387 | (physid << SH_IPI_INT_PID_SHFT) | | ||
388 | ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) | | ||
389 | ((long)vector << SH_IPI_INT_IDX_SHFT) | | ||
390 | (0x000feeUL << SH_IPI_INT_BASE_SHFT); | ||
391 | |||
392 | mb(); | ||
393 | if (enable_shub_wars_1_1()) { | ||
394 | spin_lock_irqsave(&sn2_global_ptc_lock, flags); | ||
395 | } | ||
396 | pio_phys_write_mmr(p, val); | ||
397 | if (enable_shub_wars_1_1()) { | ||
398 | wait_piowc(); | ||
399 | spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); | ||
400 | } | ||
401 | |||
402 | } | ||
403 | |||
404 | EXPORT_SYMBOL(sn_send_IPI_phys); | ||
405 | |||
406 | /** | ||
407 | * sn2_send_IPI - send an IPI to a processor | ||
408 | * @cpuid: target of the IPI | ||
409 | * @vector: command to send | ||
410 | * @delivery_mode: delivery mechanism | ||
411 | * @redirect: redirect the IPI? | ||
412 | * | ||
413 | * Sends an IPI (InterProcessor Interrupt) to the processor specified by | ||
414 | * @cpuid. @vector specifies the command to send, while @delivery_mode can | ||
415 | * be one of the following | ||
416 | * | ||
417 | * %IA64_IPI_DM_INT - pend an interrupt | ||
418 | * %IA64_IPI_DM_PMI - pend a PMI | ||
419 | * %IA64_IPI_DM_NMI - pend an NMI | ||
420 | * %IA64_IPI_DM_INIT - pend an INIT interrupt | ||
421 | */ | ||
422 | void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) | ||
423 | { | ||
424 | long physid; | ||
425 | int nasid; | ||
426 | |||
427 | physid = cpu_physical_id(cpuid); | ||
428 | nasid = cpuid_to_nasid(cpuid); | ||
429 | |||
430 | /* the following is used only when starting cpus at boot time */ | ||
431 | if (unlikely(nasid == -1)) | ||
432 | ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL); | ||
433 | |||
434 | sn_send_IPI_phys(nasid, physid, vector, delivery_mode); | ||
435 | } | ||
436 | |||
437 | #ifdef CONFIG_HOTPLUG_CPU | ||
438 | /** | ||
439 | * sn_cpu_disable_allowed - Determine if a CPU can be disabled. | ||
440 | * @cpu - CPU that is requested to be disabled. | ||
441 | * | ||
442 | * CPU disable is only allowed on SHub2 systems running with a PROM | ||
443 | * that supports CPU disable. It is not permitted to disable the boot processor. | ||
444 | */ | ||
445 | bool sn_cpu_disable_allowed(int cpu) | ||
446 | { | ||
447 | if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) { | ||
448 | if (cpu != 0) | ||
449 | return true; | ||
450 | else | ||
451 | printk(KERN_WARNING | ||
452 | "Disabling the boot processor is not allowed.\n"); | ||
453 | |||
454 | } else | ||
455 | printk(KERN_WARNING | ||
456 | "CPU disable is not supported on this system.\n"); | ||
457 | |||
458 | return false; | ||
459 | } | ||
460 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
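For context, a minimal userspace sketch (not part of this patch) of the path that sn_cpu_disable_allowed() above ends up gating: CPU hotplug is driven through the standard sysfs "online" attribute, and the offline request can only succeed when the SHub2/PROM check passes. The CPU number used here is purely illustrative.

	/*
	 * Hedged sketch, not from the patch: request that CPU 2 be taken
	 * offline via sysfs.  On SN2 this ultimately runs into
	 * sn_cpu_disable_allowed(); the write fails if the PROM does not
	 * support CPU disable or if CPU 2 is the boot processor.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu2/online", "w");

		if (!f) {
			perror("cpu2/online");
			return 1;
		}
		fputs("0\n", f);	/* "0" = offline, "1" = online */
		fclose(f);
		return 0;
	}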
461 | |||
462 | #ifdef CONFIG_PROC_FS | ||
463 | |||
464 | #define PTC_BASENAME "sgi_sn/ptc_statistics" | ||
465 | |||
466 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | ||
467 | { | ||
468 | if (*offset < nr_cpu_ids) | ||
469 | return offset; | ||
470 | return NULL; | ||
471 | } | ||
472 | |||
473 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) | ||
474 | { | ||
475 | (*offset)++; | ||
476 | if (*offset < nr_cpu_ids) | ||
477 | return offset; | ||
478 | return NULL; | ||
479 | } | ||
480 | |||
481 | static void sn2_ptc_seq_stop(struct seq_file *file, void *data) | ||
482 | { | ||
483 | } | ||
484 | |||
485 | static int sn2_ptc_seq_show(struct seq_file *file, void *data) | ||
486 | { | ||
487 | struct ptc_stats *stat; | ||
488 | int cpu; | ||
489 | |||
490 | cpu = *(loff_t *) data; | ||
491 | |||
492 | if (!cpu) { | ||
493 | seq_printf(file, | ||
494 | "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n"); | ||
495 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); | ||
496 | } | ||
497 | |||
498 | if (cpu < nr_cpu_ids && cpu_online(cpu)) { | ||
499 | stat = &per_cpu(ptcstats, cpu); | ||
500 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | ||
501 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | ||
502 | stat->deadlocks, | ||
503 | 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, | ||
504 | 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, | ||
505 | 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, | ||
506 | stat->shub_ptc_flushes_not_my_mm, | ||
507 | stat->deadlocks2, | ||
508 | stat->shub_ipi_flushes, | ||
509 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); | ||
510 | } | ||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data) | ||
515 | { | ||
516 | int cpu; | ||
517 | char optstr[64]; | ||
518 | |||
519 | if (count == 0 || count > sizeof(optstr)) | ||
520 | return -EINVAL; | ||
521 | if (copy_from_user(optstr, user, count)) | ||
522 | return -EFAULT; | ||
523 | optstr[count - 1] = '\0'; | ||
524 | sn2_flush_opt = simple_strtoul(optstr, NULL, 0); | ||
525 | |||
526 | for_each_online_cpu(cpu) | ||
527 | memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); | ||
528 | |||
529 | return count; | ||
530 | } | ||
531 | |||
532 | static const struct seq_operations sn2_ptc_seq_ops = { | ||
533 | .start = sn2_ptc_seq_start, | ||
534 | .next = sn2_ptc_seq_next, | ||
535 | .stop = sn2_ptc_seq_stop, | ||
536 | .show = sn2_ptc_seq_show | ||
537 | }; | ||
538 | |||
539 | static int sn2_ptc_proc_open(struct inode *inode, struct file *file) | ||
540 | { | ||
541 | return seq_open(file, &sn2_ptc_seq_ops); | ||
542 | } | ||
543 | |||
544 | static const struct file_operations proc_sn2_ptc_operations = { | ||
545 | .open = sn2_ptc_proc_open, | ||
546 | .read = seq_read, | ||
547 | .write = sn2_ptc_proc_write, | ||
548 | .llseek = seq_lseek, | ||
549 | .release = seq_release, | ||
550 | }; | ||
551 | |||
552 | static struct proc_dir_entry *proc_sn2_ptc; | ||
553 | |||
554 | static int __init sn2_ptc_init(void) | ||
555 | { | ||
556 | if (!ia64_platform_is("sn2")) | ||
557 | return 0; | ||
558 | |||
559 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, | ||
560 | NULL, &proc_sn2_ptc_operations); | ||
561 | if (!proc_sn2_ptc) { | ||
562 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); | ||
563 | return -EINVAL; | ||
564 | } | ||
565 | spin_lock_init(&sn2_global_ptc_lock); | ||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | static void __exit sn2_ptc_exit(void) | ||
570 | { | ||
571 | remove_proc_entry(PTC_BASENAME, NULL); | ||
572 | } | ||
573 | |||
574 | module_init(sn2_ptc_init); | ||
575 | module_exit(sn2_ptc_exit); | ||
576 | #endif /* CONFIG_PROC_FS */ | ||
577 | |||
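As a usage illustration (not part of this patch), a userspace program could exercise the /proc/sgi_sn/ptc_statistics file created by sn2_ptc_init() above: writing an integer sets sn2_flush_opt and clears the per-cpu counters, and reading back dumps a header plus one line per online CPU.

	/*
	 * Hedged sketch, not from the patch: poke the TLB-flush statistics
	 * file exported by the code above.
	 */
	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f;

		/* select a flush policy and reset the per-cpu counters */
		f = fopen("/proc/sgi_sn/ptc_statistics", "w");
		if (f) {
			fputs("1\n", f);
			fclose(f);
		}

		/* dump the per-cpu PTC/IPI flush statistics */
		f = fopen("/proc/sgi_sn/ptc_statistics", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}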
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c deleted file mode 100644 index 55febd65911a..000000000000 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ /dev/null | |||
@@ -1,1004 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | * SGI Altix topology and hardware performance monitoring API. | ||
9 | * Mark Goodwin <markgw@sgi.com>. | ||
10 | * | ||
11 | * Creates /proc/sgi_sn/sn_topology (read-only) to export | ||
12 | * info about Altix nodes, routers, CPUs and NumaLink | ||
13 | * interconnection/topology. | ||
14 | * | ||
15 | * Also creates a dynamic misc device named "sn_hwperf" | ||
16 | * that supports an ioctl interface to call down into SAL | ||
17 | * to discover hw objects, topology and to read/write | ||
18 | * memory mapped registers, e.g. for performance monitoring. | ||
19 | * The "sn_hwperf" device is registered only after the procfs | ||
20 | * file is first opened, i.e. only if/when it's needed. | ||
21 | * | ||
22 | * This API is used by SGI Performance Co-Pilot and other | ||
23 | * tools, see http://oss.sgi.com/projects/pcp | ||
24 | */ | ||
25 | |||
26 | #include <linux/fs.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/export.h> | ||
29 | #include <linux/vmalloc.h> | ||
30 | #include <linux/seq_file.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/utsname.h> | ||
33 | #include <linux/cpumask.h> | ||
34 | #include <linux/nodemask.h> | ||
35 | #include <linux/smp.h> | ||
36 | #include <linux/mutex.h> | ||
37 | |||
38 | #include <asm/processor.h> | ||
39 | #include <asm/topology.h> | ||
40 | #include <linux/uaccess.h> | ||
41 | #include <asm/sal.h> | ||
42 | #include <asm/sn/io.h> | ||
43 | #include <asm/sn/sn_sal.h> | ||
44 | #include <asm/sn/module.h> | ||
45 | #include <asm/sn/geo.h> | ||
46 | #include <asm/sn/sn2/sn_hwperf.h> | ||
47 | #include <asm/sn/addrs.h> | ||
48 | |||
49 | static void *sn_hwperf_salheap = NULL; | ||
50 | static int sn_hwperf_obj_cnt = 0; | ||
51 | static nasid_t sn_hwperf_master_nasid = INVALID_NASID; | ||
52 | static int sn_hwperf_init(void); | ||
53 | static DEFINE_MUTEX(sn_hwperf_init_mutex); | ||
54 | |||
55 | #define cnode_possible(n) ((n) < num_cnodes) | ||
56 | |||
57 | static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret) | ||
58 | { | ||
59 | int e; | ||
60 | u64 sz; | ||
61 | struct sn_hwperf_object_info *objbuf = NULL; | ||
62 | |||
63 | if ((e = sn_hwperf_init()) < 0) { | ||
64 | printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e); | ||
65 | goto out; | ||
66 | } | ||
67 | |||
68 | sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info); | ||
69 | objbuf = vmalloc(sz); | ||
70 | if (objbuf == NULL) { | ||
71 | printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz); | ||
72 | e = -ENOMEM; | ||
73 | goto out; | ||
74 | } | ||
75 | |||
76 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS, | ||
77 | 0, sz, (u64) objbuf, 0, 0, NULL); | ||
78 | if (e != SN_HWPERF_OP_OK) { | ||
79 | e = -EINVAL; | ||
80 | vfree(objbuf); | ||
81 | } | ||
82 | |||
83 | out: | ||
84 | *nobj = sn_hwperf_obj_cnt; | ||
85 | *ret = objbuf; | ||
86 | return e; | ||
87 | } | ||
88 | |||
89 | static int sn_hwperf_location_to_bpos(char *location, | ||
90 | int *rack, int *bay, int *slot, int *slab) | ||
91 | { | ||
92 | char type; | ||
93 | |||
94 | /* first scan for an old style geoid string */ | ||
95 | if (sscanf(location, "%03d%c%02d#%d", | ||
96 | rack, &type, bay, slab) == 4) | ||
97 | *slot = 0; | ||
98 | else /* scan for a new bladed geoid string */ | ||
99 | if (sscanf(location, "%03d%c%02d^%02d#%d", | ||
100 | rack, &type, bay, slot, slab) != 5) | ||
101 | return -1; | ||
102 | /* success */ | ||
103 | return 0; | ||
104 | } | ||
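To make the two sscanf() formats above concrete, here is a standalone sketch (not part of this patch) that runs hypothetical geoid location strings through the same patterns; the sample strings are illustrative only.

	/*
	 * Hedged illustration of sn_hwperf_location_to_bpos(): parse an
	 * old-style geoid ("rack""type""bay"#"slab") and a bladed geoid
	 * ("rack""type""bay"^"slot"#"slab") with the same formats used above.
	 */
	#include <stdio.h>

	static int location_to_bpos(const char *location,
				    int *rack, int *bay, int *slot, int *slab)
	{
		char type;

		/* old style, e.g. "001c14#0" -> rack 1, bay 14, slot 0, slab 0 */
		if (sscanf(location, "%03d%c%02d#%d", rack, &type, bay, slab) == 4) {
			*slot = 0;
			return 0;
		}
		/* bladed style, e.g. "001c14^02#1" -> rack 1, bay 14, slot 2, slab 1 */
		if (sscanf(location, "%03d%c%02d^%02d#%d",
			   rack, &type, bay, slot, slab) == 5)
			return 0;
		return -1;
	}

	int main(void)
	{
		int rack, bay, slot, slab;

		if (!location_to_bpos("001c14^02#1", &rack, &bay, &slot, &slab))
			printf("rack %d bay %d slot %d slab %d\n",
			       rack, bay, slot, slab);
		return 0;
	}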
105 | |||
106 | static int sn_hwperf_geoid_to_cnode(char *location) | ||
107 | { | ||
108 | int cnode; | ||
109 | geoid_t geoid; | ||
110 | moduleid_t module_id; | ||
111 | int rack, bay, slot, slab; | ||
112 | int this_rack, this_bay, this_slot, this_slab; | ||
113 | |||
114 | if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) | ||
115 | return -1; | ||
116 | |||
117 | /* | ||
118 | * FIXME: replace with cleaner for_each_XXX macro which addresses | ||
119 | * both compute and IO nodes once ACPI3.0 is available. | ||
120 | */ | ||
121 | for (cnode = 0; cnode < num_cnodes; cnode++) { | ||
122 | geoid = cnodeid_get_geoid(cnode); | ||
123 | module_id = geo_module(geoid); | ||
124 | this_rack = MODULE_GET_RACK(module_id); | ||
125 | this_bay = MODULE_GET_BPOS(module_id); | ||
126 | this_slot = geo_slot(geoid); | ||
127 | this_slab = geo_slab(geoid); | ||
128 | if (rack == this_rack && bay == this_bay && | ||
129 | slot == this_slot && slab == this_slab) { | ||
130 | break; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | return cnode_possible(cnode) ? cnode : -1; | ||
135 | } | ||
136 | |||
137 | static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj) | ||
138 | { | ||
139 | if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) | ||
140 | BUG(); | ||
141 | if (SN_HWPERF_FOREIGN(obj)) | ||
142 | return -1; | ||
143 | return sn_hwperf_geoid_to_cnode(obj->location); | ||
144 | } | ||
145 | |||
146 | static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj, | ||
147 | struct sn_hwperf_object_info *objs) | ||
148 | { | ||
149 | int ordinal; | ||
150 | struct sn_hwperf_object_info *p; | ||
151 | |||
152 | for (ordinal=0, p=objs; p != obj; p++) { | ||
153 | if (SN_HWPERF_FOREIGN(p)) | ||
154 | continue; | ||
155 | if (SN_HWPERF_SAME_OBJTYPE(p, obj)) | ||
156 | ordinal++; | ||
157 | } | ||
158 | |||
159 | return ordinal; | ||
160 | } | ||
161 | |||
162 | static const char *slabname_node = "node"; /* SHub asic */ | ||
163 | static const char *slabname_ionode = "ionode"; /* TIO asic */ | ||
164 | static const char *slabname_router = "router"; /* NL3R or NL4R */ | ||
165 | static const char *slabname_other = "other"; /* unknown asic */ | ||
166 | |||
167 | static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj, | ||
168 | struct sn_hwperf_object_info *objs, int *ordinal) | ||
169 | { | ||
170 | int isnode; | ||
171 | const char *slabname = slabname_other; | ||
172 | |||
173 | if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) { | ||
174 | slabname = isnode ? slabname_node : slabname_ionode; | ||
175 | *ordinal = sn_hwperf_obj_to_cnode(obj); | ||
176 | } | ||
177 | else { | ||
178 | *ordinal = sn_hwperf_generic_ordinal(obj, objs); | ||
179 | if (SN_HWPERF_IS_ROUTER(obj)) | ||
180 | slabname = slabname_router; | ||
181 | } | ||
182 | |||
183 | return slabname; | ||
184 | } | ||
185 | |||
186 | static void print_pci_topology(struct seq_file *s) | ||
187 | { | ||
188 | char *p; | ||
189 | size_t sz; | ||
190 | int e; | ||
191 | |||
192 | for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { | ||
193 | if (!(p = kmalloc(sz, GFP_KERNEL))) | ||
194 | break; | ||
195 | e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); | ||
196 | if (e == SALRET_OK) | ||
197 | seq_puts(s, p); | ||
198 | kfree(p); | ||
199 | if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED) | ||
200 | break; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | static inline int sn_hwperf_has_cpus(cnodeid_t node) | ||
205 | { | ||
206 | return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node); | ||
207 | } | ||
208 | |||
209 | static inline int sn_hwperf_has_mem(cnodeid_t node) | ||
210 | { | ||
211 | return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages; | ||
212 | } | ||
213 | |||
214 | static struct sn_hwperf_object_info * | ||
215 | sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf, | ||
216 | int nobj, int id) | ||
217 | { | ||
218 | int i; | ||
219 | struct sn_hwperf_object_info *p = objbuf; | ||
220 | |||
221 | for (i=0; i < nobj; i++, p++) { | ||
222 | if (p->id == id) | ||
223 | return p; | ||
224 | } | ||
225 | |||
226 | return NULL; | ||
227 | |||
228 | } | ||
229 | |||
230 | static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf, | ||
231 | int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) | ||
232 | { | ||
233 | int e; | ||
234 | struct sn_hwperf_object_info *nodeobj = NULL; | ||
235 | struct sn_hwperf_object_info *op; | ||
236 | struct sn_hwperf_object_info *dest; | ||
237 | struct sn_hwperf_object_info *router; | ||
238 | struct sn_hwperf_port_info ptdata[16]; | ||
239 | int sz, i, j; | ||
240 | cnodeid_t c; | ||
241 | int found_mem = 0; | ||
242 | int found_cpu = 0; | ||
243 | |||
244 | if (!cnode_possible(node)) | ||
245 | return -EINVAL; | ||
246 | |||
247 | if (sn_hwperf_has_cpus(node)) { | ||
248 | if (near_cpu_node) | ||
249 | *near_cpu_node = node; | ||
250 | found_cpu++; | ||
251 | } | ||
252 | |||
253 | if (sn_hwperf_has_mem(node)) { | ||
254 | if (near_mem_node) | ||
255 | *near_mem_node = node; | ||
256 | found_mem++; | ||
257 | } | ||
258 | |||
259 | if (found_cpu && found_mem) | ||
260 | return 0; /* trivially successful */ | ||
261 | |||
262 | /* find the argument node object */ | ||
263 | for (i=0, op=objbuf; i < nobj; i++, op++) { | ||
264 | if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op)) | ||
265 | continue; | ||
266 | if (node == sn_hwperf_obj_to_cnode(op)) { | ||
267 | nodeobj = op; | ||
268 | break; | ||
269 | } | ||
270 | } | ||
271 | if (!nodeobj) { | ||
272 | e = -ENOENT; | ||
273 | goto err; | ||
274 | } | ||
275 | |||
276 | /* get its interconnect topology */ | ||
277 | sz = op->ports * sizeof(struct sn_hwperf_port_info); | ||
278 | BUG_ON(sz > sizeof(ptdata)); | ||
279 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
280 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, | ||
281 | (u64)&ptdata, 0, 0, NULL); | ||
282 | if (e != SN_HWPERF_OP_OK) { | ||
283 | e = -EINVAL; | ||
284 | goto err; | ||
285 | } | ||
286 | |||
287 | /* find nearest node with cpus and nearest memory */ | ||
288 | for (router=NULL, j=0; j < op->ports; j++) { | ||
289 | dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); | ||
290 | if (dest && SN_HWPERF_IS_ROUTER(dest)) | ||
291 | router = dest; | ||
292 | if (!dest || SN_HWPERF_FOREIGN(dest) || | ||
293 | !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { | ||
294 | continue; | ||
295 | } | ||
296 | c = sn_hwperf_obj_to_cnode(dest); | ||
297 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
298 | if (near_cpu_node) | ||
299 | *near_cpu_node = c; | ||
300 | found_cpu++; | ||
301 | } | ||
302 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
303 | if (near_mem_node) | ||
304 | *near_mem_node = c; | ||
305 | found_mem++; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | if (router && (!found_cpu || !found_mem)) { | ||
310 | /* search for a node connected to the same router */ | ||
311 | sz = router->ports * sizeof(struct sn_hwperf_port_info); | ||
312 | BUG_ON(sz > sizeof(ptdata)); | ||
313 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
314 | SN_HWPERF_ENUM_PORTS, router->id, sz, | ||
315 | (u64)&ptdata, 0, 0, NULL); | ||
316 | if (e != SN_HWPERF_OP_OK) { | ||
317 | e = -EINVAL; | ||
318 | goto err; | ||
319 | } | ||
320 | for (j=0; j < router->ports; j++) { | ||
321 | dest = sn_hwperf_findobj_id(objbuf, nobj, | ||
322 | ptdata[j].conn_id); | ||
323 | if (!dest || dest->id == node || | ||
324 | SN_HWPERF_FOREIGN(dest) || | ||
325 | !SN_HWPERF_IS_NODE(dest) || | ||
326 | SN_HWPERF_IS_IONODE(dest)) { | ||
327 | continue; | ||
328 | } | ||
329 | c = sn_hwperf_obj_to_cnode(dest); | ||
330 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
331 | if (near_cpu_node) | ||
332 | *near_cpu_node = c; | ||
333 | found_cpu++; | ||
334 | } | ||
335 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
336 | if (near_mem_node) | ||
337 | *near_mem_node = c; | ||
338 | found_mem++; | ||
339 | } | ||
340 | if (found_cpu && found_mem) | ||
341 | break; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | if (!found_cpu || !found_mem) { | ||
346 | /* resort to _any_ node with CPUs and memory */ | ||
347 | for (i=0, op=objbuf; i < nobj; i++, op++) { | ||
348 | if (SN_HWPERF_FOREIGN(op) || | ||
349 | SN_HWPERF_IS_IONODE(op) || | ||
350 | !SN_HWPERF_IS_NODE(op)) { | ||
351 | continue; | ||
352 | } | ||
353 | c = sn_hwperf_obj_to_cnode(op); | ||
354 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
355 | if (near_cpu_node) | ||
356 | *near_cpu_node = c; | ||
357 | found_cpu++; | ||
358 | } | ||
359 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
360 | if (near_mem_node) | ||
361 | *near_mem_node = c; | ||
362 | found_mem++; | ||
363 | } | ||
364 | if (found_cpu && found_mem) | ||
365 | break; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | if (!found_cpu || !found_mem) | ||
370 | e = -ENODATA; | ||
371 | |||
372 | err: | ||
373 | return e; | ||
374 | } | ||
375 | |||
376 | |||
377 | static int sn_topology_show(struct seq_file *s, void *d) | ||
378 | { | ||
379 | int sz; | ||
380 | int pt; | ||
381 | int e = 0; | ||
382 | int i; | ||
383 | int j; | ||
384 | const char *slabname; | ||
385 | int ordinal; | ||
386 | char slice; | ||
387 | struct cpuinfo_ia64 *c; | ||
388 | struct sn_hwperf_port_info *ptdata; | ||
389 | struct sn_hwperf_object_info *p; | ||
390 | struct sn_hwperf_object_info *obj = d; /* this object */ | ||
391 | struct sn_hwperf_object_info *objs = s->private; /* all objects */ | ||
392 | u8 shubtype; | ||
393 | u8 system_size; | ||
394 | u8 sharing_size; | ||
395 | u8 partid; | ||
396 | u8 coher; | ||
397 | u8 nasid_shift; | ||
398 | u8 region_size; | ||
399 | u16 nasid_mask; | ||
400 | int nasid_msb; | ||
401 | |||
402 | if (obj == objs) { | ||
403 | seq_printf(s, "# sn_topology version 2\n"); | ||
404 | seq_printf(s, "# objtype ordinal location partition" | ||
405 | " [attribute value [, ...]]\n"); | ||
406 | |||
407 | if (ia64_sn_get_sn_info(0, | ||
408 | &shubtype, &nasid_mask, &nasid_shift, &system_size, | ||
409 | &sharing_size, &partid, &coher, ®ion_size)) | ||
410 | BUG(); | ||
411 | for (nasid_msb=63; nasid_msb > 0; nasid_msb--) { | ||
412 | if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb)) | ||
413 | break; | ||
414 | } | ||
415 | seq_printf(s, "partition %u %s local " | ||
416 | "shubtype %s, " | ||
417 | "nasid_mask 0x%016llx, " | ||
418 | "nasid_bits %d:%d, " | ||
419 | "system_size %d, " | ||
420 | "sharing_size %d, " | ||
421 | "coherency_domain %d, " | ||
422 | "region_size %d\n", | ||
423 | |||
424 | partid, utsname()->nodename, | ||
425 | shubtype ? "shub2" : "shub1", | ||
426 | (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, | ||
427 | system_size, sharing_size, coher, region_size); | ||
428 | |||
429 | print_pci_topology(s); | ||
430 | } | ||
431 | |||
432 | if (SN_HWPERF_FOREIGN(obj)) { | ||
433 | /* private in another partition: not interesting */ | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) { | ||
438 | if (obj->name[i] == ' ') | ||
439 | obj->name[i] = '_'; | ||
440 | } | ||
441 | |||
442 | slabname = sn_hwperf_get_slabname(obj, objs, &ordinal); | ||
443 | seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location, | ||
444 | obj->sn_hwp_this_part ? "local" : "shared", obj->name); | ||
445 | |||
446 | if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))) | ||
447 | seq_putc(s, '\n'); | ||
448 | else { | ||
449 | cnodeid_t near_mem = -1; | ||
450 | cnodeid_t near_cpu = -1; | ||
451 | |||
452 | seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal)); | ||
453 | |||
454 | if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt, | ||
455 | ordinal, &near_mem, &near_cpu) == 0) { | ||
456 | seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d", | ||
457 | near_mem, near_cpu); | ||
458 | } | ||
459 | |||
460 | if (!SN_HWPERF_IS_IONODE(obj)) { | ||
461 | for_each_online_node(i) { | ||
462 | seq_printf(s, i ? ":%d" : ", dist %d", | ||
463 | node_distance(ordinal, i)); | ||
464 | } | ||
465 | } | ||
466 | |||
467 | seq_putc(s, '\n'); | ||
468 | |||
469 | /* | ||
470 | * CPUs on this node, if any | ||
471 | */ | ||
472 | if (!SN_HWPERF_IS_IONODE(obj)) { | ||
473 | for_each_cpu_and(i, cpu_online_mask, | ||
474 | cpumask_of_node(ordinal)) { | ||
475 | slice = 'a' + cpuid_to_slice(i); | ||
476 | c = cpu_data(i); | ||
477 | seq_printf(s, "cpu %d %s%c local" | ||
478 | " freq %luMHz, arch ia64", | ||
479 | i, obj->location, slice, | ||
480 | c->proc_freq / 1000000); | ||
481 | for_each_online_cpu(j) { | ||
482 | seq_printf(s, j ? ":%d" : ", dist %d", | ||
483 | node_distance( | ||
484 | cpu_to_node(i), | ||
485 | cpu_to_node(j))); | ||
486 | } | ||
487 | seq_putc(s, '\n'); | ||
488 | } | ||
489 | } | ||
490 | } | ||
491 | |||
492 | if (obj->ports) { | ||
493 | /* | ||
494 | * numalink ports | ||
495 | */ | ||
496 | sz = obj->ports * sizeof(struct sn_hwperf_port_info); | ||
497 | if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL) | ||
498 | return -ENOMEM; | ||
499 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
500 | SN_HWPERF_ENUM_PORTS, obj->id, sz, | ||
501 | (u64) ptdata, 0, 0, NULL); | ||
502 | if (e != SN_HWPERF_OP_OK) | ||
503 | return -EINVAL; | ||
504 | for (ordinal=0, p=objs; p != obj; p++) { | ||
505 | if (!SN_HWPERF_FOREIGN(p)) | ||
506 | ordinal += p->ports; | ||
507 | } | ||
508 | for (pt = 0; pt < obj->ports; pt++) { | ||
509 | for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) { | ||
510 | if (ptdata[pt].conn_id == p->id) { | ||
511 | break; | ||
512 | } | ||
513 | } | ||
514 | seq_printf(s, "numalink %d %s-%d", | ||
515 | ordinal+pt, obj->location, ptdata[pt].port); | ||
516 | |||
517 | if (i >= sn_hwperf_obj_cnt) { | ||
518 | /* no connection */ | ||
519 | seq_puts(s, " local endpoint disconnected" | ||
520 | ", protocol unknown\n"); | ||
521 | continue; | ||
522 | } | ||
523 | |||
524 | if (obj->sn_hwp_this_part && p->sn_hwp_this_part) | ||
525 | /* both ends local to this partition */ | ||
526 | seq_puts(s, " local"); | ||
527 | else if (SN_HWPERF_FOREIGN(p)) | ||
528 | /* both ends of the link in foreign partition */ | ||
529 | seq_puts(s, " foreign"); | ||
530 | else | ||
531 | /* link straddles a partition */ | ||
532 | seq_puts(s, " shared"); | ||
533 | |||
534 | /* | ||
535 | * Unlikely, but strictly should query the LLP config | ||
536 | * registers because an NL4R can be configured to run | ||
537 | * NL3 protocol, even when not talking to an NL3 router. | ||
538 | * Ditto for node-node. | ||
539 | */ | ||
540 | seq_printf(s, " endpoint %s-%d, protocol %s\n", | ||
541 | p->location, ptdata[pt].conn_port, | ||
542 | (SN_HWPERF_IS_NL3ROUTER(obj) || | ||
543 | SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4"); | ||
544 | } | ||
545 | kfree(ptdata); | ||
546 | } | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static void *sn_topology_start(struct seq_file *s, loff_t * pos) | ||
552 | { | ||
553 | struct sn_hwperf_object_info *objs = s->private; | ||
554 | |||
555 | if (*pos < sn_hwperf_obj_cnt) | ||
556 | return (void *)(objs + *pos); | ||
557 | |||
558 | return NULL; | ||
559 | } | ||
560 | |||
561 | static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos) | ||
562 | { | ||
563 | ++*pos; | ||
564 | return sn_topology_start(s, pos); | ||
565 | } | ||
566 | |||
567 | static void sn_topology_stop(struct seq_file *m, void *v) | ||
568 | { | ||
569 | return; | ||
570 | } | ||
571 | |||
572 | /* | ||
573 | * /proc/sgi_sn/sn_topology, read-only using seq_file | ||
574 | */ | ||
575 | static const struct seq_operations sn_topology_seq_ops = { | ||
576 | .start = sn_topology_start, | ||
577 | .next = sn_topology_next, | ||
578 | .stop = sn_topology_stop, | ||
579 | .show = sn_topology_show | ||
580 | }; | ||
581 | |||
582 | struct sn_hwperf_op_info { | ||
583 | u64 op; | ||
584 | struct sn_hwperf_ioctl_args *a; | ||
585 | void *p; | ||
586 | int *v0; | ||
587 | int ret; | ||
588 | }; | ||
589 | |||
590 | static void sn_hwperf_call_sal(void *info) | ||
591 | { | ||
592 | struct sn_hwperf_op_info *op_info = info; | ||
593 | int r; | ||
594 | |||
595 | r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op, | ||
596 | op_info->a->arg, op_info->a->sz, | ||
597 | (u64) op_info->p, 0, 0, op_info->v0); | ||
598 | op_info->ret = r; | ||
599 | } | ||
600 | |||
601 | static long sn_hwperf_call_sal_work(void *info) | ||
602 | { | ||
603 | sn_hwperf_call_sal(info); | ||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) | ||
608 | { | ||
609 | u32 cpu; | ||
610 | u32 use_ipi; | ||
611 | int r = 0; | ||
612 | |||
613 | cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32; | ||
614 | use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK; | ||
615 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; | ||
616 | |||
617 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { | ||
618 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { | ||
619 | r = -EINVAL; | ||
620 | goto out; | ||
621 | } | ||
622 | } | ||
623 | |||
624 | if (cpu == SN_HWPERF_ARG_ANY_CPU) { | ||
625 | /* don't care which cpu */ | ||
626 | sn_hwperf_call_sal(op_info); | ||
627 | } else if (cpu == get_cpu()) { | ||
628 | /* already on correct cpu */ | ||
629 | sn_hwperf_call_sal(op_info); | ||
630 | put_cpu(); | ||
631 | } else { | ||
632 | put_cpu(); | ||
633 | if (use_ipi) { | ||
634 | /* use an interprocessor interrupt to call SAL */ | ||
635 | smp_call_function_single(cpu, sn_hwperf_call_sal, | ||
636 | op_info, 1); | ||
637 | } else { | ||
638 | /* Call on the target CPU */ | ||
639 | work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info); | ||
640 | } | ||
641 | } | ||
642 | r = op_info->ret; | ||
643 | |||
644 | out: | ||
645 | return r; | ||
646 | } | ||
647 | |||
648 | /* map SAL hwperf error code to system error code */ | ||
649 | static int sn_hwperf_map_err(int hwperf_err) | ||
650 | { | ||
651 | int e; | ||
652 | |||
653 | switch(hwperf_err) { | ||
654 | case SN_HWPERF_OP_OK: | ||
655 | e = 0; | ||
656 | break; | ||
657 | |||
658 | case SN_HWPERF_OP_NOMEM: | ||
659 | e = -ENOMEM; | ||
660 | break; | ||
661 | |||
662 | case SN_HWPERF_OP_NO_PERM: | ||
663 | e = -EPERM; | ||
664 | break; | ||
665 | |||
666 | case SN_HWPERF_OP_IO_ERROR: | ||
667 | e = -EIO; | ||
668 | break; | ||
669 | |||
670 | case SN_HWPERF_OP_BUSY: | ||
671 | e = -EBUSY; | ||
672 | break; | ||
673 | |||
674 | case SN_HWPERF_OP_RECONFIGURE: | ||
675 | e = -EAGAIN; | ||
676 | break; | ||
677 | |||
678 | case SN_HWPERF_OP_INVAL: | ||
679 | default: | ||
680 | e = -EINVAL; | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | return e; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * ioctl for "sn_hwperf" misc device | ||
689 | */ | ||
690 | static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg) | ||
691 | { | ||
692 | struct sn_hwperf_ioctl_args a; | ||
693 | struct cpuinfo_ia64 *cdata; | ||
694 | struct sn_hwperf_object_info *objs; | ||
695 | struct sn_hwperf_object_info *cpuobj; | ||
696 | struct sn_hwperf_op_info op_info; | ||
697 | void *p = NULL; | ||
698 | int nobj; | ||
699 | char slice; | ||
700 | int node; | ||
701 | int r; | ||
702 | int v0; | ||
703 | int i; | ||
704 | int j; | ||
705 | |||
706 | /* only user requests are allowed here */ | ||
707 | if ((op & SN_HWPERF_OP_MASK) < 10) { | ||
708 | r = -EINVAL; | ||
709 | goto error; | ||
710 | } | ||
711 | r = copy_from_user(&a, (const void __user *)arg, | ||
712 | sizeof(struct sn_hwperf_ioctl_args)); | ||
713 | if (r != 0) { | ||
714 | r = -EFAULT; | ||
715 | goto error; | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | * Allocate memory to hold a kernel copy of the user buffer. The | ||
720 | * buffer contents are either copied in or out (or both) of user | ||
721 | * space depending on the flags encoded in the requested operation. | ||
722 | */ | ||
723 | if (a.ptr) { | ||
724 | p = vmalloc(a.sz); | ||
725 | if (!p) { | ||
726 | r = -ENOMEM; | ||
727 | goto error; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | if (op & SN_HWPERF_OP_MEM_COPYIN) { | ||
732 | r = copy_from_user(p, (const void __user *)a.ptr, a.sz); | ||
733 | if (r != 0) { | ||
734 | r = -EFAULT; | ||
735 | goto error; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | switch (op) { | ||
740 | case SN_HWPERF_GET_CPU_INFO: | ||
741 | if (a.sz == sizeof(u64)) { | ||
742 | /* special case to get size needed */ | ||
743 | *(u64 *) p = (u64) num_online_cpus() * | ||
744 | sizeof(struct sn_hwperf_object_info); | ||
745 | } else | ||
746 | if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) { | ||
747 | r = -ENOMEM; | ||
748 | goto error; | ||
749 | } else | ||
750 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { | ||
751 | int cpuobj_index = 0; | ||
752 | |||
753 | memset(p, 0, a.sz); | ||
754 | for (i = 0; i < nobj; i++) { | ||
755 | if (!SN_HWPERF_IS_NODE(objs + i)) | ||
756 | continue; | ||
757 | node = sn_hwperf_obj_to_cnode(objs + i); | ||
758 | for_each_online_cpu(j) { | ||
759 | if (node != cpu_to_node(j)) | ||
760 | continue; | ||
761 | cpuobj = (struct sn_hwperf_object_info *) p + cpuobj_index++; | ||
762 | slice = 'a' + cpuid_to_slice(j); | ||
763 | cdata = cpu_data(j); | ||
764 | cpuobj->id = j; | ||
765 | snprintf(cpuobj->name, | ||
766 | sizeof(cpuobj->name), | ||
767 | "CPU %luMHz %s", | ||
768 | cdata->proc_freq / 1000000, | ||
769 | cdata->vendor); | ||
770 | snprintf(cpuobj->location, | ||
771 | sizeof(cpuobj->location), | ||
772 | "%s%c", objs[i].location, | ||
773 | slice); | ||
774 | } | ||
775 | } | ||
776 | |||
777 | vfree(objs); | ||
778 | } | ||
779 | break; | ||
780 | |||
781 | case SN_HWPERF_GET_NODE_NASID: | ||
782 | if (a.sz != sizeof(u64) || | ||
783 | (node = a.arg) < 0 || !cnode_possible(node)) { | ||
784 | r = -EINVAL; | ||
785 | goto error; | ||
786 | } | ||
787 | *(u64 *)p = (u64)cnodeid_to_nasid(node); | ||
788 | break; | ||
789 | |||
790 | case SN_HWPERF_GET_OBJ_NODE: | ||
791 | i = a.arg; | ||
792 | if (a.sz != sizeof(u64) || i < 0) { | ||
793 | r = -EINVAL; | ||
794 | goto error; | ||
795 | } | ||
796 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { | ||
797 | if (i >= nobj) { | ||
798 | r = -EINVAL; | ||
799 | vfree(objs); | ||
800 | goto error; | ||
801 | } | ||
802 | if (objs[i].id != a.arg) { | ||
803 | for (i = 0; i < nobj; i++) { | ||
804 | if (objs[i].id == a.arg) | ||
805 | break; | ||
806 | } | ||
807 | } | ||
808 | if (i == nobj) { | ||
809 | r = -EINVAL; | ||
810 | vfree(objs); | ||
811 | goto error; | ||
812 | } | ||
813 | |||
814 | if (!SN_HWPERF_IS_NODE(objs + i) && | ||
815 | !SN_HWPERF_IS_IONODE(objs + i)) { | ||
816 | r = -ENOENT; | ||
817 | vfree(objs); | ||
818 | goto error; | ||
819 | } | ||
820 | |||
821 | *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i); | ||
822 | vfree(objs); | ||
823 | } | ||
824 | break; | ||
825 | |||
826 | case SN_HWPERF_GET_MMRS: | ||
827 | case SN_HWPERF_SET_MMRS: | ||
828 | case SN_HWPERF_OBJECT_DISTANCE: | ||
829 | op_info.p = p; | ||
830 | op_info.a = &a; | ||
831 | op_info.v0 = &v0; | ||
832 | op_info.op = op; | ||
833 | r = sn_hwperf_op_cpu(&op_info); | ||
834 | if (r) { | ||
835 | r = sn_hwperf_map_err(r); | ||
836 | a.v0 = v0; | ||
837 | goto error; | ||
838 | } | ||
839 | break; | ||
840 | |||
841 | default: | ||
842 | /* all other ops are a direct SAL call */ | ||
843 | r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op, | ||
844 | a.arg, a.sz, (u64) p, 0, 0, &v0); | ||
845 | if (r) { | ||
846 | r = sn_hwperf_map_err(r); | ||
847 | goto error; | ||
848 | } | ||
849 | a.v0 = v0; | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | if (op & SN_HWPERF_OP_MEM_COPYOUT) { | ||
854 | r = copy_to_user((void __user *)a.ptr, p, a.sz); | ||
855 | if (r != 0) { | ||
856 | r = -EFAULT; | ||
857 | goto error; | ||
858 | } | ||
859 | } | ||
860 | |||
861 | error: | ||
862 | vfree(p); | ||
863 | |||
864 | return r; | ||
865 | } | ||
866 | |||
867 | static const struct file_operations sn_hwperf_fops = { | ||
868 | .unlocked_ioctl = sn_hwperf_ioctl, | ||
869 | .llseek = noop_llseek, | ||
870 | }; | ||
871 | |||
872 | static struct miscdevice sn_hwperf_dev = { | ||
873 | MISC_DYNAMIC_MINOR, | ||
874 | "sn_hwperf", | ||
875 | &sn_hwperf_fops | ||
876 | }; | ||
877 | |||
878 | static int sn_hwperf_init(void) | ||
879 | { | ||
880 | u64 v; | ||
881 | int salr; | ||
882 | int e = 0; | ||
883 | |||
884 | /* single threaded, once-only initialization */ | ||
885 | mutex_lock(&sn_hwperf_init_mutex); | ||
886 | |||
887 | if (sn_hwperf_salheap) { | ||
888 | mutex_unlock(&sn_hwperf_init_mutex); | ||
889 | return e; | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * The PROM code needs a fixed reference node. For convenience the | ||
894 | * same node as the console I/O is used. | ||
895 | */ | ||
896 | sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid(); | ||
897 | |||
898 | /* | ||
899 | * Request the needed size and install the PROM scratch area. | ||
900 | * The PROM keeps various tracking bits in this memory area. | ||
901 | */ | ||
902 | salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
903 | (u64) SN_HWPERF_GET_HEAPSIZE, 0, | ||
904 | (u64) sizeof(u64), (u64) &v, 0, 0, NULL); | ||
905 | if (salr != SN_HWPERF_OP_OK) { | ||
906 | e = -EINVAL; | ||
907 | goto out; | ||
908 | } | ||
909 | |||
910 | if ((sn_hwperf_salheap = vmalloc(v)) == NULL) { | ||
911 | e = -ENOMEM; | ||
912 | goto out; | ||
913 | } | ||
914 | salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
915 | SN_HWPERF_INSTALL_HEAP, 0, v, | ||
916 | (u64) sn_hwperf_salheap, 0, 0, NULL); | ||
917 | if (salr != SN_HWPERF_OP_OK) { | ||
918 | e = -EINVAL; | ||
919 | goto out; | ||
920 | } | ||
921 | |||
922 | salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
923 | SN_HWPERF_OBJECT_COUNT, 0, | ||
924 | sizeof(u64), (u64) &v, 0, 0, NULL); | ||
925 | if (salr != SN_HWPERF_OP_OK) { | ||
926 | e = -EINVAL; | ||
927 | goto out; | ||
928 | } | ||
929 | sn_hwperf_obj_cnt = (int)v; | ||
930 | |||
931 | out: | ||
932 | if (e < 0 && sn_hwperf_salheap) { | ||
933 | vfree(sn_hwperf_salheap); | ||
934 | sn_hwperf_salheap = NULL; | ||
935 | sn_hwperf_obj_cnt = 0; | ||
936 | } | ||
937 | mutex_unlock(&sn_hwperf_init_mutex); | ||
938 | return e; | ||
939 | } | ||
940 | |||
941 | int sn_topology_open(struct inode *inode, struct file *file) | ||
942 | { | ||
943 | int e; | ||
944 | struct seq_file *seq; | ||
945 | struct sn_hwperf_object_info *objbuf; | ||
946 | int nobj; | ||
947 | |||
948 | if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { | ||
949 | e = seq_open(file, &sn_topology_seq_ops); | ||
950 | seq = file->private_data; | ||
951 | seq->private = objbuf; | ||
952 | } | ||
953 | |||
954 | return e; | ||
955 | } | ||
956 | |||
957 | int sn_topology_release(struct inode *inode, struct file *file) | ||
958 | { | ||
959 | struct seq_file *seq = file->private_data; | ||
960 | |||
961 | vfree(seq->private); | ||
962 | return seq_release(inode, file); | ||
963 | } | ||
964 | |||
965 | int sn_hwperf_get_nearest_node(cnodeid_t node, | ||
966 | cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) | ||
967 | { | ||
968 | int e; | ||
969 | int nobj; | ||
970 | struct sn_hwperf_object_info *objbuf; | ||
971 | |||
972 | if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { | ||
973 | e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj, | ||
974 | node, near_mem_node, near_cpu_node); | ||
975 | vfree(objbuf); | ||
976 | } | ||
977 | |||
978 | return e; | ||
979 | } | ||
980 | |||
981 | static int sn_hwperf_misc_register_init(void) | ||
982 | { | ||
983 | int e; | ||
984 | |||
985 | if (!ia64_platform_is("sn2")) | ||
986 | return 0; | ||
987 | |||
988 | sn_hwperf_init(); | ||
989 | |||
990 | /* | ||
991 | * Register a dynamic misc device for hwperf ioctls. Platforms | ||
992 | * supporting hotplug will create /dev/sn_hwperf, else user | ||
993 | * can look up the minor number in /proc/misc. | ||
994 | */ | ||
995 | if ((e = misc_register(&sn_hwperf_dev)) != 0) { | ||
996 | printk(KERN_ERR "sn_hwperf_misc_register_init: failed to " | ||
997 | "register misc device for \"%s\"\n", sn_hwperf_dev.name); | ||
998 | } | ||
999 | |||
1000 | return e; | ||
1001 | } | ||
1002 | |||
1003 | device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */ | ||
1004 | EXPORT_SYMBOL(sn_hwperf_get_nearest_node); | ||
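For reference, a minimal consumer sketch (not part of this patch) of the /proc/sgi_sn/sn_topology file exported above. Tools such as Performance Co-Pilot read it line by line; each line starts with an object type ("partition", "node", "ionode", "router", "numalink", "cpu", ...), as produced by sn_topology_show().

	/*
	 * Hedged sketch, not from the patch: print only the node and router
	 * objects from the seq_file output generated by sn_topology_show().
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[1024];
		FILE *f = fopen("/proc/sgi_sn/sn_topology", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, "node ", 5) ||
			    !strncmp(line, "router ", 7))
				fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}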
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c deleted file mode 100644 index c2a4d84297b0..000000000000 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ /dev/null | |||
@@ -1,69 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #ifdef CONFIG_PROC_FS | ||
10 | #include <linux/proc_fs.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <linux/uaccess.h> | ||
13 | #include <asm/sn/sn_sal.h> | ||
14 | |||
15 | static int partition_id_show(struct seq_file *s, void *p) | ||
16 | { | ||
17 | seq_printf(s, "%d\n", sn_partition_id); | ||
18 | return 0; | ||
19 | } | ||
20 | |||
21 | static int system_serial_number_show(struct seq_file *s, void *p) | ||
22 | { | ||
23 | seq_printf(s, "%s\n", sn_system_serial_number()); | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | static int licenseID_show(struct seq_file *s, void *p) | ||
28 | { | ||
29 | seq_printf(s, "0x%llx\n", sn_partition_serial_number_val()); | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static int coherence_id_show(struct seq_file *s, void *p) | ||
34 | { | ||
35 | seq_printf(s, "%d\n", partition_coherence_id()); | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | /* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */ | ||
41 | extern int sn_topology_open(struct inode *, struct file *); | ||
42 | extern int sn_topology_release(struct inode *, struct file *); | ||
43 | |||
44 | static const struct file_operations proc_sn_topo_fops = { | ||
45 | .open = sn_topology_open, | ||
46 | .read = seq_read, | ||
47 | .llseek = seq_lseek, | ||
48 | .release = sn_topology_release, | ||
49 | }; | ||
50 | |||
51 | void register_sn_procfs(void) | ||
52 | { | ||
53 | static struct proc_dir_entry *sgi_proc_dir = NULL; | ||
54 | |||
55 | BUG_ON(sgi_proc_dir != NULL); | ||
56 | if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) | ||
57 | return; | ||
58 | |||
59 | proc_create_single("partition_id", 0444, sgi_proc_dir, | ||
60 | partition_id_show); | ||
61 | proc_create_single("system_serial_number", 0444, sgi_proc_dir, | ||
62 | system_serial_number_show); | ||
63 | proc_create_single("licenseID", 0444, sgi_proc_dir, licenseID_show); | ||
64 | proc_create_single("coherence_id", 0444, sgi_proc_dir, | ||
65 | coherence_id_show); | ||
66 | proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops); | ||
67 | } | ||
68 | |||
69 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c deleted file mode 100644 index 3009d9d86f29..000000000000 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * linux/arch/ia64/sn/kernel/sn2/timer.c | ||
4 | * | ||
5 | * Copyright (C) 2003 Silicon Graphics, Inc. | ||
6 | * Copyright (C) 2003 Hewlett-Packard Co | ||
7 | * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/time.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/clocksource.h> | ||
16 | |||
17 | #include <asm/hw_irq.h> | ||
18 | #include <asm/timex.h> | ||
19 | |||
20 | #include <asm/sn/leds.h> | ||
21 | #include <asm/sn/shub_mmr.h> | ||
22 | #include <asm/sn/clksupport.h> | ||
23 | |||
24 | extern unsigned long sn_rtc_cycles_per_second; | ||
25 | |||
26 | static u64 read_sn2(struct clocksource *cs) | ||
27 | { | ||
28 | return (u64)readq(RTC_COUNTER_ADDR); | ||
29 | } | ||
30 | |||
31 | static struct clocksource clocksource_sn2 = { | ||
32 | .name = "sn2_rtc", | ||
33 | .rating = 450, | ||
34 | .read = read_sn2, | ||
35 | .mask = (1LL << 55) - 1, | ||
36 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * sn udelay uses the RTC instead of the ITC because the ITC is not | ||
41 | * synchronized across all CPUs, and the thread may migrate to another CPU | ||
42 | * if preemption is enabled. | ||
43 | */ | ||
44 | static void | ||
45 | ia64_sn_udelay (unsigned long usecs) | ||
46 | { | ||
47 | unsigned long start = rtc_time(); | ||
48 | unsigned long end = start + | ||
49 | usecs * sn_rtc_cycles_per_second / 1000000; | ||
50 | |||
51 | while (time_before((unsigned long)rtc_time(), end)) | ||
52 | cpu_relax(); | ||
53 | } | ||
54 | |||
55 | void __init sn_timer_init(void) | ||
56 | { | ||
57 | clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR; | ||
58 | clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); | ||
59 | |||
60 | ia64_udelay = &ia64_sn_udelay; | ||
61 | } | ||
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c deleted file mode 100644 index 103d6ea8e94b..000000000000 --- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c +++ /dev/null | |||
@@ -1,60 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it would be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
13 | * | ||
14 | * Further, this software is distributed without any warranty that it is | ||
15 | * free of the rightful claim of any third person regarding infringement | ||
16 | * or the like. Any license provided herein, whether implied or | ||
17 | * otherwise, applies only to this software file. Patent licenses, if | ||
18 | * any, provided herein do not apply to combinations of this program with | ||
19 | * other software, or any other product whatsoever. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public | ||
22 | * License along with this program; if not, write the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
24 | * | ||
25 | * For further information regarding this notice, see: | ||
26 | * | ||
27 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan | ||
28 | */ | ||
29 | |||
30 | #include <linux/interrupt.h> | ||
31 | #include <asm/sn/pda.h> | ||
32 | #include <asm/sn/leds.h> | ||
33 | |||
34 | extern void sn_lb_int_war_check(void); | ||
35 | extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); | ||
36 | |||
37 | #define SN_LB_INT_WAR_INTERVAL 100 | ||
38 | |||
39 | void sn_timer_interrupt(int irq, void *dev_id) | ||
40 | { | ||
41 | /* LED blinking */ | ||
42 | if (!pda->hb_count--) { | ||
43 | pda->hb_count = HZ / 2; | ||
44 | set_led_bits(pda->hb_state ^= | ||
45 | LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT); | ||
46 | } | ||
47 | |||
48 | if (is_shub1()) { | ||
49 | if (enable_shub_wars_1_1()) { | ||
50 | /* Bugfix code for SHUB 1.1 */ | ||
51 | if (pda->pio_shub_war_cam_addr) | ||
52 | *pda->pio_shub_war_cam_addr = 0x8000000000000010UL; | ||
53 | } | ||
54 | if (pda->sn_lb_int_war_ticks == 0) | ||
55 | sn_lb_int_war_check(); | ||
56 | pda->sn_lb_int_war_ticks++; | ||
57 | if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL) | ||
58 | pda->sn_lb_int_war_ticks = 0; | ||
59 | } | ||
60 | } | ||
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile deleted file mode 100644 index 321576b1b425..000000000000 --- a/arch/ia64/sn/pci/Makefile +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | # | ||
8 | # Makefile for the sn pci general routines. | ||
9 | |||
10 | obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/ | ||
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c deleted file mode 100644 index b7d42e4edc1f..000000000000 --- a/arch/ia64/sn/pci/pci_dma.c +++ /dev/null | |||
@@ -1,446 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | * | ||
8 | * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for | ||
9 | * a description of how these routines should be used. | ||
10 | */ | ||
11 | |||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <asm/dma.h> | ||
16 | #include <asm/sn/intr.h> | ||
17 | #include <asm/sn/pcibus_provider_defs.h> | ||
18 | #include <asm/sn/pcidev.h> | ||
19 | #include <asm/sn/sn_sal.h> | ||
20 | |||
21 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) | ||
22 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) | ||
23 | |||
24 | /** | ||
25 | * sn_dma_supported - test a DMA mask | ||
26 | * @dev: device to test | ||
27 | * @mask: DMA mask to test | ||
28 | * | ||
29 | * Return whether the given PCI device DMA address mask can be supported | ||
30 | * properly. For example, if your device can only drive the low 24-bits | ||
31 | * during PCI bus mastering, then you would pass 0x00ffffff as the mask to | ||
32 | * this function. Of course, SN only supports devices that have 32 or more | ||
33 | * address bits when using the PMU. | ||
34 | */ | ||
35 | static int sn_dma_supported(struct device *dev, u64 mask) | ||
36 | { | ||
37 | BUG_ON(!dev_is_pci(dev)); | ||
38 | |||
39 | if (mask < 0x7fffffff) | ||
40 | return 0; | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | /** | ||
45 | * sn_dma_set_mask - set the DMA mask | ||
46 | * @dev: device to set | ||
47 | * @dma_mask: new mask | ||
48 | * | ||
49 | * Set @dev's DMA mask if the hw supports it. | ||
50 | */ | ||
51 | int sn_dma_set_mask(struct device *dev, u64 dma_mask) | ||
52 | { | ||
53 | BUG_ON(!dev_is_pci(dev)); | ||
54 | |||
55 | if (!sn_dma_supported(dev, dma_mask)) | ||
56 | return 0; | ||
57 | |||
58 | *dev->dma_mask = dma_mask; | ||
59 | return 1; | ||
60 | } | ||
61 | EXPORT_SYMBOL(sn_dma_set_mask); | ||
62 | |||
63 | /** | ||
64 | * sn_dma_alloc_coherent - allocate memory for coherent DMA | ||
65 | * @dev: device to allocate for | ||
66 | * @size: size of the region | ||
67 | * @dma_handle: DMA (bus) address | ||
68 | * @flags: memory allocation flags | ||
69 | * | ||
70 | * dma_alloc_coherent() returns a pointer to a memory region suitable for | ||
71 | * coherent DMA traffic to/from a PCI device. On SN platforms, this means | ||
72 | * that @dma_handle will have the %PCIIO_DMA_CMD flag set. | ||
73 | * | ||
74 | * This interface is usually used for "command" streams (e.g. the command | ||
75 | * queue for a SCSI controller). See Documentation/DMA-API.txt for | ||
76 | * more information. | ||
77 | */ | ||
78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | ||
79 | dma_addr_t * dma_handle, gfp_t flags, | ||
80 | unsigned long attrs) | ||
81 | { | ||
82 | void *cpuaddr; | ||
83 | unsigned long phys_addr; | ||
84 | int node; | ||
85 | struct pci_dev *pdev = to_pci_dev(dev); | ||
86 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
87 | |||
88 | BUG_ON(!dev_is_pci(dev)); | ||
89 | |||
90 | /* | ||
91 | * Allocate the memory. | ||
92 | */ | ||
93 | node = pcibus_to_node(pdev->bus); | ||
94 | if (likely(node >=0)) { | ||
95 | struct page *p = __alloc_pages_node(node, | ||
96 | flags, get_order(size)); | ||
97 | |||
98 | if (likely(p)) | ||
99 | cpuaddr = page_address(p); | ||
100 | else | ||
101 | return NULL; | ||
102 | } else | ||
103 | cpuaddr = (void *)__get_free_pages(flags, get_order(size)); | ||
104 | |||
105 | if (unlikely(!cpuaddr)) | ||
106 | return NULL; | ||
107 | |||
108 | memset(cpuaddr, 0x0, size); | ||
109 | |||
110 | /* physical addr. of the memory we just got */ | ||
111 | phys_addr = __pa(cpuaddr); | ||
112 | |||
113 | /* | ||
114 | * 64 bit address translations should never fail. | ||
115 | * 32 bit translations can fail if there are insufficient mapping | ||
116 | * resources. | ||
117 | */ | ||
118 | |||
119 | *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, | ||
120 | SN_DMA_ADDR_PHYS); | ||
121 | if (!*dma_handle) { | ||
122 | printk(KERN_ERR "%s: out of ATEs\n", __func__); | ||
123 | free_pages((unsigned long)cpuaddr, get_order(size)); | ||
124 | return NULL; | ||
125 | } | ||
126 | |||
127 | return cpuaddr; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * sn_dma_free_coherent - free memory associated with coherent DMAable region | ||
132 | * @dev: device to free for | ||
133 | * @size: size to free | ||
134 | * @cpu_addr: kernel virtual address to free | ||
135 | * @dma_handle: DMA address associated with this region | ||
136 | * | ||
137 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping | ||
138 | * any associated IOMMU mappings. | ||
139 | */ | ||
140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
141 | dma_addr_t dma_handle, unsigned long attrs) | ||
142 | { | ||
143 | struct pci_dev *pdev = to_pci_dev(dev); | ||
144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
145 | |||
146 | BUG_ON(!dev_is_pci(dev)); | ||
147 | |||
148 | provider->dma_unmap(pdev, dma_handle, 0); | ||
149 | free_pages((unsigned long)cpu_addr, get_order(size)); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * sn_dma_map_page - map a single page for DMA | ||
154 | * @dev: device to map for | ||
155 | * @cpu_addr: kernel virtual address of the region to map | ||
156 | * @size: size of the region | ||
157 | * @direction: DMA direction | ||
158 | * @attrs: optional dma attributes | ||
159 | * | ||
160 | * Map the region pointed to by @cpu_addr for DMA and return the | ||
161 | * DMA address. | ||
162 | * | ||
163 | * We map this to the one step pcibr_dmamap_trans interface rather than | ||
164 | * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have | ||
165 | * no way of saving the dmamap handle from the alloc to later free | ||
166 | * (which is pretty much unacceptable). | ||
167 | * | ||
168 | * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with | ||
169 | * dma_map_consistent() so that writes force a flush of pending DMA. | ||
170 | * (See "SGI Altix Architecture Considerations for Linux Device Drivers", | ||
171 | * Document Number: 007-4763-001) | ||
172 | * | ||
173 | * TODO: simplify our interface; | ||
174 | * figure out how to save dmamap handle so can use two step. | ||
175 | */ | ||
176 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | ||
177 | unsigned long offset, size_t size, | ||
178 | enum dma_data_direction dir, | ||
179 | unsigned long attrs) | ||
180 | { | ||
181 | void *cpu_addr = page_address(page) + offset; | ||
182 | dma_addr_t dma_addr; | ||
183 | unsigned long phys_addr; | ||
184 | struct pci_dev *pdev = to_pci_dev(dev); | ||
185 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
186 | |||
187 | BUG_ON(!dev_is_pci(dev)); | ||
188 | |||
189 | phys_addr = __pa(cpu_addr); | ||
190 | if (attrs & DMA_ATTR_WRITE_BARRIER) | ||
191 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, | ||
192 | size, SN_DMA_ADDR_PHYS); | ||
193 | else | ||
194 | dma_addr = provider->dma_map(pdev, phys_addr, size, | ||
195 | SN_DMA_ADDR_PHYS); | ||
196 | |||
197 | if (!dma_addr) { | ||
198 | printk(KERN_ERR "%s: out of ATEs\n", __func__); | ||
199 | return DMA_MAPPING_ERROR; | ||
200 | } | ||
201 | return dma_addr; | ||
202 | } | ||
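To illustrate the DMA_ATTR_WRITE_BARRIER behaviour described in the comment above, here is a hedged driver-side sketch (not part of this patch). The device and descriptor names are made up, and it assumes the attribute was still present in linux/dma-mapping.h at the time of this code.

	/*
	 * Hedged sketch, not from the patch: map a completion descriptor so
	 * that device writes to it force earlier pending DMA to be flushed,
	 * which is what the barrier attribute buys on SN2.
	 */
	#include <linux/dma-mapping.h>

	static dma_addr_t map_completion_desc(struct device *dev, void *desc,
					      size_t size)
	{
		dma_addr_t handle;

		handle = dma_map_single_attrs(dev, desc, size, DMA_FROM_DEVICE,
					      DMA_ATTR_WRITE_BARRIER);
		if (dma_mapping_error(dev, handle))
			return 0;	/* 0 is the error sentinel, as in the sn code */
		return handle;
	}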
203 | |||
204 | /** | ||
205 | * sn_dma_unmap_page - unmap a DMA mapped page | ||
206 | * @dev: device to sync | ||
207 | * @dma_addr: DMA address to sync | ||
208 | * @size: size of region | ||
209 | * @direction: DMA direction | ||
210 | * @attrs: optional dma attributes | ||
211 | * | ||
212 | * This routine is supposed to sync the DMA region specified | ||
213 | * by @dma_addr into the coherence domain. On SN, we're always cache | ||
214 | * coherent, so we just need to free any ATEs associated with this mapping. | ||
215 | */ | ||
216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | ||
217 | size_t size, enum dma_data_direction dir, | ||
218 | unsigned long attrs) | ||
219 | { | ||
220 | struct pci_dev *pdev = to_pci_dev(dev); | ||
221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
222 | |||
223 | BUG_ON(!dev_is_pci(dev)); | ||
224 | |||
225 | provider->dma_unmap(pdev, dma_addr, dir); | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * sn_dma_unmap_sg - unmap a DMA scatterlist | ||
230 | * @dev: device to unmap | ||
231 | * @sg: scatterlist to unmap | ||
232 | * @nhwentries: number of scatterlist entries | ||
233 | * @direction: DMA direction | ||
234 | * @attrs: optional dma attributes | ||
235 | * | ||
236 | * Unmap a set of streaming mode DMA translations. | ||
237 | */ | ||
238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | ||
239 | int nhwentries, enum dma_data_direction dir, | ||
240 | unsigned long attrs) | ||
241 | { | ||
242 | int i; | ||
243 | struct pci_dev *pdev = to_pci_dev(dev); | ||
244 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
245 | struct scatterlist *sg; | ||
246 | |||
247 | BUG_ON(!dev_is_pci(dev)); | ||
248 | |||
249 | for_each_sg(sgl, sg, nhwentries, i) { | ||
250 | provider->dma_unmap(pdev, sg->dma_address, dir); | ||
251 | sg->dma_address = (dma_addr_t) NULL; | ||
252 | sg->dma_length = 0; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * sn_dma_map_sg - map a scatterlist for DMA | ||
258 | * @dev: device to map for | ||
259 | * @sg: scatterlist to map | ||
260 | * @nhwentries: number of entries | ||
261 | * @direction: direction of the DMA transaction | ||
262 | * @attrs: optional dma attributes | ||
263 | * | ||
264 | * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with | ||
265 | * dma_map_consistent() so that writes force a flush of pending DMA. | ||
266 | * (See "SGI Altix Architecture Considerations for Linux Device Drivers", | ||
267 | * Document Number: 007-4763-001) | ||
268 | * | ||
269 | * Maps each entry of @sg for DMA. | ||
270 | */ | ||
271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | ||
272 | int nhwentries, enum dma_data_direction dir, | ||
273 | unsigned long attrs) | ||
274 | { | ||
275 | unsigned long phys_addr; | ||
276 | struct scatterlist *saved_sg = sgl, *sg; | ||
277 | struct pci_dev *pdev = to_pci_dev(dev); | ||
278 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
279 | int i; | ||
280 | |||
281 | BUG_ON(!dev_is_pci(dev)); | ||
282 | |||
283 | /* | ||
284 | * Setup a DMA address for each entry in the scatterlist. | ||
285 | */ | ||
286 | for_each_sg(sgl, sg, nhwentries, i) { | ||
287 | dma_addr_t dma_addr; | ||
288 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); | ||
289 | if (attrs & DMA_ATTR_WRITE_BARRIER) | ||
290 | dma_addr = provider->dma_map_consistent(pdev, | ||
291 | phys_addr, | ||
292 | sg->length, | ||
293 | SN_DMA_ADDR_PHYS); | ||
294 | else | ||
295 | dma_addr = provider->dma_map(pdev, phys_addr, | ||
296 | sg->length, | ||
297 | SN_DMA_ADDR_PHYS); | ||
298 | |||
299 | sg->dma_address = dma_addr; | ||
300 | if (!sg->dma_address) { | ||
301 | printk(KERN_ERR "%s: out of ATEs\n", __func__); | ||
302 | |||
303 | /* | ||
304 | * Free any successfully allocated entries. | ||
305 | */ | ||
306 | if (i > 0) | ||
307 | sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | sg->dma_length = sg->length; | ||
312 | } | ||
313 | |||
314 | return nhwentries; | ||
315 | } | ||
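sn_dma_map_sg() returns 0 when it runs out of ATEs, after unmapping whatever it had already
set up. A hedged sketch of how a generic DMA API caller consumes that contract (not code from
this patch; "dev", "sgl", "nents" and queue_descriptor() are assumed or hypothetical):

    struct scatterlist *sg;
    int i, mapped;

    mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
    if (!mapped)
        return -ENOMEM;        /* nothing is left mapped, nothing to undo */

    for_each_sg(sgl, sg, mapped, i)
        queue_descriptor(sg_dma_address(sg), sg_dma_len(sg));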
316 | |||
317 | static u64 sn_dma_get_required_mask(struct device *dev) | ||
318 | { | ||
319 | return DMA_BIT_MASK(64); | ||
320 | } | ||
321 | |||
322 | char *sn_pci_get_legacy_mem(struct pci_bus *bus) | ||
323 | { | ||
324 | if (!SN_PCIBUS_BUSSOFT(bus)) | ||
325 | return ERR_PTR(-ENODEV); | ||
326 | |||
327 | return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET); | ||
328 | } | ||
329 | |||
330 | int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) | ||
331 | { | ||
332 | unsigned long addr; | ||
333 | int ret; | ||
334 | struct ia64_sal_retval isrv; | ||
335 | |||
336 | /* | ||
337 | * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work | ||
338 | * around hw issues at the pci bus level. SGI proms older than | ||
339 | * 4.10 don't implement this. | ||
340 | */ | ||
341 | |||
342 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, | ||
343 | pci_domain_nr(bus), bus->number, | ||
344 | 0, /* io */ | ||
345 | 0, /* read */ | ||
346 | port, size, __pa(val)); | ||
347 | |||
348 | if (isrv.status == 0) | ||
349 | return size; | ||
350 | |||
351 | /* | ||
352 | * If the above failed, retry using the SAL_PROBE call which should | ||
353 | * be present in all proms (but which cannot work around PCI chipset | ||
354 | * bugs). This code is retained for compatibility with old | ||
355 | * pre-4.10 proms, and should be removed at some point in the future. | ||
356 | */ | ||
357 | |||
358 | if (!SN_PCIBUS_BUSSOFT(bus)) | ||
359 | return -ENODEV; | ||
360 | |||
361 | addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; | ||
362 | addr += port; | ||
363 | |||
364 | ret = ia64_sn_probe_mem(addr, (long)size, (void *)val); | ||
365 | |||
366 | if (ret == 2) | ||
367 | return -EINVAL; | ||
368 | |||
369 | if (ret == 1) | ||
370 | *val = -1; | ||
371 | |||
372 | return size; | ||
373 | } | ||
374 | |||
375 | int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | ||
376 | { | ||
377 | int ret = size; | ||
378 | unsigned long paddr; | ||
379 | unsigned long *addr; | ||
380 | struct ia64_sal_retval isrv; | ||
381 | |||
382 | /* | ||
383 | * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work | ||
384 | * around hw issues at the pci bus level. SGI proms older than | ||
385 | * 4.10 don't implement this. | ||
386 | */ | ||
387 | |||
388 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, | ||
389 | pci_domain_nr(bus), bus->number, | ||
390 | 0, /* io */ | ||
391 | 1, /* write */ | ||
392 | port, size, __pa(&val)); | ||
393 | |||
394 | if (isrv.status == 0) | ||
395 | return size; | ||
396 | |||
397 | /* | ||
398 | * If the above failed, retry using the SAL_PROBE call which should | ||
399 | * be present in all proms (but which cannot work around PCI chipset | ||
400 | * bugs). This code is retained for compatibility with old | ||
401 | * pre-4.10 proms, and should be removed at some point in the future. | ||
402 | */ | ||
403 | |||
404 | if (!SN_PCIBUS_BUSSOFT(bus)) { | ||
405 | ret = -ENODEV; | ||
406 | goto out; | ||
407 | } | ||
408 | |||
409 | /* Put the phys addr in uncached space */ | ||
410 | paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; | ||
411 | paddr += port; | ||
412 | addr = (unsigned long *)paddr; | ||
413 | |||
414 | switch (size) { | ||
415 | case 1: | ||
416 | *(volatile u8 *)(addr) = (u8)(val); | ||
417 | break; | ||
418 | case 2: | ||
419 | *(volatile u16 *)(addr) = (u16)(val); | ||
420 | break; | ||
421 | case 4: | ||
422 | *(volatile u32 *)(addr) = (u32)(val); | ||
423 | break; | ||
424 | default: | ||
425 | ret = -EINVAL; | ||
426 | break; | ||
427 | } | ||
428 | out: | ||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | static struct dma_map_ops sn_dma_ops = { | ||
433 | .alloc = sn_dma_alloc_coherent, | ||
434 | .free = sn_dma_free_coherent, | ||
435 | .map_page = sn_dma_map_page, | ||
436 | .unmap_page = sn_dma_unmap_page, | ||
437 | .map_sg = sn_dma_map_sg, | ||
438 | .unmap_sg = sn_dma_unmap_sg, | ||
439 | .dma_supported = sn_dma_supported, | ||
440 | .get_required_mask = sn_dma_get_required_mask, | ||
441 | }; | ||
442 | |||
443 | void sn_dma_init(void) | ||
444 | { | ||
445 | dma_ops = &sn_dma_ops; | ||
446 | } | ||
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile deleted file mode 100644 index 712f6af7c6e0..000000000000 --- a/arch/ia64/sn/pci/pcibr/Makefile +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | # | ||
8 | # Makefile for the sn2 io routines. | ||
9 | |||
10 | ccflags-y := -I $(srctree)/arch/ia64/sn/include | ||
11 | |||
12 | obj-y += pcibr_dma.o pcibr_reg.o \ | ||
13 | pcibr_ate.o pcibr_provider.o | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c deleted file mode 100644 index b67bb4cb73ff..000000000000 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ /dev/null | |||
@@ -1,177 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <asm/sn/sn_sal.h> | ||
11 | #include <asm/sn/pcibr_provider.h> | ||
12 | #include <asm/sn/pcibus_provider_defs.h> | ||
13 | #include <asm/sn/pcidev.h> | ||
14 | |||
15 | int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */ | ||
16 | |||
17 | /* | ||
18 | * mark_ate: Mark the ate as either free or in use. | ||
19 | */ | ||
20 | static void mark_ate(struct ate_resource *ate_resource, int start, int number, | ||
21 | u64 value) | ||
22 | { | ||
23 | u64 *ate = ate_resource->ate; | ||
24 | int index; | ||
25 | int length = 0; | ||
26 | |||
27 | for (index = start; length < number; index++, length++) | ||
28 | ate[index] = value; | ||
29 | } | ||
30 | |||
31 | /* | ||
32 | * find_free_ate: Find the first free ate index starting from the given | ||
33 | * index for the desired consecutive count. | ||
34 | */ | ||
35 | static int find_free_ate(struct ate_resource *ate_resource, int start, | ||
36 | int count) | ||
37 | { | ||
38 | u64 *ate = ate_resource->ate; | ||
39 | int index; | ||
40 | int start_free; | ||
41 | |||
42 | for (index = start; index < ate_resource->num_ate;) { | ||
43 | if (!ate[index]) { | ||
44 | int i; | ||
45 | int free; | ||
46 | free = 0; | ||
47 | start_free = index; /* Found start free ate */ | ||
48 | for (i = start_free; i < ate_resource->num_ate; i++) { | ||
49 | if (!ate[i]) { /* This is free */ | ||
50 | if (++free == count) | ||
51 | return start_free; | ||
52 | } else { | ||
53 | index = i + 1; | ||
54 | break; | ||
55 | } | ||
56 | } | ||
57 | if (i >= ate_resource->num_ate) | ||
58 | return -1; | ||
59 | } else | ||
60 | index++; /* Try next ate */ | ||
61 | } | ||
62 | |||
63 | return -1; | ||
64 | } | ||
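A worked example of the first-fit scan in find_free_ate(), under the assumption (matching how
alloc_ate_resource() below calls mark_ate()) that every entry of an allocated run holds the run
length and free entries hold 0:

    /* ate[] = { 3, 3, 3, 0, 0, 2, 2, 0, 0, 0 },  num_ate = 10, count = 3
     *
     *   index 0-2: busy, skip one entry at a time
     *   index 3:   free run {3,4} stops at the busy entry 5, restart at index 6
     *   index 6:   busy
     *   index 7:   free run {7,8,9} reaches count == 3, return 7
     */
    int start = find_free_ate(&pcibus_info->pbi_int_ate_resource, 0, 3);  /* -> 7 */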
65 | |||
66 | /* | ||
67 | * free_ate_resource: Free the requested number of ATEs. | ||
68 | */ | ||
69 | static inline void free_ate_resource(struct ate_resource *ate_resource, | ||
70 | int start) | ||
71 | { | ||
72 | mark_ate(ate_resource, start, ate_resource->ate[start], 0); | ||
73 | if ((ate_resource->lowest_free_index > start) || | ||
74 | (ate_resource->lowest_free_index < 0)) | ||
75 | ate_resource->lowest_free_index = start; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * alloc_ate_resource: Allocate the requested number of ATEs. | ||
80 | */ | ||
81 | static inline int alloc_ate_resource(struct ate_resource *ate_resource, | ||
82 | int ate_needed) | ||
83 | { | ||
84 | int start_index; | ||
85 | |||
86 | /* | ||
87 | * Check for ate exhaustion. | ||
88 | */ | ||
89 | if (ate_resource->lowest_free_index < 0) | ||
90 | return -1; | ||
91 | |||
92 | /* | ||
93 | * Find the required number of free consecutive ates. | ||
94 | */ | ||
95 | start_index = | ||
96 | find_free_ate(ate_resource, ate_resource->lowest_free_index, | ||
97 | ate_needed); | ||
98 | if (start_index >= 0) | ||
99 | mark_ate(ate_resource, start_index, ate_needed, ate_needed); | ||
100 | |||
101 | ate_resource->lowest_free_index = | ||
102 | find_free_ate(ate_resource, ate_resource->lowest_free_index, 1); | ||
103 | |||
104 | return start_index; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Allocate "count" contiguous Bridge Address Translation Entries | ||
109 | * on the specified bridge to be used for PCI to XTALK mappings. | ||
110 | * Indices in rm map range from 1..num_entries. Indices returned | ||
111 | * to caller range from 0..num_entries-1. | ||
112 | * | ||
113 | * Return the start index on success, -1 on failure. | ||
114 | */ | ||
115 | int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) | ||
116 | { | ||
117 | int status; | ||
118 | unsigned long flags; | ||
119 | |||
120 | spin_lock_irqsave(&pcibus_info->pbi_lock, flags); | ||
121 | status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); | ||
122 | spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); | ||
123 | |||
124 | return status; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Setup an Address Translation Entry as specified. Use either the Bridge | ||
129 | * internal maps or the external map RAM, as appropriate. | ||
130 | */ | ||
131 | static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info, | ||
132 | int ate_index) | ||
133 | { | ||
134 | if (ate_index < pcibus_info->pbi_int_ate_size) { | ||
135 | return pcireg_int_ate_addr(pcibus_info, ate_index); | ||
136 | } | ||
137 | panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index); | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Update the ate. | ||
142 | */ | ||
143 | inline void | ||
144 | ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, | ||
145 | volatile u64 ate) | ||
146 | { | ||
147 | while (count-- > 0) { | ||
148 | if (ate_index < pcibus_info->pbi_int_ate_size) { | ||
149 | pcireg_int_ate_set(pcibus_info, ate_index, ate); | ||
150 | } else { | ||
151 | panic("ate_write: invalid ate_index 0x%x", ate_index); | ||
152 | } | ||
153 | ate_index++; | ||
154 | ate += IOPGSIZE; | ||
155 | } | ||
156 | |||
157 | pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */ | ||
158 | } | ||
159 | |||
160 | void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) | ||
161 | { | ||
162 | |||
163 | volatile u64 ate; | ||
164 | int count; | ||
165 | unsigned long flags; | ||
166 | |||
167 | if (pcibr_invalidate_ate) { | ||
168 | /* For debugging purposes, clear the valid bit in the ATE */ | ||
169 | ate = *pcibr_ate_addr(pcibus_info, index); | ||
170 | count = pcibus_info->pbi_int_ate_resource.ate[index]; | ||
171 | ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V)); | ||
172 | } | ||
173 | |||
174 | spin_lock_irqsave(&pcibus_info->pbi_lock, flags); | ||
175 | free_ate_resource(&pcibus_info->pbi_int_ate_resource, index); | ||
176 | spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); | ||
177 | } | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c deleted file mode 100644 index 1e863b277ac9..000000000000 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ /dev/null | |||
@@ -1,413 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <asm/sn/addrs.h> | ||
13 | #include <asm/sn/geo.h> | ||
14 | #include <asm/sn/pcibr_provider.h> | ||
15 | #include <asm/sn/pcibus_provider_defs.h> | ||
16 | #include <asm/sn/pcidev.h> | ||
17 | #include <asm/sn/pic.h> | ||
18 | #include <asm/sn/sn_sal.h> | ||
19 | #include <asm/sn/tiocp.h> | ||
20 | #include "tio.h" | ||
21 | #include "xtalk/xwidgetdev.h" | ||
22 | #include "xtalk/hubdev.h" | ||
23 | |||
24 | extern int sn_ioif_inited; | ||
25 | |||
26 | /* ===================================================================== | ||
27 | * DMA MANAGEMENT | ||
28 | * | ||
29 | * The Bridge ASIC provides three methods of doing DMA: via a "direct map" | ||
30 | * register available in 32-bit PCI space (which selects a contiguous 2G | ||
31 | * address space on some other widget), via "direct" addressing via 64-bit | ||
32 | * PCI space (all destination information comes from the PCI address, | ||
33 | * including transfer attributes), and via a "mapped" region that allows | ||
34 | * a bunch of different small mappings to be established with the PMU. | ||
35 | * | ||
36 | * For efficiency, we prefer to use the 32-bit direct mapping facility, | ||
37 | * since it requires no resource allocations. The advantage of using the | ||
38 | * PMU over the 64-bit direct is that single-cycle PCI addressing can be | ||
39 | * used; the advantage of using 64-bit direct over PMU addressing is that | ||
40 | * we do not have to allocate entries in the PMU. | ||
41 | */ | ||
42 | |||
43 | static dma_addr_t | ||
44 | pcibr_dmamap_ate32(struct pcidev_info *info, | ||
45 | u64 paddr, size_t req_size, u64 flags, int dma_flags) | ||
46 | { | ||
47 | |||
48 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; | ||
49 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | ||
50 | pdi_pcibus_info; | ||
51 | u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> | ||
52 | pdi_linux_pcidev->devfn)) - 1; | ||
53 | int ate_count; | ||
54 | int ate_index; | ||
55 | u64 ate_flags = flags | PCI32_ATE_V; | ||
56 | u64 ate; | ||
57 | u64 pci_addr; | ||
58 | u64 xio_addr; | ||
59 | u64 offset; | ||
60 | |||
61 | /* PIC in PCI-X mode does not support 32-bit PageMap mode */ | ||
62 | if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /* Calculate the number of ATEs needed. */ | ||
67 | if (!(MINIMAL_ATE_FLAG(paddr, req_size))) { | ||
68 | ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */ | ||
69 | +req_size /* max mapping bytes */ | ||
70 | - 1) + 1; /* round UP */ | ||
71 | } else { /* assume requested target is page aligned */ | ||
72 | ate_count = IOPG(req_size /* max mapping bytes */ | ||
73 | - 1) + 1; /* round UP */ | ||
74 | } | ||
75 | |||
76 | /* Get the number of ATEs required. */ | ||
77 | ate_index = pcibr_ate_alloc(pcibus_info, ate_count); | ||
78 | if (ate_index < 0) | ||
79 | return 0; | ||
80 | |||
81 | /* In PCI-X mode, Prefetch not supported */ | ||
82 | if (IS_PCIX(pcibus_info)) | ||
83 | ate_flags &= ~(PCI32_ATE_PREF); | ||
84 | |||
85 | if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) | ||
86 | xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : | ||
87 | PHYS_TO_TIODMA(paddr); | ||
88 | else | ||
89 | xio_addr = paddr; | ||
90 | |||
91 | offset = IOPGOFF(xio_addr); | ||
92 | ate = ate_flags | (xio_addr - offset); | ||
93 | |||
94 | /* If PIC, put the targetid in the ATE */ | ||
95 | if (IS_PIC_SOFT(pcibus_info)) { | ||
96 | ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT); | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * If we're mapping for MSI, set the MSI bit in the ATE. If it's a | ||
101 | * TIOCP based pci bus, we also need to set the PIO bit in the ATE. | ||
102 | */ | ||
103 | if (dma_flags & SN_DMA_MSI) { | ||
104 | ate |= PCI32_ATE_MSI; | ||
105 | if (IS_TIOCP_SOFT(pcibus_info)) | ||
106 | ate |= PCI32_ATE_PIO; | ||
107 | } | ||
108 | |||
109 | ate_write(pcibus_info, ate_index, ate_count, ate); | ||
110 | |||
111 | /* | ||
112 | * Set up the DMA mapped Address. | ||
113 | */ | ||
114 | pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index; | ||
115 | |||
116 | /* | ||
117 | * If byte swapping was enabled for the device in pcibr_endian_set(), | ||
118 | * we need to turn swapping on. | ||
119 | */ | ||
120 | if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR) | ||
121 | ATE_SWAP_ON(pci_addr); | ||
122 | |||
123 | |||
124 | return pci_addr; | ||
125 | } | ||
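The ate_count computation near the top of pcibr_dmamap_ate32() is a worst-case I/O page count.
A worked example, assuming IOPG(x) is x divided by the I/O page size and, purely for
illustration, IOPGSIZE = 16 KB (the real value is platform-defined):

    /* req_size = 20 KB = 0x5000
     *
     * Unaligned path:  ate_count = IOPG((0x4000 - 1) + 0x5000 - 1) + 1
     *                            = IOPG(0x8ffe) + 1 = 2 + 1 = 3
     *                  (a 20 KB region starting just below a page boundary
     *                   can touch three I/O pages)
     *
     * Aligned path:    ate_count = IOPG(0x5000 - 1) + 1 = 1 + 1 = 2
     */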
126 | |||
127 | static dma_addr_t | ||
128 | pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, | ||
129 | u64 dma_attributes, int dma_flags) | ||
130 | { | ||
131 | struct pcibus_info *pcibus_info = (struct pcibus_info *) | ||
132 | ((info->pdi_host_pcidev_info)->pdi_pcibus_info); | ||
133 | u64 pci_addr; | ||
134 | |||
135 | /* Translate to Crosstalk View of Physical Address */ | ||
136 | if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) | ||
137 | pci_addr = IS_PIC_SOFT(pcibus_info) ? | ||
138 | PHYS_TO_DMA(paddr) : | ||
139 | PHYS_TO_TIODMA(paddr); | ||
140 | else | ||
141 | pci_addr = paddr; | ||
142 | pci_addr |= dma_attributes; | ||
143 | |||
144 | /* Handle Bus mode */ | ||
145 | if (IS_PCIX(pcibus_info)) | ||
146 | pci_addr &= ~PCI64_ATTR_PREF; | ||
147 | |||
148 | /* Handle Bridge Chipset differences */ | ||
149 | if (IS_PIC_SOFT(pcibus_info)) { | ||
150 | pci_addr |= | ||
151 | ((u64) pcibus_info-> | ||
152 | pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); | ||
153 | } else | ||
154 | pci_addr |= (dma_flags & SN_DMA_MSI) ? | ||
155 | TIOCP_PCI64_CMDTYPE_MSI : | ||
156 | TIOCP_PCI64_CMDTYPE_MEM; | ||
157 | |||
158 | /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */ | ||
159 | if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn)) | ||
160 | pci_addr |= PCI64_ATTR_VIRTUAL; | ||
161 | |||
162 | return pci_addr; | ||
163 | } | ||
164 | |||
165 | static dma_addr_t | ||
166 | pcibr_dmatrans_direct32(struct pcidev_info * info, | ||
167 | u64 paddr, size_t req_size, u64 flags, int dma_flags) | ||
168 | { | ||
169 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; | ||
170 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | ||
171 | pdi_pcibus_info; | ||
172 | u64 xio_addr; | ||
173 | |||
174 | u64 xio_base; | ||
175 | u64 offset; | ||
176 | u64 endoff; | ||
177 | |||
178 | if (IS_PCIX(pcibus_info)) { | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | if (dma_flags & SN_DMA_MSI) | ||
183 | return 0; | ||
184 | |||
185 | if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) | ||
186 | xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : | ||
187 | PHYS_TO_TIODMA(paddr); | ||
188 | else | ||
189 | xio_addr = paddr; | ||
190 | |||
191 | xio_base = pcibus_info->pbi_dir_xbase; | ||
192 | offset = xio_addr - xio_base; | ||
193 | endoff = req_size + offset; | ||
194 | if ((req_size > (1ULL << 31)) || /* Too Big */ | ||
195 | (xio_addr < xio_base) || /* Out of range for mappings */ | ||
196 | (endoff > (1ULL << 31))) { /* Too Big */ | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | return PCI32_DIRECT_BASE | offset; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Wrapper routine for freeing DMA maps | ||
205 | * DMA mappings for Direct 64 and 32 do not have any DMA maps. | ||
206 | */ | ||
207 | void | ||
208 | pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction) | ||
209 | { | ||
210 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); | ||
211 | struct pcibus_info *pcibus_info = | ||
212 | (struct pcibus_info *)pcidev_info->pdi_pcibus_info; | ||
213 | |||
214 | if (IS_PCI32_MAPPED(dma_handle)) { | ||
215 | int ate_index; | ||
216 | |||
217 | ate_index = | ||
218 | IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE)); | ||
219 | pcibr_ate_free(pcibus_info, ate_index); | ||
220 | } | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * On SN systems there is a race condition between a PIO read response and | ||
225 | * DMA's. In rare cases, the read response may beat the DMA, causing the | ||
226 | * driver to think that data in memory is complete and meaningful. This code | ||
227 | * eliminates that race. This routine is called by the PIO read routines | ||
228 | * after doing the read. For PIC this routine then forces a fake interrupt | ||
229 | * on another line, which is logically associated with the slot that the PIO | ||
230 | * is addressed to. It then spins while watching the memory location that | ||
231 | * the interrupt is targeted to. When the interrupt response arrives, we | ||
232 | * are sure that the DMA has landed in memory and it is safe for the driver | ||
233 | * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush | ||
234 | * Bridge register since it ensures the data has entered the coherence domain, | ||
235 | * unlike the PIC Device(x) Write Request Buffer Flush register. | ||
236 | */ | ||
237 | |||
238 | void sn_dma_flush(u64 addr) | ||
239 | { | ||
240 | nasid_t nasid; | ||
241 | int is_tio; | ||
242 | int wid_num; | ||
243 | int i, j; | ||
244 | unsigned long flags; | ||
245 | u64 itte; | ||
246 | struct hubdev_info *hubinfo; | ||
247 | struct sn_flush_device_kernel *p; | ||
248 | struct sn_flush_device_common *common; | ||
249 | struct sn_flush_nasid_entry *flush_nasid_list; | ||
250 | |||
251 | if (!sn_ioif_inited) | ||
252 | return; | ||
253 | |||
254 | nasid = NASID_GET(addr); | ||
255 | if (-1 == nasid_to_cnodeid(nasid)) | ||
256 | return; | ||
257 | |||
258 | hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; | ||
259 | |||
260 | BUG_ON(!hubinfo); | ||
261 | |||
262 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; | ||
263 | if (flush_nasid_list->widget_p == NULL) | ||
264 | return; | ||
265 | |||
266 | is_tio = (nasid & 1); | ||
267 | if (is_tio) { | ||
268 | int itte_index; | ||
269 | |||
270 | if (TIO_HWIN(addr)) | ||
271 | itte_index = 0; | ||
272 | else if (TIO_BWIN_WINDOWNUM(addr)) | ||
273 | itte_index = TIO_BWIN_WINDOWNUM(addr); | ||
274 | else | ||
275 | itte_index = -1; | ||
276 | |||
277 | if (itte_index >= 0) { | ||
278 | itte = flush_nasid_list->iio_itte[itte_index]; | ||
279 | if (! TIO_ITTE_VALID(itte)) | ||
280 | return; | ||
281 | wid_num = TIO_ITTE_WIDGET(itte); | ||
282 | } else | ||
283 | wid_num = TIO_SWIN_WIDGETNUM(addr); | ||
284 | } else { | ||
285 | if (BWIN_WINDOWNUM(addr)) { | ||
286 | itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)]; | ||
287 | wid_num = IIO_ITTE_WIDGET(itte); | ||
288 | } else | ||
289 | wid_num = SWIN_WIDGETNUM(addr); | ||
290 | } | ||
291 | if (flush_nasid_list->widget_p[wid_num] == NULL) | ||
292 | return; | ||
293 | p = &flush_nasid_list->widget_p[wid_num][0]; | ||
294 | |||
295 | /* find a matching BAR */ | ||
296 | for (i = 0; i < DEV_PER_WIDGET; i++,p++) { | ||
297 | common = p->common; | ||
298 | for (j = 0; j < PCI_ROM_RESOURCE; j++) { | ||
299 | if (common->sfdl_bar_list[j].start == 0) | ||
300 | break; | ||
301 | if (addr >= common->sfdl_bar_list[j].start | ||
302 | && addr <= common->sfdl_bar_list[j].end) | ||
303 | break; | ||
304 | } | ||
305 | if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0) | ||
306 | break; | ||
307 | } | ||
308 | |||
309 | /* if no matching BAR, return without doing anything. */ | ||
310 | if (i == DEV_PER_WIDGET) | ||
311 | return; | ||
312 | |||
313 | /* | ||
314 | * For TIOCP use the Device(x) Write Request Buffer Flush Bridge | ||
315 | * register since it ensures the data has entered the coherence | ||
316 | * domain, unlike PIC. | ||
317 | */ | ||
318 | if (is_tio) { | ||
319 | /* | ||
320 | * Note: devices behind TIOCE should never be matched in the | ||
321 | * above code, and so the following code is PIC/CP centric. | ||
322 | * If CE ever needs the sn_dma_flush mechanism, we will have | ||
323 | * to account for that here and in tioce_bus_fixup(). | ||
324 | */ | ||
325 | u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); | ||
326 | u32 revnum = XWIDGET_PART_REV_NUM(tio_id); | ||
327 | |||
328 | /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ | ||
329 | if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { | ||
330 | return; | ||
331 | } else { | ||
332 | pcireg_wrb_flush_get(common->sfdl_pcibus_info, | ||
333 | (common->sfdl_slot - 1)); | ||
334 | } | ||
335 | } else { | ||
336 | spin_lock_irqsave(&p->sfdl_flush_lock, flags); | ||
337 | *common->sfdl_flush_addr = 0; | ||
338 | |||
339 | /* force an interrupt. */ | ||
340 | *(volatile u32 *)(common->sfdl_force_int_addr) = 1; | ||
341 | |||
342 | /* wait for the interrupt to come back. */ | ||
343 | while (*(common->sfdl_flush_addr) != 0x10f) | ||
344 | cpu_relax(); | ||
345 | |||
346 | /* okay, everything is synched up. */ | ||
347 | spin_unlock_irqrestore(&p->sfdl_flush_lock, flags); | ||
348 | } | ||
349 | return; | ||
350 | } | ||
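From a driver's point of view, the race that sn_dma_flush() closes looks roughly like the
sketch below. Per the comment above, the sn2 PIO read routines issued the flush after the read,
so the driver only saw an ordinary MMIO read; the register names, consume() and the buffer are
hypothetical:

    u32 status = readl(regs + HYP_STATUS);   /* read response may overtake the DMA data */
    /* sn2's PIO read path then called sn_dma_flush() on the target address:
     * for PIC it forces a fake interrupt and spins on its landing spot, for
     * TIOCP it reads the Device(x) Write Request Buffer Flush register. */
    if (status & HYP_DMA_DONE)
        consume(buf);                        /* the DMA'd data is now really in memory */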
351 | |||
352 | /* | ||
353 | * DMA interfaces. Called from pci_dma.c routines. | ||
354 | */ | ||
355 | |||
356 | dma_addr_t | ||
357 | pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) | ||
358 | { | ||
359 | dma_addr_t dma_handle; | ||
360 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); | ||
361 | |||
362 | /* SN cannot support DMA addresses smaller than 32 bits. */ | ||
363 | if (hwdev->dma_mask < 0x7fffffff) { | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | if (hwdev->dma_mask == ~0UL) { | ||
368 | /* | ||
369 | * Handle the most common case: 64 bit cards. This | ||
370 | * call should always succeed. | ||
371 | */ | ||
372 | |||
373 | dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, | ||
374 | PCI64_ATTR_PREF, dma_flags); | ||
375 | } else { | ||
376 | /* Handle 32-63 bit cards via direct mapping */ | ||
377 | dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, | ||
378 | size, 0, dma_flags); | ||
379 | if (!dma_handle) { | ||
380 | /* | ||
381 | * It is a 32 bit card and we cannot do direct mapping, | ||
382 | * so we use an ATE. | ||
383 | */ | ||
384 | |||
385 | dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, | ||
386 | size, PCI32_ATE_PREF, | ||
387 | dma_flags); | ||
388 | } | ||
389 | } | ||
390 | |||
391 | return dma_handle; | ||
392 | } | ||
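Which branch of pcibr_dma_map() a mapping takes is decided entirely by the DMA mask the driver
has set: a full 64-bit mask uses the direct64 translation, larger-than-32-bit but not full
masks try the 32-bit direct window and then fall back to ATEs, and masks smaller than 32 bits
are refused. A hedged probe-time sketch using the generic DMA API (hypothetical driver, not
from this patch):

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
        /* dma_mask == ~0UL: pcibr_dmatrans_direct64(), no ATEs needed */
    } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) == 0) {
        /* pcibr_dmatrans_direct32() first, pcibr_dmamap_ate32() as fallback */
    } else {
        return -EIO;    /* pcibr_dma_map() would return 0 for such masks */
    }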
393 | |||
394 | dma_addr_t | ||
395 | pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, | ||
396 | size_t size, int dma_flags) | ||
397 | { | ||
398 | dma_addr_t dma_handle; | ||
399 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); | ||
400 | |||
401 | if (hwdev->dev.coherent_dma_mask == ~0UL) { | ||
402 | dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, | ||
403 | PCI64_ATTR_BAR, dma_flags); | ||
404 | } else { | ||
405 | dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, | ||
406 | phys_addr, size, | ||
407 | PCI32_ATE_BAR, dma_flags); | ||
408 | } | ||
409 | |||
410 | return dma_handle; | ||
411 | } | ||
412 | |||
413 | EXPORT_SYMBOL(sn_dma_flush); | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c deleted file mode 100644 index 7195df1da121..000000000000 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ /dev/null | |||
@@ -1,265 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/pci.h> | ||
13 | #include <linux/export.h> | ||
14 | #include <asm/sn/addrs.h> | ||
15 | #include <asm/sn/geo.h> | ||
16 | #include <asm/sn/pcibr_provider.h> | ||
17 | #include <asm/sn/pcibus_provider_defs.h> | ||
18 | #include <asm/sn/pcidev.h> | ||
19 | #include <asm/sn/sn_sal.h> | ||
20 | #include <asm/sn/pic.h> | ||
21 | #include <asm/sn/sn2/sn_hwperf.h> | ||
22 | #include "xtalk/xwidgetdev.h" | ||
23 | #include "xtalk/hubdev.h" | ||
24 | |||
25 | int | ||
26 | sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp, | ||
27 | char **ssdt) | ||
28 | { | ||
29 | struct ia64_sal_retval ret_stuff; | ||
30 | u64 busnum; | ||
31 | u64 segment; | ||
32 | |||
33 | ret_stuff.status = 0; | ||
34 | ret_stuff.v0 = 0; | ||
35 | |||
36 | segment = soft->pbi_buscommon.bs_persist_segment; | ||
37 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
38 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, | ||
39 | busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt), | ||
40 | 0, 0); | ||
41 | |||
42 | return (int)ret_stuff.v0; | ||
43 | } | ||
44 | |||
45 | int | ||
46 | sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, | ||
47 | void *resp) | ||
48 | { | ||
49 | struct ia64_sal_retval ret_stuff; | ||
50 | u64 busnum; | ||
51 | u64 segment; | ||
52 | |||
53 | ret_stuff.status = 0; | ||
54 | ret_stuff.v0 = 0; | ||
55 | |||
56 | segment = soft->pbi_buscommon.bs_persist_segment; | ||
57 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
58 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, | ||
59 | segment, busnum, (u64) device, (u64) action, | ||
60 | (u64) resp, 0, 0); | ||
61 | |||
62 | return (int)ret_stuff.v0; | ||
63 | } | ||
64 | |||
65 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) | ||
66 | { | ||
67 | struct ia64_sal_retval ret_stuff; | ||
68 | u64 busnum; | ||
69 | int segment; | ||
70 | ret_stuff.status = 0; | ||
71 | ret_stuff.v0 = 0; | ||
72 | |||
73 | segment = soft->pbi_buscommon.bs_persist_segment; | ||
74 | busnum = soft->pbi_buscommon.bs_persist_busnum; | ||
75 | SAL_CALL_NOLOCK(ret_stuff, | ||
76 | (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | ||
77 | (u64) segment, (u64) busnum, 0, 0, 0, 0, 0); | ||
78 | |||
79 | return (int)ret_stuff.v0; | ||
80 | } | ||
81 | |||
82 | u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus) | ||
83 | { | ||
84 | long rc; | ||
85 | u16 uninitialized_var(ioboard); /* GCC be quiet */ | ||
86 | nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); | ||
87 | |||
88 | rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard); | ||
89 | if (rc) { | ||
90 | printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n", | ||
91 | rc); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | return ioboard; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI | ||
100 | * bridge sends an error interrupt. | ||
101 | */ | ||
102 | static irqreturn_t | ||
103 | pcibr_error_intr_handler(int irq, void *arg) | ||
104 | { | ||
105 | struct pcibus_info *soft = arg; | ||
106 | |||
107 | if (sal_pcibr_error_interrupt(soft) < 0) | ||
108 | panic("pcibr_error_intr_handler(): Fatal Bridge Error"); | ||
109 | |||
110 | return IRQ_HANDLED; | ||
111 | } | ||
112 | |||
113 | void * | ||
114 | pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | ||
115 | { | ||
116 | int nasid, cnode, j; | ||
117 | struct hubdev_info *hubdev_info; | ||
118 | struct pcibus_info *soft; | ||
119 | struct sn_flush_device_kernel *sn_flush_device_kernel; | ||
120 | struct sn_flush_device_common *common; | ||
121 | |||
122 | if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) { | ||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Allocate kernel bus soft and copy from prom. | ||
128 | */ | ||
129 | |||
130 | soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL); | ||
131 | if (!soft) { | ||
132 | return NULL; | ||
133 | } | ||
134 | |||
135 | soft->pbi_buscommon.bs_base = (unsigned long) | ||
136 | ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base), | ||
137 | sizeof(struct pic)); | ||
138 | |||
139 | spin_lock_init(&soft->pbi_lock); | ||
140 | |||
141 | /* | ||
142 | * register the bridge's error interrupt handler | ||
143 | */ | ||
144 | if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler, | ||
145 | IRQF_SHARED, "PCIBR error", (void *)(soft))) { | ||
146 | printk(KERN_WARNING | ||
147 | "pcibr cannot allocate interrupt for error handler\n"); | ||
148 | } | ||
149 | irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq); | ||
150 | sn_set_err_irq_affinity(SGI_PCIASIC_ERROR); | ||
151 | |||
152 | /* | ||
153 | * Update the Bridge with the "kernel" pagesize | ||
154 | */ | ||
155 | if (PAGE_SIZE < 16384) { | ||
156 | pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE); | ||
157 | } else { | ||
158 | pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE); | ||
159 | } | ||
160 | |||
161 | nasid = NASID_GET(soft->pbi_buscommon.bs_base); | ||
162 | cnode = nasid_to_cnodeid(nasid); | ||
163 | hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | ||
164 | |||
165 | if (hubdev_info->hdi_flush_nasid_list.widget_p) { | ||
166 | sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list. | ||
167 | widget_p[(int)soft->pbi_buscommon.bs_xid]; | ||
168 | if (sn_flush_device_kernel) { | ||
169 | for (j = 0; j < DEV_PER_WIDGET; | ||
170 | j++, sn_flush_device_kernel++) { | ||
171 | common = sn_flush_device_kernel->common; | ||
172 | if (common->sfdl_slot == -1) | ||
173 | continue; | ||
174 | if ((common->sfdl_persistent_segment == | ||
175 | soft->pbi_buscommon.bs_persist_segment) && | ||
176 | (common->sfdl_persistent_busnum == | ||
177 | soft->pbi_buscommon.bs_persist_busnum)) | ||
178 | common->sfdl_pcibus_info = | ||
179 | soft; | ||
180 | } | ||
181 | } | ||
182 | } | ||
183 | |||
184 | /* Setup the PMU ATE map */ | ||
185 | soft->pbi_int_ate_resource.lowest_free_index = 0; | ||
186 | soft->pbi_int_ate_resource.ate = | ||
187 | kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL); | ||
188 | |||
189 | if (!soft->pbi_int_ate_resource.ate) { | ||
190 | kfree(soft); | ||
191 | return NULL; | ||
192 | } | ||
193 | |||
194 | return soft; | ||
195 | } | ||
196 | |||
197 | void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info) | ||
198 | { | ||
199 | struct pcidev_info *pcidev_info; | ||
200 | struct pcibus_info *pcibus_info; | ||
201 | int bit = sn_irq_info->irq_int_bit; | ||
202 | |||
203 | if (! sn_irq_info->irq_bridge) | ||
204 | return; | ||
205 | |||
206 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
207 | if (pcidev_info) { | ||
208 | pcibus_info = | ||
209 | (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> | ||
210 | pdi_pcibus_info; | ||
211 | pcireg_force_intr_set(pcibus_info, bit); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info) | ||
216 | { | ||
217 | struct pcidev_info *pcidev_info; | ||
218 | struct pcibus_info *pcibus_info; | ||
219 | int bit = sn_irq_info->irq_int_bit; | ||
220 | u64 xtalk_addr = sn_irq_info->irq_xtalkaddr; | ||
221 | |||
222 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
223 | if (pcidev_info) { | ||
224 | pcibus_info = | ||
225 | (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> | ||
226 | pdi_pcibus_info; | ||
227 | |||
228 | /* Disable the device's IRQ */ | ||
229 | pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit)); | ||
230 | |||
231 | /* Change the device's IRQ */ | ||
232 | pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr); | ||
233 | |||
234 | /* Re-enable the device's IRQ */ | ||
235 | pcireg_intr_enable_bit_set(pcibus_info, (1 << bit)); | ||
236 | |||
237 | pcibr_force_interrupt(sn_irq_info); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Provider entries for PIC/CP | ||
243 | */ | ||
244 | |||
245 | struct sn_pcibus_provider pcibr_provider = { | ||
246 | .dma_map = pcibr_dma_map, | ||
247 | .dma_map_consistent = pcibr_dma_map_consistent, | ||
248 | .dma_unmap = pcibr_dma_unmap, | ||
249 | .bus_fixup = pcibr_bus_fixup, | ||
250 | .force_interrupt = pcibr_force_interrupt, | ||
251 | .target_interrupt = pcibr_target_interrupt | ||
252 | }; | ||
253 | |||
254 | int | ||
255 | pcibr_init_provider(void) | ||
256 | { | ||
257 | sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider; | ||
258 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider; | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); | ||
264 | EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); | ||
265 | EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus); | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c deleted file mode 100644 index 8b8bbd51d433..000000000000 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ /dev/null | |||
@@ -1,285 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <asm/sn/io.h> | ||
12 | #include <asm/sn/pcibr_provider.h> | ||
13 | #include <asm/sn/pcibus_provider_defs.h> | ||
14 | #include <asm/sn/pcidev.h> | ||
15 | #include <asm/sn/pic.h> | ||
16 | #include <asm/sn/tiocp.h> | ||
17 | |||
18 | union br_ptr { | ||
19 | struct tiocp tio; | ||
20 | struct pic pic; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * Control Register Access -- Read/Write 0000_0020 | ||
25 | */ | ||
26 | void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits) | ||
27 | { | ||
28 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
29 | |||
30 | if (pcibus_info) { | ||
31 | switch (pcibus_info->pbi_bridge_type) { | ||
32 | case PCIBR_BRIDGETYPE_TIOCP: | ||
33 | __sn_clrq_relaxed(&ptr->tio.cp_control, bits); | ||
34 | break; | ||
35 | case PCIBR_BRIDGETYPE_PIC: | ||
36 | __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits); | ||
37 | break; | ||
38 | default: | ||
39 | panic | ||
40 | ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p", | ||
41 | ptr); | ||
42 | } | ||
43 | } | ||
44 | } | ||
45 | |||
46 | void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits) | ||
47 | { | ||
48 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
49 | |||
50 | if (pcibus_info) { | ||
51 | switch (pcibus_info->pbi_bridge_type) { | ||
52 | case PCIBR_BRIDGETYPE_TIOCP: | ||
53 | __sn_setq_relaxed(&ptr->tio.cp_control, bits); | ||
54 | break; | ||
55 | case PCIBR_BRIDGETYPE_PIC: | ||
56 | __sn_setq_relaxed(&ptr->pic.p_wid_control, bits); | ||
57 | break; | ||
58 | default: | ||
59 | panic | ||
60 | ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p", | ||
61 | ptr); | ||
62 | } | ||
63 | } | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050 | ||
68 | */ | ||
69 | u64 pcireg_tflush_get(struct pcibus_info *pcibus_info) | ||
70 | { | ||
71 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
72 | u64 ret = 0; | ||
73 | |||
74 | if (pcibus_info) { | ||
75 | switch (pcibus_info->pbi_bridge_type) { | ||
76 | case PCIBR_BRIDGETYPE_TIOCP: | ||
77 | ret = __sn_readq_relaxed(&ptr->tio.cp_tflush); | ||
78 | break; | ||
79 | case PCIBR_BRIDGETYPE_PIC: | ||
80 | ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush); | ||
81 | break; | ||
82 | default: | ||
83 | panic | ||
84 | ("pcireg_tflush_get: unknown bridgetype bridge 0x%p", | ||
85 | ptr); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | /* Read of the Target Flush should always return zero */ | ||
90 | if (ret != 0) | ||
91 | panic("pcireg_tflush_get:Target Flush failed\n"); | ||
92 | |||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Interrupt Status Register Access -- Read Only 0000_0100 | ||
98 | */ | ||
99 | u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info) | ||
100 | { | ||
101 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
102 | u64 ret = 0; | ||
103 | |||
104 | if (pcibus_info) { | ||
105 | switch (pcibus_info->pbi_bridge_type) { | ||
106 | case PCIBR_BRIDGETYPE_TIOCP: | ||
107 | ret = __sn_readq_relaxed(&ptr->tio.cp_int_status); | ||
108 | break; | ||
109 | case PCIBR_BRIDGETYPE_PIC: | ||
110 | ret = __sn_readq_relaxed(&ptr->pic.p_int_status); | ||
111 | break; | ||
112 | default: | ||
113 | panic | ||
114 | ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p", | ||
115 | ptr); | ||
116 | } | ||
117 | } | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Interrupt Enable Register Access -- Read/Write 0000_0108 | ||
123 | */ | ||
124 | void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits) | ||
125 | { | ||
126 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
127 | |||
128 | if (pcibus_info) { | ||
129 | switch (pcibus_info->pbi_bridge_type) { | ||
130 | case PCIBR_BRIDGETYPE_TIOCP: | ||
131 | __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits); | ||
132 | break; | ||
133 | case PCIBR_BRIDGETYPE_PIC: | ||
134 | __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits); | ||
135 | break; | ||
136 | default: | ||
137 | panic | ||
138 | ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p", | ||
139 | ptr); | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | |||
144 | void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits) | ||
145 | { | ||
146 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
147 | |||
148 | if (pcibus_info) { | ||
149 | switch (pcibus_info->pbi_bridge_type) { | ||
150 | case PCIBR_BRIDGETYPE_TIOCP: | ||
151 | __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits); | ||
152 | break; | ||
153 | case PCIBR_BRIDGETYPE_PIC: | ||
154 | __sn_setq_relaxed(&ptr->pic.p_int_enable, bits); | ||
155 | break; | ||
156 | default: | ||
157 | panic | ||
158 | ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p", | ||
159 | ptr); | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168 | ||
166 | */ | ||
167 | void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, | ||
168 | u64 addr) | ||
169 | { | ||
170 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
171 | |||
172 | if (pcibus_info) { | ||
173 | switch (pcibus_info->pbi_bridge_type) { | ||
174 | case PCIBR_BRIDGETYPE_TIOCP: | ||
175 | __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n], | ||
176 | TIOCP_HOST_INTR_ADDR); | ||
177 | __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n], | ||
178 | (addr & TIOCP_HOST_INTR_ADDR)); | ||
179 | break; | ||
180 | case PCIBR_BRIDGETYPE_PIC: | ||
181 | __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n], | ||
182 | PIC_HOST_INTR_ADDR); | ||
183 | __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n], | ||
184 | (addr & PIC_HOST_INTR_ADDR)); | ||
185 | break; | ||
186 | default: | ||
187 | panic | ||
188 | ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p", | ||
189 | ptr); | ||
190 | } | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8 | ||
196 | */ | ||
197 | void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n) | ||
198 | { | ||
199 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
200 | |||
201 | if (pcibus_info) { | ||
202 | switch (pcibus_info->pbi_bridge_type) { | ||
203 | case PCIBR_BRIDGETYPE_TIOCP: | ||
204 | writeq(1, &ptr->tio.cp_force_pin[int_n]); | ||
205 | break; | ||
206 | case PCIBR_BRIDGETYPE_PIC: | ||
207 | writeq(1, &ptr->pic.p_force_pin[int_n]); | ||
208 | break; | ||
209 | default: | ||
210 | panic | ||
211 | ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p", | ||
212 | ptr); | ||
213 | } | ||
214 | } | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258 | ||
219 | */ | ||
220 | u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) | ||
221 | { | ||
222 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
223 | u64 ret = 0; | ||
224 | |||
225 | if (pcibus_info) { | ||
226 | switch (pcibus_info->pbi_bridge_type) { | ||
227 | case PCIBR_BRIDGETYPE_TIOCP: | ||
228 | ret = | ||
229 | __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]); | ||
230 | break; | ||
231 | case PCIBR_BRIDGETYPE_PIC: | ||
232 | ret = | ||
233 | __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]); | ||
234 | break; | ||
235 | default: | ||
236 | panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr); | ||
237 | } | ||
238 | |||
239 | } | ||
240 | /* Read of the Write Buffer Flush should always return zero */ | ||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, | ||
245 | u64 val) | ||
246 | { | ||
247 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
248 | |||
249 | if (pcibus_info) { | ||
250 | switch (pcibus_info->pbi_bridge_type) { | ||
251 | case PCIBR_BRIDGETYPE_TIOCP: | ||
252 | writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]); | ||
253 | break; | ||
254 | case PCIBR_BRIDGETYPE_PIC: | ||
255 | writeq(val, &ptr->pic.p_int_ate_ram[ate_index]); | ||
256 | break; | ||
257 | default: | ||
258 | panic | ||
259 | ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p", | ||
260 | ptr); | ||
261 | } | ||
262 | } | ||
263 | } | ||
264 | |||
265 | u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) | ||
266 | { | ||
267 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | ||
268 | u64 __iomem *ret = NULL; | ||
269 | |||
270 | if (pcibus_info) { | ||
271 | switch (pcibus_info->pbi_bridge_type) { | ||
272 | case PCIBR_BRIDGETYPE_TIOCP: | ||
273 | ret = &ptr->tio.cp_int_ate_ram[ate_index]; | ||
274 | break; | ||
275 | case PCIBR_BRIDGETYPE_PIC: | ||
276 | ret = &ptr->pic.p_int_ate_ram[ate_index]; | ||
277 | break; | ||
278 | default: | ||
279 | panic | ||
280 | ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p", | ||
281 | ptr); | ||
282 | } | ||
283 | } | ||
284 | return ret; | ||
285 | } | ||
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c deleted file mode 100644 index a70b11fd57d6..000000000000 --- a/arch/ia64/sn/pci/tioca_provider.c +++ /dev/null | |||
@@ -1,677 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/bitmap.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/export.h> | ||
15 | #include <asm/sn/sn_sal.h> | ||
16 | #include <asm/sn/addrs.h> | ||
17 | #include <asm/sn/io.h> | ||
18 | #include <asm/sn/pcidev.h> | ||
19 | #include <asm/sn/pcibus_provider_defs.h> | ||
20 | #include <asm/sn/tioca_provider.h> | ||
21 | |||
22 | u32 tioca_gart_found; | ||
23 | EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ | ||
24 | |||
25 | LIST_HEAD(tioca_list); | ||
26 | EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */ | ||
27 | |||
28 | static int tioca_gart_init(struct tioca_kernel *); | ||
29 | |||
30 | /** | ||
31 | * tioca_gart_init - Initialize SGI TIOCA GART | ||
32 | * @tioca_kern: ptr to the kernel struct identifying the CA to initialize | ||
33 | * | ||
34 | * If the indicated tioca has devices present, initialize its associated | ||
35 | * GART MMR's and kernel memory. | ||
36 | */ | ||
37 | static int | ||
38 | tioca_gart_init(struct tioca_kernel *tioca_kern) | ||
39 | { | ||
40 | u64 ap_reg; | ||
41 | u64 offset; | ||
42 | struct page *tmp; | ||
43 | struct tioca_common *tioca_common; | ||
44 | struct tioca __iomem *ca_base; | ||
45 | |||
46 | tioca_common = tioca_kern->ca_common; | ||
47 | ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; | ||
48 | |||
49 | if (list_empty(tioca_kern->ca_devices)) | ||
50 | return 0; | ||
51 | |||
52 | ap_reg = 0; | ||
53 | |||
54 | /* | ||
55 | * Validate aperture size | ||
56 | */ | ||
57 | |||
58 | switch (CA_APERATURE_SIZE >> 20) { | ||
59 | case 4: | ||
60 | ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */ | ||
61 | break; | ||
62 | case 8: | ||
63 | ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */ | ||
64 | break; | ||
65 | case 16: | ||
66 | ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */ | ||
67 | break; | ||
68 | case 32: | ||
69 | ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */ | ||
70 | break; | ||
71 | case 64: | ||
72 | ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */ | ||
73 | break; | ||
74 | case 128: | ||
75 | ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */ | ||
76 | break; | ||
77 | case 256: | ||
78 | ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */ | ||
79 | break; | ||
80 | case 512: | ||
81 | ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */ | ||
82 | break; | ||
83 | case 1024: | ||
84 | ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */ | ||
85 | break; | ||
86 | case 2048: | ||
87 | ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */ | ||
88 | break; | ||
89 | case 4096: | ||
90 | ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */ | ||
91 | break; | ||
92 | default: | ||
93 | printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " | ||
94 | "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE); | ||
95 | return -1; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Set up other aperture parameters | ||
100 | */ | ||
101 | |||
102 | if (PAGE_SIZE >= 16384) { | ||
103 | tioca_kern->ca_ap_pagesize = 16384; | ||
104 | ap_reg |= CA_GART_PAGE_SIZE; | ||
105 | } else { | ||
106 | tioca_kern->ca_ap_pagesize = 4096; | ||
107 | } | ||
108 | |||
109 | tioca_kern->ca_ap_size = CA_APERATURE_SIZE; | ||
110 | tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE; | ||
111 | tioca_kern->ca_gart_entries = | ||
112 | tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize; | ||
113 | |||
114 | ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI); | ||
115 | ap_reg |= tioca_kern->ca_ap_bus_base; | ||
116 | |||
117 | /* | ||
118 | * Allocate and set up the GART | ||
119 | */ | ||
120 | |||
121 | tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64); | ||
122 | tmp = | ||
123 | alloc_pages_node(tioca_kern->ca_closest_node, | ||
124 | GFP_KERNEL | __GFP_ZERO, | ||
125 | get_order(tioca_kern->ca_gart_size)); | ||
126 | |||
127 | if (!tmp) { | ||
128 | printk(KERN_ERR "%s: Could not allocate " | ||
129 | "%llu bytes (order %d) for GART\n", | ||
130 | __func__, | ||
131 | tioca_kern->ca_gart_size, | ||
132 | get_order(tioca_kern->ca_gart_size)); | ||
133 | return -ENOMEM; | ||
134 | } | ||
135 | |||
136 | tioca_kern->ca_gart = page_address(tmp); | ||
137 | tioca_kern->ca_gart_coretalk_addr = | ||
138 | PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart)); | ||
139 | |||
140 | /* | ||
141 | * Compute PCI/AGP convenience fields | ||
142 | */ | ||
143 | |||
144 | offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE; | ||
145 | tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE; | ||
146 | tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE; | ||
147 | tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize; | ||
148 | tioca_kern->ca_pcigart_base = | ||
149 | tioca_kern->ca_gart_coretalk_addr + offset; | ||
150 | tioca_kern->ca_pcigart = | ||
151 | &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start]; | ||
152 | tioca_kern->ca_pcigart_entries = | ||
153 | tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; | ||
154 | tioca_kern->ca_pcigart_pagemap = | ||
155 | kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); | ||
156 | if (!tioca_kern->ca_pcigart_pagemap) { | ||
157 | free_pages((unsigned long)tioca_kern->ca_gart, | ||
158 | get_order(tioca_kern->ca_gart_size)); | ||
159 | return -1; | ||
160 | } | ||
161 | |||
162 | offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE; | ||
163 | tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE; | ||
164 | tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE; | ||
165 | tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize; | ||
166 | tioca_kern->ca_gfxgart_base = | ||
167 | tioca_kern->ca_gart_coretalk_addr + offset; | ||
168 | tioca_kern->ca_gfxgart = | ||
169 | &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start]; | ||
170 | tioca_kern->ca_gfxgart_entries = | ||
171 | tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize; | ||
172 | |||
173 | /* | ||
174 | * various control settings: | ||
175 | * use agp op-combining | ||
176 | * use GET semantics to fetch memory | ||
177 | * participate in coherency domain | ||
178 | * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 | ||
179 | */ | ||
180 | |||
181 | __sn_setq_relaxed(&ca_base->ca_control1, | ||
182 | CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */ | ||
183 | __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); | ||
184 | __sn_setq_relaxed(&ca_base->ca_control2, | ||
185 | (0x2ull << CA_GART_MEM_PARAM_SHFT)); | ||
186 | tioca_kern->ca_gart_iscoherent = 1; | ||
187 | __sn_clrq_relaxed(&ca_base->ca_control2, | ||
188 | (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB)); | ||
189 | |||
190 | /* | ||
191 | * Unmask GART fetch error interrupts. Clear residual errors first. | ||
192 | */ | ||
193 | |||
194 | writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias); | ||
195 | writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias); | ||
196 | __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR); | ||
197 | |||
198 | /* | ||
199 | * Program the aperature and gart registers in TIOCA | ||
200 | */ | ||
201 | |||
202 | writeq(ap_reg, &ca_base->ca_gart_aperature); | ||
203 | writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table); | ||
204 | |||
205 | return 0; | ||
206 | } | ||
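The GART sizing in tioca_gart_init() above is simple arithmetic: one 64-bit entry per aperture page. Below is a self-contained sketch of that calculation using invented aperture and page sizes rather than the real CA_APERATURE_* constants.

#include <stdint.h>
#include <stdio.h>

/* Invented example sizes; the real values come from CA_APERATURE_SIZE
 * and the CA page-size selection, not from these macros. */
#define EXAMPLE_AP_SIZE   (1ULL << 30)   /* 1 GB aperture */
#define EXAMPLE_PAGESIZE  4096ULL        /* 4 KB GART page */

int main(void)
{
	uint64_t entries     = EXAMPLE_AP_SIZE / EXAMPLE_PAGESIZE;  /* 262144 */
	uint64_t table_bytes = entries * sizeof(uint64_t);          /* 2 MB   */

	printf("%llu GART entries, %llu byte GART table\n",
	       (unsigned long long)entries, (unsigned long long)table_bytes);
	return 0;
}

With a 1 GB aperture and 4 KB pages this comes to a 2 MB table, which is why a page-order allocation (alloc_pages_node()/get_order()) is a natural fit here.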
207 | |||
208 | /** | ||
209 | * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions | ||
210 | * @tioca_kern: structure representing the CA | ||
211 | * | ||
212 | * Given a CA, scan all attached functions making sure they all support | ||
213 | * FastWrite. If so, enable FastWrite for all functions and the CA itself. | ||
214 | */ | ||
215 | |||
216 | void | ||
217 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) | ||
218 | { | ||
219 | int cap_ptr; | ||
220 | u32 reg; | ||
221 | struct tioca __iomem *tioca_base; | ||
222 | struct pci_dev *pdev; | ||
223 | struct tioca_common *common; | ||
224 | |||
225 | common = tioca_kern->ca_common; | ||
226 | |||
227 | /* | ||
228 | * Scan all vga controllers on this bus making sure they all | ||
229 | * support FW. If not, return. | ||
230 | */ | ||
231 | |||
232 | list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { | ||
233 | if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) | ||
234 | continue; | ||
235 | |||
236 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | ||
237 | if (!cap_ptr) | ||
238 | return; /* no AGP CAP means no FW */ | ||
239 | |||
240 | pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, ®); | ||
241 | if (!(reg & PCI_AGP_STATUS_FW)) | ||
242 | return; /* function doesn't support FW */ | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Set fw for all vga fn's | ||
247 | */ | ||
248 | |||
249 | list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { | ||
250 | if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) | ||
251 | continue; | ||
252 | |||
253 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | ||
254 | pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, ®); | ||
255 | reg |= PCI_AGP_COMMAND_FW; | ||
256 | pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Set ca's fw to match | ||
261 | */ | ||
262 | |||
263 | tioca_base = (struct tioca __iomem*)common->ca_common.bs_base; | ||
264 | __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE); | ||
265 | } | ||
266 | |||
267 | EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ | ||
268 | |||
269 | /** | ||
270 | * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode | ||
271 | * @paddr: system physical address | ||
272 | * | ||
273 | * Map @paddr into 64-bit CA bus space. No device context is necessary. | ||
274 | * Bits 53:0 come from the coretalk address. We just need to mask in the | ||
275 | * following optional bits of the 64-bit pci address: | ||
276 | * | ||
277 | * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent) | ||
278 | * 0x2 for PIO (non-coherent) | ||
279 | * We will always use 0x1 | ||
280 | * 55:55 - Swap bytes. Currently unused. | ||
281 | */ | ||
282 | static u64 | ||
283 | tioca_dma_d64(unsigned long paddr) | ||
284 | { | ||
285 | dma_addr_t bus_addr; | ||
286 | |||
287 | bus_addr = PHYS_TO_TIODMA(paddr); | ||
288 | |||
289 | BUG_ON(!bus_addr); | ||
290 | BUG_ON(bus_addr >> 54); | ||
291 | |||
292 | /* Set upper nibble to Cache Coherent Memory op */ | ||
293 | bus_addr |= (1UL << 60); | ||
294 | |||
295 | return bus_addr; | ||
296 | } | ||
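A minimal stand-alone illustration of the d64 address construction described above, assuming an already coretalk-translated address (the value below is made up) that fits in 54 bits, so that OR-ing in bit 60 leaves 0x1 in bits 63:60.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up coretalk address; must fit in 54 bits so the
	 * packet-type nibble in bits 63:60 ends up as 0x1. */
	uint64_t ct_addr  = 0x0000123456789000ULL;
	uint64_t bus_addr = ct_addr | (1ULL << 60);

	printf("bus addr 0x%llx\n", (unsigned long long)bus_addr);
	return 0;
}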
297 | |||
298 | /** | ||
299 | * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode | ||
300 | * @pdev: linux pci_dev representing the function | ||
301 | * @paddr: system physical address | ||
302 | * | ||
303 | * Map @paddr into the 48-bit direct-mapped bus space of the CA upstream of @pdev. | ||
304 | * | ||
305 | * The CA agp 48 bit direct address falls out as follows: | ||
306 | * | ||
307 | * When direct mapping AGP addresses, the 48 bit AGP address is | ||
308 | * constructed as follows: | ||
309 | * | ||
310 | * [47:40] - Low 8 bits of the page Node ID extracted from coretalk | ||
311 | * address [47:40]. The upper 8 node bits are fixed | ||
312 | * and come from the xxx register bits [5:0] | ||
313 | * [39:38] - Chiplet ID extracted from coretalk address [39:38] | ||
314 | * [37:00] - node offset extracted from coretalk address [37:00] | ||
315 | * | ||
316 | * Since the node id in general will be non-zero, and the chiplet id | ||
317 | * will always be non-zero, it follows that the device must support | ||
318 | * a dma mask of at least 0xffffffffff (40 bits) to target node 0 | ||
319 | * and in general should be 0xffffffffffff (48 bits) to target nodes | ||
320 | * up to 255. Nodes above 255 need the support of the xxx register, | ||
321 | * and so a given CA can only directly target nodes in the range | ||
322 | * xxx - xxx+255. | ||
323 | */ | ||
324 | static u64 | ||
325 | tioca_dma_d48(struct pci_dev *pdev, u64 paddr) | ||
326 | { | ||
327 | struct tioca_common *tioca_common; | ||
328 | struct tioca __iomem *ca_base; | ||
329 | u64 ct_addr; | ||
330 | dma_addr_t bus_addr; | ||
331 | u32 node_upper; | ||
332 | u64 agp_dma_extn; | ||
333 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
334 | |||
335 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
336 | ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; | ||
337 | |||
338 | ct_addr = PHYS_TO_TIODMA(paddr); | ||
339 | if (!ct_addr) | ||
340 | return 0; | ||
341 | |||
342 | bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL); | ||
343 | node_upper = ct_addr >> 48; | ||
344 | |||
345 | if (node_upper > 64) { | ||
346 | printk(KERN_ERR "%s: coretalk addr 0x%p node id out " | ||
347 | "of range\n", __func__, (void *)ct_addr); | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn); | ||
352 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { | ||
353 | printk(KERN_ERR "%s: coretalk upper node (%u) " | ||
354 | "mismatch with ca_agp_dma_addr_extn (%llu)\n", | ||
355 | __func__, | ||
356 | node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | return bus_addr; | ||
361 | } | ||
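To make the node-range constraint from the comment above concrete, here is a toy version of the upper-node check with invented values; "programmed_node_upper" stands in for the value the real code reads back from the CA's ca_agp_dma_addr_extn register.

#include <stdint.h>
#include <stdio.h>

/* Toy upper-node check, illustration only. */
static uint64_t demo_d48(uint64_t ct_addr, uint32_t programmed_node_upper)
{
	uint64_t bus_addr   = ct_addr & 0xffffffffffffULL;  /* low 48 bits */
	uint32_t node_upper = (uint32_t)(ct_addr >> 48);

	if (node_upper != programmed_node_upper)
		return 0;	/* CA cannot reach this node directly */
	return bus_addr;
}

int main(void)
{
	/* Coretalk address whose upper node bits are 2, CA programmed for 2. */
	printf("0x%llx\n",
	       (unsigned long long)demo_d48(0x0002123456789000ULL, 2));
	return 0;
}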
362 | |||
363 | /** | ||
364 | * tioca_dma_mapped - create a DMA mapping using a CA GART | ||
365 | * @pdev: linux pci_dev representing the function | ||
366 | * @paddr: host physical address to map | ||
367 | * @req_size: len (bytes) to map | ||
368 | * | ||
369 | * Map @paddr into CA address space using the GART mechanism. The mapped | ||
370 | * dma_addr_t is guaranteed to be contiguous in CA bus space. | ||
371 | */ | ||
372 | static dma_addr_t | ||
373 | tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) | ||
374 | { | ||
375 | int ps, ps_shift, entry, entries, mapsize; | ||
376 | u64 xio_addr, end_xio_addr; | ||
377 | struct tioca_common *tioca_common; | ||
378 | struct tioca_kernel *tioca_kern; | ||
379 | dma_addr_t bus_addr = 0; | ||
380 | struct tioca_dmamap *ca_dmamap; | ||
381 | void *map; | ||
382 | unsigned long flags; | ||
383 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
384 | |||
385 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
386 | tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; | ||
387 | |||
388 | xio_addr = PHYS_TO_TIODMA(paddr); | ||
389 | if (!xio_addr) | ||
390 | return 0; | ||
391 | |||
392 | spin_lock_irqsave(&tioca_kern->ca_lock, flags); | ||
393 | |||
394 | /* | ||
395 | * allocate a map struct | ||
396 | */ | ||
397 | |||
398 | ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC); | ||
399 | if (!ca_dmamap) | ||
400 | goto map_return; | ||
401 | |||
402 | /* | ||
403 | * Locate free entries that can hold req_size. Account for | ||
404 | * unaligned start/length when allocating. | ||
405 | */ | ||
406 | |||
407 | ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */ | ||
408 | ps_shift = ffs(ps) - 1; | ||
409 | end_xio_addr = xio_addr + req_size - 1; | ||
410 | |||
411 | entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1; | ||
412 | |||
413 | map = tioca_kern->ca_pcigart_pagemap; | ||
414 | mapsize = tioca_kern->ca_pcigart_entries; | ||
415 | |||
416 | entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); | ||
417 | if (entry >= mapsize) { | ||
418 | kfree(ca_dmamap); | ||
419 | goto map_return; | ||
420 | } | ||
421 | |||
422 | bitmap_set(map, entry, entries); | ||
423 | |||
424 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); | ||
425 | |||
426 | ca_dmamap->cad_dma_addr = bus_addr; | ||
427 | ca_dmamap->cad_gart_size = entries; | ||
428 | ca_dmamap->cad_gart_entry = entry; | ||
429 | list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); | ||
430 | |||
431 | if (xio_addr % ps) { | ||
432 | tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); | ||
433 | bus_addr += xio_addr & (ps - 1); | ||
434 | xio_addr &= ~(ps - 1); | ||
435 | xio_addr += ps; | ||
436 | entry++; | ||
437 | } | ||
438 | |||
439 | while (xio_addr < end_xio_addr) { | ||
440 | tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); | ||
441 | xio_addr += ps; | ||
442 | entry++; | ||
443 | } | ||
444 | |||
445 | tioca_tlbflush(tioca_kern); | ||
446 | |||
447 | map_return: | ||
448 | spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); | ||
449 | return bus_addr; | ||
450 | } | ||
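The GART path above is essentially a bitmap allocator: find a run of free entries long enough for the request, mark it used, and derive the bus address from the first entry. Below is a toy, self-contained version of that pattern; the sizes and base address are invented, and the kernel code uses bitmap_find_next_zero_area()/bitmap_set() instead of the naive scan shown here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented sizes for illustration only. */
#define DEMO_ENTRIES   64
#define DEMO_PAGESIZE  4096UL
#define DEMO_AP_BASE   0x80000000UL

static unsigned char used[DEMO_ENTRIES];

/* Find a run of 'count' free entries, mark them used, return the index. */
static long demo_alloc(int count)
{
	for (int start = 0; start + count <= DEMO_ENTRIES; start++) {
		int free_run = 1;

		for (int i = 0; i < count; i++) {
			if (used[start + i]) {
				free_run = 0;
				break;
			}
		}
		if (free_run) {
			memset(&used[start], 1, count);
			return start;
		}
	}
	return -1;
}

int main(void)
{
	long entry = demo_alloc(3);	/* request spans 3 GART pages */

	if (entry >= 0)
		printf("bus addr 0x%lx\n",
		       DEMO_AP_BASE + (unsigned long)entry * DEMO_PAGESIZE);
	return 0;
}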
451 | |||
452 | /** | ||
453 | * tioca_dma_unmap - release CA mapping resources | ||
454 | * @pdev: linux pci_dev representing the function | ||
455 | * @bus_addr: bus address returned by an earlier tioca_dma_map | ||
456 | * @dir: mapping direction (unused) | ||
457 | * | ||
458 | * Locate mapping resources associated with @bus_addr and release them. | ||
459 | * For mappings created using the direct modes (64 or 48) there are no | ||
460 | * resources to release. | ||
461 | */ | ||
462 | static void | ||
463 | tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | ||
464 | { | ||
465 | int i, entry; | ||
466 | struct tioca_common *tioca_common; | ||
467 | struct tioca_kernel *tioca_kern; | ||
468 | struct tioca_dmamap *map; | ||
469 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
470 | unsigned long flags; | ||
471 | |||
472 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
473 | tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; | ||
474 | |||
475 | /* return straight away if this isn't a mapped address */ | ||
476 | |||
477 | if (bus_addr < tioca_kern->ca_pciap_base || | ||
478 | bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size)) | ||
479 | return; | ||
480 | |||
481 | spin_lock_irqsave(&tioca_kern->ca_lock, flags); | ||
482 | |||
483 | list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list) | ||
484 | if (map->cad_dma_addr == bus_addr) | ||
485 | break; | ||
486 | |||
487 | BUG_ON(&map->cad_list == &tioca_kern->ca_dmamaps); | ||
488 | |||
489 | entry = map->cad_gart_entry; | ||
490 | |||
491 | for (i = 0; i < map->cad_gart_size; i++, entry++) { | ||
492 | clear_bit(entry, tioca_kern->ca_pcigart_pagemap); | ||
493 | tioca_kern->ca_pcigart[entry] = 0; | ||
494 | } | ||
495 | tioca_tlbflush(tioca_kern); | ||
496 | |||
497 | list_del(&map->cad_list); | ||
498 | spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); | ||
499 | kfree(map); | ||
500 | } | ||
501 | |||
502 | /** | ||
503 | * tioca_dma_map - map pages for PCI DMA | ||
504 | * @pdev: linux pci_dev representing the function | ||
505 | * @paddr: host physical address to map | ||
506 | * @byte_count: bytes to map | ||
507 | * | ||
508 | * This is the main wrapper for mapping host physical pages to CA PCI space. | ||
509 | * The mapping mode used is based on the devices dma_mask. As a last resort | ||
510 | * use the GART mapped mode. | ||
511 | */ | ||
512 | static u64 | ||
513 | tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) | ||
514 | { | ||
515 | u64 mapaddr; | ||
516 | |||
517 | /* | ||
518 | * Not supported for now ... | ||
519 | */ | ||
520 | if (dma_flags & SN_DMA_MSI) | ||
521 | return 0; | ||
522 | |||
523 | /* | ||
524 | * If card is 64 or 48 bit addressable, use a direct mapping. 32 | ||
525 | * bit direct is so restrictive w.r.t. where the memory resides that | ||
526 | * we don't use it even though CA has some support. | ||
527 | */ | ||
528 | |||
529 | if (pdev->dma_mask == ~0UL) | ||
530 | mapaddr = tioca_dma_d64(paddr); | ||
531 | else if (pdev->dma_mask == 0xffffffffffffUL) | ||
532 | mapaddr = tioca_dma_d48(pdev, paddr); | ||
533 | else | ||
534 | mapaddr = 0; | ||
535 | |||
536 | /* Last resort ... use PCI portion of CA GART */ | ||
537 | |||
538 | if (mapaddr == 0) | ||
539 | mapaddr = tioca_dma_mapped(pdev, paddr, byte_count); | ||
540 | |||
541 | return mapaddr; | ||
542 | } | ||
543 | |||
544 | /** | ||
545 | * tioca_error_intr_handler - SGI TIO CA error interrupt handler | ||
546 | * @irq: unused | ||
547 | * @arg: pointer to tioca_common struct for the given CA | ||
548 | * | ||
549 | * Handle a CA error interrupt. Simply a wrapper around a SAL call which | ||
550 | * defers processing to the SGI prom. | ||
551 | */ | ||
552 | static irqreturn_t | ||
553 | tioca_error_intr_handler(int irq, void *arg) | ||
554 | { | ||
555 | struct tioca_common *soft = arg; | ||
556 | struct ia64_sal_retval ret_stuff; | ||
557 | u64 segment; | ||
558 | u64 busnum; | ||
559 | ret_stuff.status = 0; | ||
560 | ret_stuff.v0 = 0; | ||
561 | |||
562 | segment = soft->ca_common.bs_persist_segment; | ||
563 | busnum = soft->ca_common.bs_persist_busnum; | ||
564 | |||
565 | SAL_CALL_NOLOCK(ret_stuff, | ||
566 | (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | ||
567 | segment, busnum, 0, 0, 0, 0, 0); | ||
568 | |||
569 | return IRQ_HANDLED; | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus | ||
574 | * @prom_bussoft: Common prom/kernel struct representing the bus | ||
575 | * | ||
576 | * Replicates the tioca_common pointed to by @prom_bussoft in kernel | ||
577 | * space. Allocates and initializes a kernel-only area for a given CA, | ||
578 | * and sets up an irq for handling CA error interrupts. | ||
579 | * | ||
580 | * On successful setup, returns the kernel version of tioca_common back to | ||
581 | * the caller. | ||
582 | */ | ||
583 | static void * | ||
584 | tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | ||
585 | { | ||
586 | struct tioca_common *tioca_common; | ||
587 | struct tioca_kernel *tioca_kern; | ||
588 | struct pci_bus *bus; | ||
589 | |||
590 | /* sanity check prom rev */ | ||
591 | |||
592 | if (is_shub1() && sn_sal_rev() < 0x0406) { | ||
593 | printk | ||
594 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " | ||
595 | "for tioca support\n", __func__); | ||
596 | return NULL; | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * Allocate kernel bus soft and copy from prom. | ||
601 | */ | ||
602 | |||
603 | tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common), | ||
604 | GFP_KERNEL); | ||
605 | if (!tioca_common) | ||
606 | return NULL; | ||
607 | |||
608 | tioca_common->ca_common.bs_base = (unsigned long) | ||
609 | ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base), | ||
610 | sizeof(struct tioca_common)); | ||
611 | |||
612 | /* init kernel-private area */ | ||
613 | |||
614 | tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL); | ||
615 | if (!tioca_kern) { | ||
616 | kfree(tioca_common); | ||
617 | return NULL; | ||
618 | } | ||
619 | |||
620 | tioca_kern->ca_common = tioca_common; | ||
621 | spin_lock_init(&tioca_kern->ca_lock); | ||
622 | INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); | ||
623 | tioca_kern->ca_closest_node = | ||
624 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); | ||
625 | tioca_common->ca_kernel_private = (u64) tioca_kern; | ||
626 | |||
627 | bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, | ||
628 | tioca_common->ca_common.bs_persist_busnum); | ||
629 | BUG_ON(!bus); | ||
630 | tioca_kern->ca_devices = &bus->devices; | ||
631 | |||
632 | /* init GART */ | ||
633 | |||
634 | if (tioca_gart_init(tioca_kern) < 0) { | ||
635 | kfree(tioca_kern); | ||
636 | kfree(tioca_common); | ||
637 | return NULL; | ||
638 | } | ||
639 | |||
640 | tioca_gart_found++; | ||
641 | list_add(&tioca_kern->ca_list, &tioca_list); | ||
642 | |||
643 | if (request_irq(SGI_TIOCA_ERROR, | ||
644 | tioca_error_intr_handler, | ||
645 | IRQF_SHARED, "TIOCA error", (void *)tioca_common)) | ||
646 | printk(KERN_WARNING | ||
647 | "%s: Unable to get irq %d. " | ||
648 | "Error interrupts won't be routed for TIOCA bus %d\n", | ||
649 | __func__, SGI_TIOCA_ERROR, | ||
650 | (int)tioca_common->ca_common.bs_persist_busnum); | ||
651 | |||
652 | irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq); | ||
653 | sn_set_err_irq_affinity(SGI_TIOCA_ERROR); | ||
654 | |||
655 | /* Setup locality information */ | ||
656 | controller->node = tioca_kern->ca_closest_node; | ||
657 | return tioca_common; | ||
658 | } | ||
659 | |||
660 | static struct sn_pcibus_provider tioca_pci_interfaces = { | ||
661 | .dma_map = tioca_dma_map, | ||
662 | .dma_map_consistent = tioca_dma_map, | ||
663 | .dma_unmap = tioca_dma_unmap, | ||
664 | .bus_fixup = tioca_bus_fixup, | ||
665 | .force_interrupt = NULL, | ||
666 | .target_interrupt = NULL | ||
667 | }; | ||
668 | |||
669 | /** | ||
670 | * tioca_init_provider - init SN PCI provider ops for TIO CA | ||
671 | */ | ||
672 | int | ||
673 | tioca_init_provider(void) | ||
674 | { | ||
675 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; | ||
676 | return 0; | ||
677 | } | ||
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c deleted file mode 100644 index 3bd9abc35485..000000000000 --- a/arch/ia64/sn/pci/tioce_provider.c +++ /dev/null | |||
@@ -1,1062 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/pci.h> | ||
13 | #include <asm/sn/sn_sal.h> | ||
14 | #include <asm/sn/addrs.h> | ||
15 | #include <asm/sn/io.h> | ||
16 | #include <asm/sn/pcidev.h> | ||
17 | #include <asm/sn/pcibus_provider_defs.h> | ||
18 | #include <asm/sn/tioce_provider.h> | ||
19 | |||
20 | /* | ||
21 | * 1/26/2006 | ||
22 | * | ||
23 | * WAR for SGI PV 944642. For revA TIOCE, need to use the following recipe | ||
24 | * (taken from the above PV) before and after accessing tioce internal MMR's | ||
25 | * to avoid tioce lockups. | ||
26 | * | ||
27 | * The recipe as taken from the PV: | ||
28 | * | ||
29 | * if(mmr address < 0x45000) { | ||
30 | * if(mmr address == 0 or 0x80) | ||
31 | * mmr wrt or read address 0xc0 | ||
32 | * else if(mmr address == 0x148 or 0x200) | ||
33 | * mmr wrt or read address 0x28 | ||
34 | * else | ||
35 | * mmr wrt or read address 0x158 | ||
36 | * | ||
37 | * do desired mmr access (rd or wrt) | ||
38 | * | ||
39 | * if(mmr address == 0x100) | ||
40 | * mmr wrt or read address 0x38 | ||
41 | * mmr wrt or read address 0xb050 | ||
42 | * } else | ||
43 | * do desired mmr access | ||
44 | * | ||
45 | * According to hw, we can use reads instead of writes to the above address | ||
46 | * | ||
47 | * Note this WAR can only be used for accessing internal MMR's in the | ||
48 | * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the | ||
49 | * "Local CE Registers and Memories" and "PCI Compatible Config Space" address | ||
50 | * spaces from table 2-1 of the "CE Programmer's Reference Overview" document. | ||
51 | * | ||
52 | * All registers defined in struct tioce will meet that criteria. | ||
53 | */ | ||
54 | |||
55 | static inline void | ||
56 | tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr) | ||
57 | { | ||
58 | u64 mmr_base; | ||
59 | u64 mmr_offset; | ||
60 | |||
61 | if (kern->ce_common->ce_rev != TIOCE_REV_A) | ||
62 | return; | ||
63 | |||
64 | mmr_base = kern->ce_common->ce_pcibus.bs_base; | ||
65 | mmr_offset = (unsigned long)mmr_addr - mmr_base; | ||
66 | |||
67 | if (mmr_offset < 0x45000) { | ||
68 | u64 mmr_war_offset; | ||
69 | |||
70 | if (mmr_offset == 0 || mmr_offset == 0x80) | ||
71 | mmr_war_offset = 0xc0; | ||
72 | else if (mmr_offset == 0x148 || mmr_offset == 0x200) | ||
73 | mmr_war_offset = 0x28; | ||
74 | else | ||
75 | mmr_war_offset = 0x158; | ||
76 | |||
77 | readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset)); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static inline void | ||
82 | tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr) | ||
83 | { | ||
84 | u64 mmr_base; | ||
85 | u64 mmr_offset; | ||
86 | |||
87 | if (kern->ce_common->ce_rev != TIOCE_REV_A) | ||
88 | return; | ||
89 | |||
90 | mmr_base = kern->ce_common->ce_pcibus.bs_base; | ||
91 | mmr_offset = (unsigned long)mmr_addr - mmr_base; | ||
92 | |||
93 | if (mmr_offset < 0x45000) { | ||
94 | if (mmr_offset == 0x100) | ||
95 | readq_relaxed((void __iomem *)(mmr_base + 0x38)); | ||
96 | readq_relaxed((void __iomem *)(mmr_base + 0xb050)); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | /* load mmr contents into a variable */ | ||
101 | #define tioce_mmr_load(kern, mmrp, varp) do {\ | ||
102 | tioce_mmr_war_pre(kern, mmrp); \ | ||
103 | *(varp) = readq_relaxed(mmrp); \ | ||
104 | tioce_mmr_war_post(kern, mmrp); \ | ||
105 | } while (0) | ||
106 | |||
107 | /* store variable contents into mmr */ | ||
108 | #define tioce_mmr_store(kern, mmrp, varp) do {\ | ||
109 | tioce_mmr_war_pre(kern, mmrp); \ | ||
110 | writeq(*varp, mmrp); \ | ||
111 | tioce_mmr_war_post(kern, mmrp); \ | ||
112 | } while (0) | ||
113 | |||
114 | /* store immediate value into mmr */ | ||
115 | #define tioce_mmr_storei(kern, mmrp, val) do {\ | ||
116 | tioce_mmr_war_pre(kern, mmrp); \ | ||
117 | writeq(val, mmrp); \ | ||
118 | tioce_mmr_war_post(kern, mmrp); \ | ||
119 | } while (0) | ||
120 | |||
121 | /* set bits (immediate value) into mmr */ | ||
122 | #define tioce_mmr_seti(kern, mmrp, bits) do {\ | ||
123 | u64 tmp; \ | ||
124 | tioce_mmr_load(kern, mmrp, &tmp); \ | ||
125 | tmp |= (bits); \ | ||
126 | tioce_mmr_store(kern, mmrp, &tmp); \ | ||
127 | } while (0) | ||
128 | |||
129 | /* clear bits (immediate value) in mmr */ | ||
130 | #define tioce_mmr_clri(kern, mmrp, bits) do { \ | ||
131 | u64 tmp; \ | ||
132 | tioce_mmr_load(kern, mmrp, &tmp); \ | ||
133 | tmp &= ~(bits); \ | ||
134 | tioce_mmr_store(kern, mmrp, &tmp); \ | ||
135 | } while (0) | ||
136 | |||
137 | /** | ||
138 | * Bus address ranges for the 5 flavors of TIOCE DMA | ||
139 | */ | ||
140 | |||
141 | #define TIOCE_D64_MIN 0x8000000000000000UL | ||
142 | #define TIOCE_D64_MAX 0xffffffffffffffffUL | ||
143 | #define TIOCE_D64_ADDR(a) ((a) >= TIOCE_D64_MIN) | ||
144 | |||
145 | #define TIOCE_D32_MIN 0x0000000080000000UL | ||
146 | #define TIOCE_D32_MAX 0x00000000ffffffffUL | ||
147 | #define TIOCE_D32_ADDR(a) ((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX) | ||
148 | |||
149 | #define TIOCE_M32_MIN 0x0000000000000000UL | ||
150 | #define TIOCE_M32_MAX 0x000000007fffffffUL | ||
151 | #define TIOCE_M32_ADDR(a) ((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX) | ||
152 | |||
153 | #define TIOCE_M40_MIN 0x0000004000000000UL | ||
154 | #define TIOCE_M40_MAX 0x0000007fffffffffUL | ||
155 | #define TIOCE_M40_ADDR(a) ((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX) | ||
156 | |||
157 | #define TIOCE_M40S_MIN 0x0000008000000000UL | ||
158 | #define TIOCE_M40S_MAX 0x000000ffffffffffUL | ||
159 | #define TIOCE_M40S_ADDR(a) ((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX) | ||
160 | |||
161 | /* | ||
162 | * ATE manipulation macros. | ||
163 | */ | ||
164 | |||
165 | #define ATE_PAGESHIFT(ps) (__ffs(ps)) | ||
166 | #define ATE_PAGEMASK(ps) ((ps)-1) | ||
167 | |||
168 | #define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps)) | ||
169 | #define ATE_NPAGES(start, len, pagesize) \ | ||
170 | (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) | ||
171 | |||
172 | #define ATE_VALID(ate) ((ate) & (1UL << 63)) | ||
173 | #define ATE_MAKE(addr, ps, msi) \ | ||
174 | (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0)) | ||
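The ATE page arithmetic is easy to get wrong for unaligned starts, so a small worked example may help. The helpers below mirror ATE_PAGE/ATE_NPAGES using plain division, which is equivalent to the __ffs()-based shift above for power-of-two page sizes; all input values are made up.

#include <stdint.h>
#include <stdio.h>

/* Plain-division stand-ins for ATE_PAGE / ATE_NPAGES. */
#define DEMO_PAGE(x, ps)  ((x) / (ps))
#define DEMO_NPAGES(start, len, ps) \
	(DEMO_PAGE((start) + (len) - 1, ps) - DEMO_PAGE(start, ps) + 1)

int main(void)
{
	uint64_t ps = 4096;

	/* 8 KB starting on a page boundary: exactly 2 ATEs. */
	printf("%llu\n", (unsigned long long)DEMO_NPAGES(0x10000ULL, 8192, ps));
	/* 8 KB starting 0x100 bytes into a page: 3 ATEs. */
	printf("%llu\n", (unsigned long long)DEMO_NPAGES(0x10100ULL, 8192, ps));
	return 0;
}

The second case shows why an unaligned 8 KB request can consume three ATEs rather than two.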
175 | |||
176 | /* | ||
177 | * Flavors of ate-based mapping supported by tioce_alloc_map() | ||
178 | */ | ||
179 | |||
180 | #define TIOCE_ATE_M32 1 | ||
181 | #define TIOCE_ATE_M40 2 | ||
182 | #define TIOCE_ATE_M40S 3 | ||
183 | |||
184 | #define KB(x) ((u64)(x) << 10) | ||
185 | #define MB(x) ((u64)(x) << 20) | ||
186 | #define GB(x) ((u64)(x) << 30) | ||
187 | |||
188 | /** | ||
189 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode | ||
190 | * @ct_addr: system coretalk address | ||
191 | * | ||
192 | * Map @ct_addr into 64-bit CE bus space. No device context is necessary | ||
193 | * and no CE mapping are consumed. | ||
194 | * | ||
195 | * Bits 53:0 come from the coretalk address. The remaining bits are set as | ||
196 | * follows: | ||
197 | * | ||
198 | * 63 - must be 1 to indicate d64 mode to CE hardware | ||
199 | * 62 - barrier bit ... controlled with tioce_dma_barrier() | ||
200 | * 61 - msi bit ... specified through dma_flags | ||
201 | * 60:54 - reserved, MBZ | ||
202 | */ | ||
203 | static u64 | ||
204 | tioce_dma_d64(unsigned long ct_addr, int dma_flags) | ||
205 | { | ||
206 | u64 bus_addr; | ||
207 | |||
208 | bus_addr = ct_addr | (1UL << 63); | ||
209 | if (dma_flags & SN_DMA_MSI) | ||
210 | bus_addr |= (1UL << 61); | ||
211 | |||
212 | return bus_addr; | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * pcidev_to_tioce - return misc ce related pointers given a pci_dev | ||
217 | * @pdev: pci device context | ||
218 | * @base: ptr to store struct tioce __iomem * for the CE holding this device | ||
219 | * @kernel: ptr to store struct tioce_kernel * for the CE holding this device | ||
220 | * @port: ptr to store the CE port number that this device is on | ||
221 | * | ||
222 | * Return pointers to various CE-related structures for the CE upstream of | ||
223 | * @pci_dev. | ||
224 | */ | ||
225 | static inline void | ||
226 | pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base, | ||
227 | struct tioce_kernel **kernel, int *port) | ||
228 | { | ||
229 | struct pcidev_info *pcidev_info; | ||
230 | struct tioce_common *ce_common; | ||
231 | struct tioce_kernel *ce_kernel; | ||
232 | |||
233 | pcidev_info = SN_PCIDEV_INFO(pdev); | ||
234 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
235 | ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private; | ||
236 | |||
237 | if (base) | ||
238 | *base = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base; | ||
239 | if (kernel) | ||
240 | *kernel = ce_kernel; | ||
241 | |||
242 | /* | ||
243 | * we use port as a zero-based value internally, even though the | ||
244 | * documentation is 1-based. | ||
245 | */ | ||
246 | if (port) | ||
247 | *port = | ||
248 | (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1; | ||
249 | } | ||
250 | |||
251 | /** | ||
252 | * tioce_alloc_map - Given a coretalk address, map it to pcie bus address | ||
253 | * space using one of the various ATE-based address modes. | ||
254 | * @ce_kern: tioce context | ||
255 | * @type: map mode to use | ||
256 | * @port: 0-based port that the requesting device is downstream of | ||
257 | * @ct_addr: the coretalk address to map | ||
258 | * @len: number of bytes to map | ||
259 | * | ||
260 | * Given the addressing type, set up various parameters that define the | ||
261 | * ATE pool to use. Search for a contiguous block of entries to cover the | ||
262 | * length, and if enough resources exist, fill in the ATEs and construct a | ||
263 | * tioce_dmamap struct to track the mapping. | ||
264 | */ | ||
265 | static u64 | ||
266 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | ||
267 | u64 ct_addr, int len, int dma_flags) | ||
268 | { | ||
269 | int i; | ||
270 | int j; | ||
271 | int first; | ||
272 | int last; | ||
273 | int entries; | ||
274 | int nates; | ||
275 | u64 pagesize; | ||
276 | int msi_capable, msi_wanted; | ||
277 | u64 *ate_shadow; | ||
278 | u64 __iomem *ate_reg; | ||
279 | u64 addr; | ||
280 | struct tioce __iomem *ce_mmr; | ||
281 | u64 bus_base; | ||
282 | struct tioce_dmamap *map; | ||
283 | |||
284 | ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base; | ||
285 | |||
286 | switch (type) { | ||
287 | case TIOCE_ATE_M32: | ||
288 | /* | ||
289 | * The first 64 entries of the ate3240 pool are dedicated to | ||
290 | * super-page (TIOCE_ATE_M40S) mode. | ||
291 | */ | ||
292 | first = 64; | ||
293 | entries = TIOCE_NUM_M3240_ATES - 64; | ||
294 | ate_shadow = ce_kern->ce_ate3240_shadow; | ||
295 | ate_reg = ce_mmr->ce_ure_ate3240; | ||
296 | pagesize = ce_kern->ce_ate3240_pagesize; | ||
297 | bus_base = TIOCE_M32_MIN; | ||
298 | msi_capable = 1; | ||
299 | break; | ||
300 | case TIOCE_ATE_M40: | ||
301 | first = 0; | ||
302 | entries = TIOCE_NUM_M40_ATES; | ||
303 | ate_shadow = ce_kern->ce_ate40_shadow; | ||
304 | ate_reg = ce_mmr->ce_ure_ate40; | ||
305 | pagesize = MB(64); | ||
306 | bus_base = TIOCE_M40_MIN; | ||
307 | msi_capable = 0; | ||
308 | break; | ||
309 | case TIOCE_ATE_M40S: | ||
310 | /* | ||
311 | * ate3240 entries 0-31 are dedicated to port1 super-page | ||
312 | * mappings. ate3240 entries 32-63 are dedicated to port2. | ||
313 | */ | ||
314 | first = port * 32; | ||
315 | entries = 32; | ||
316 | ate_shadow = ce_kern->ce_ate3240_shadow; | ||
317 | ate_reg = ce_mmr->ce_ure_ate3240; | ||
318 | pagesize = GB(16); | ||
319 | bus_base = TIOCE_M40S_MIN; | ||
320 | msi_capable = 0; | ||
321 | break; | ||
322 | default: | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | msi_wanted = dma_flags & SN_DMA_MSI; | ||
327 | if (msi_wanted && !msi_capable) | ||
328 | return 0; | ||
329 | |||
330 | nates = ATE_NPAGES(ct_addr, len, pagesize); | ||
331 | if (nates > entries) | ||
332 | return 0; | ||
333 | |||
334 | last = first + entries - nates; | ||
335 | for (i = first; i <= last; i++) { | ||
336 | if (ATE_VALID(ate_shadow[i])) | ||
337 | continue; | ||
338 | |||
339 | for (j = i; j < i + nates; j++) | ||
340 | if (ATE_VALID(ate_shadow[j])) | ||
341 | break; | ||
342 | |||
343 | if (j >= i + nates) | ||
344 | break; | ||
345 | } | ||
346 | |||
347 | if (i > last) | ||
348 | return 0; | ||
349 | |||
350 | map = kzalloc(sizeof(struct tioce_dmamap), GFP_ATOMIC); | ||
351 | if (!map) | ||
352 | return 0; | ||
353 | |||
354 | addr = ct_addr; | ||
355 | for (j = 0; j < nates; j++) { | ||
356 | u64 ate; | ||
357 | |||
358 | ate = ATE_MAKE(addr, pagesize, msi_wanted); | ||
359 | ate_shadow[i + j] = ate; | ||
360 | tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate); | ||
361 | addr += pagesize; | ||
362 | } | ||
363 | |||
364 | map->refcnt = 1; | ||
365 | map->nbytes = nates * pagesize; | ||
366 | map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize); | ||
367 | map->pci_start = bus_base + (i * pagesize); | ||
368 | map->ate_hw = &ate_reg[i]; | ||
369 | map->ate_shadow = &ate_shadow[i]; | ||
370 | map->ate_count = nates; | ||
371 | |||
372 | list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list); | ||
373 | |||
374 | return (map->pci_start + (ct_addr - map->ct_start)); | ||
375 | } | ||
376 | |||
377 | /** | ||
378 | * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode | ||
379 | * @pdev: linux pci_dev representing the function | ||
380 | * @ct_addr: system coretalk address to map | ||
381 | * | ||
382 | * Map @ct_addr into 32-bit bus space of the CE upstream of @pdev. | ||
383 | */ | ||
384 | static u64 | ||
385 | tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags) | ||
386 | { | ||
387 | int dma_ok; | ||
388 | int port; | ||
389 | struct tioce __iomem *ce_mmr; | ||
390 | struct tioce_kernel *ce_kern; | ||
391 | u64 ct_upper; | ||
392 | u64 ct_lower; | ||
393 | dma_addr_t bus_addr; | ||
394 | |||
395 | if (dma_flags & SN_DMA_MSI) | ||
396 | return 0; | ||
397 | |||
398 | ct_upper = ct_addr & ~0x3fffffffUL; | ||
399 | ct_lower = ct_addr & 0x3fffffffUL; | ||
400 | |||
401 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | ||
402 | |||
403 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { | ||
404 | u64 tmp; | ||
405 | |||
406 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; | ||
407 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port], | ||
408 | ct_upper); | ||
409 | tmp = ce_mmr->ce_ure_dir_map[port]; | ||
410 | dma_ok = 1; | ||
411 | } else | ||
412 | dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper); | ||
413 | |||
414 | if (dma_ok) { | ||
415 | ce_kern->ce_port[port].dirmap_refcnt++; | ||
416 | bus_addr = TIOCE_D32_MIN + ct_lower; | ||
417 | } else | ||
418 | bus_addr = 0; | ||
419 | |||
420 | return bus_addr; | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude | ||
425 | * the barrier bit. | ||
426 | * @bus_addr: bus address to swizzle | ||
427 | * | ||
428 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier | ||
429 | * attributes. | ||
430 | */ | ||
431 | static u64 | ||
432 | tioce_dma_barrier(u64 bus_addr, int on) | ||
433 | { | ||
434 | u64 barrier_bit; | ||
435 | |||
436 | /* barrier not supported in M40/M40S mode */ | ||
437 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) | ||
438 | return bus_addr; | ||
439 | |||
440 | if (TIOCE_D64_ADDR(bus_addr)) | ||
441 | barrier_bit = (1UL << 62); | ||
442 | else /* must be m32 or d32 */ | ||
443 | barrier_bit = (1UL << 30); | ||
444 | |||
445 | return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit); | ||
446 | } | ||
447 | |||
448 | /** | ||
449 | * tioce_dma_unmap - release CE mapping resources | ||
450 | * @pdev: linux pci_dev representing the function | ||
451 | * @bus_addr: bus address returned by an earlier tioce_dma_map | ||
452 | * @dir: mapping direction (unused) | ||
453 | * | ||
454 | * Locate mapping resources associated with @bus_addr and release them. | ||
455 | * For mappings created using the direct modes there are no resources | ||
456 | * to release. | ||
457 | */ | ||
458 | void | ||
459 | tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | ||
460 | { | ||
461 | int i; | ||
462 | int port; | ||
463 | struct tioce_kernel *ce_kern; | ||
464 | struct tioce __iomem *ce_mmr; | ||
465 | unsigned long flags; | ||
466 | |||
467 | bus_addr = tioce_dma_barrier(bus_addr, 0); | ||
468 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | ||
469 | |||
470 | /* nothing to do for D64 */ | ||
471 | |||
472 | if (TIOCE_D64_ADDR(bus_addr)) | ||
473 | return; | ||
474 | |||
475 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | ||
476 | |||
477 | if (TIOCE_D32_ADDR(bus_addr)) { | ||
478 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { | ||
479 | ce_kern->ce_port[port].dirmap_shadow = 0; | ||
480 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port], | ||
481 | 0); | ||
482 | } | ||
483 | } else { | ||
484 | struct tioce_dmamap *map; | ||
485 | |||
486 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, | ||
487 | ce_dmamap_list) { | ||
488 | u64 last; | ||
489 | |||
490 | last = map->pci_start + map->nbytes - 1; | ||
491 | if (bus_addr >= map->pci_start && bus_addr <= last) | ||
492 | break; | ||
493 | } | ||
494 | |||
495 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { | ||
496 | printk(KERN_WARNING | ||
497 | "%s: %s - no map found for bus_addr 0x%llx\n", | ||
498 | __func__, pci_name(pdev), bus_addr); | ||
499 | } else if (--map->refcnt == 0) { | ||
500 | for (i = 0; i < map->ate_count; i++) { | ||
501 | map->ate_shadow[i] = 0; | ||
502 | tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0); | ||
503 | } | ||
504 | |||
505 | list_del(&map->ce_dmamap_list); | ||
506 | kfree(map); | ||
507 | } | ||
508 | } | ||
509 | |||
510 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * tioce_do_dma_map - map pages for PCI DMA | ||
515 | * @pdev: linux pci_dev representing the function | ||
516 | * @paddr: host physical address to map | ||
517 | * @byte_count: bytes to map | ||
518 | * | ||
519 | * This is the main wrapper for mapping host physical pages to CE PCI space. | ||
520 | * The mapping mode used is based on the device's dma_mask. | ||
521 | */ | ||
522 | static u64 | ||
523 | tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | ||
524 | int barrier, int dma_flags) | ||
525 | { | ||
526 | unsigned long flags; | ||
527 | u64 ct_addr; | ||
528 | u64 mapaddr = 0; | ||
529 | struct tioce_kernel *ce_kern; | ||
530 | struct tioce_dmamap *map; | ||
531 | int port; | ||
532 | u64 dma_mask; | ||
533 | |||
534 | dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; | ||
535 | |||
536 | /* cards must be able to address at least 31 bits */ | ||
537 | if (dma_mask < 0x7fffffffUL) | ||
538 | return 0; | ||
539 | |||
540 | if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) | ||
541 | ct_addr = PHYS_TO_TIODMA(paddr); | ||
542 | else | ||
543 | ct_addr = paddr; | ||
544 | |||
545 | /* | ||
546 | * If the device can generate 64 bit addresses, create a D64 map. | ||
547 | */ | ||
548 | if (dma_mask == ~0UL) { | ||
549 | mapaddr = tioce_dma_d64(ct_addr, dma_flags); | ||
550 | if (mapaddr) | ||
551 | goto dma_map_done; | ||
552 | } | ||
553 | |||
554 | pcidev_to_tioce(pdev, NULL, &ce_kern, &port); | ||
555 | |||
556 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | ||
557 | |||
558 | /* | ||
559 | * D64 didn't work ... See if we have an existing map that covers | ||
560 | * this address range. Must account for devices dma_mask here since | ||
561 | * an existing map might have been done in a mode using more pci | ||
562 | * address bits than this device can support. | ||
563 | */ | ||
564 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { | ||
565 | u64 last; | ||
566 | |||
567 | last = map->ct_start + map->nbytes - 1; | ||
568 | if (ct_addr >= map->ct_start && | ||
569 | ct_addr + byte_count - 1 <= last && | ||
570 | map->pci_start <= dma_mask) { | ||
571 | map->refcnt++; | ||
572 | mapaddr = map->pci_start + (ct_addr - map->ct_start); | ||
573 | break; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * If we don't have a map yet, and the card can generate 40 | ||
579 | * bit addresses, try the M40/M40S modes. Note these modes do not | ||
580 | * support a barrier bit, so if we need a consistent map these | ||
581 | * won't work. | ||
582 | */ | ||
583 | if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { | ||
584 | /* | ||
585 | * We have two options for 40-bit mappings: 16GB "super" ATEs | ||
586 | * and 64MB "regular" ATEs. We'll try both if needed for a | ||
587 | * given mapping but which one we try first depends on the | ||
588 | * size. For requests >64MB, prefer to use a super page with | ||
589 | * regular as the fallback. Otherwise, try in the reverse order. | ||
590 | */ | ||
591 | |||
592 | if (byte_count > MB(64)) { | ||
593 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | ||
594 | port, ct_addr, byte_count, | ||
595 | dma_flags); | ||
596 | if (!mapaddr) | ||
597 | mapaddr = | ||
598 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | ||
599 | ct_addr, byte_count, | ||
600 | dma_flags); | ||
601 | } else { | ||
602 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | ||
603 | ct_addr, byte_count, | ||
604 | dma_flags); | ||
605 | if (!mapaddr) | ||
606 | mapaddr = | ||
607 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | ||
608 | port, ct_addr, byte_count, | ||
609 | dma_flags); | ||
610 | } | ||
611 | } | ||
612 | |||
613 | /* | ||
614 | * 32-bit direct is the next mode to try | ||
615 | */ | ||
616 | if (!mapaddr && dma_mask >= 0xffffffffUL) | ||
617 | mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags); | ||
618 | |||
619 | /* | ||
620 | * Last resort, try 32-bit ATE-based map. | ||
621 | */ | ||
622 | if (!mapaddr) | ||
623 | mapaddr = | ||
624 | tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, | ||
625 | byte_count, dma_flags); | ||
626 | |||
627 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | ||
628 | |||
629 | dma_map_done: | ||
630 | if (mapaddr && barrier) | ||
631 | mapaddr = tioce_dma_barrier(mapaddr, 1); | ||
632 | |||
633 | return mapaddr; | ||
634 | } | ||
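The cascade of fallbacks in tioce_do_dma_map() is easier to see stripped of locking and hardware details. The sketch below is illustrative only; the try_*() helpers are invented stand-ins for the real D64/ATE/D32 paths, each returning 0 when its mode cannot satisfy the request, exactly as the real helpers do.

#include <stdint.h>
#include <stdio.h>

/* Invented stubs standing in for the real mapping helpers. */
static uint64_t try_d64(void)              { return 0; }
static uint64_t try_existing_ate_map(void) { return 0; }
static uint64_t try_m40_or_m40s(void)      { return 0; }
static uint64_t try_d32(void)              { return 0x80001000ULL; }
static uint64_t try_m32_ate(void)          { return 0x00002000ULL; }

static uint64_t demo_ce_map(uint64_t dma_mask, int barrier)
{
	uint64_t addr = 0;

	if (dma_mask == ~0ULL)
		addr = try_d64();                               /* 64-bit direct  */
	if (!addr)
		addr = try_existing_ate_map();                  /* reuse a map    */
	if (!addr && !barrier && dma_mask >= 0xffffffffffULL)
		addr = try_m40_or_m40s();                       /* 40-bit ATEs    */
	if (!addr && dma_mask >= 0xffffffffULL)
		addr = try_d32();                               /* 32-bit direct  */
	if (!addr)
		addr = try_m32_ate();                           /* last resort    */
	return addr;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)demo_ce_map(0xffffffffULL, 0));
	return 0;
}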
635 | |||
636 | /** | ||
637 | * tioce_dma - standard pci dma map interface | ||
638 | * @pdev: pci device requesting the map | ||
639 | * @paddr: system physical address to map into pci space | ||
640 | * @byte_count: # bytes to map | ||
641 | * | ||
642 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear | ||
643 | * in the address. | ||
644 | */ | ||
645 | static u64 | ||
646 | tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) | ||
647 | { | ||
648 | return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags); | ||
649 | } | ||
650 | |||
651 | /** | ||
652 | * tioce_dma_consistent - consistent pci dma map interface | ||
653 | * @pdev: pci device requesting the map | ||
654 | * @paddr: system physical address to map into pci space | ||
655 | * @byte_count: # bytes to map | ||
656 | * | ||
657 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | ||
658 | * in the address. | ||
659 | */ | ||
660 | static u64 | ||
661 | tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) | ||
662 | { | ||
663 | return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); | ||
664 | } | ||
665 | |||
666 | /** | ||
667 | * tioce_error_intr_handler - SGI TIO CE error interrupt handler | ||
668 | * @irq: unused | ||
669 | * @arg: pointer to tioce_common struct for the given CE | ||
670 | * | ||
671 | * Handle a CE error interrupt. Simply a wrapper around a SAL call which | ||
672 | * defers processing to the SGI prom. | ||
673 | */ | ||
674 | static irqreturn_t | ||
675 | tioce_error_intr_handler(int irq, void *arg) | ||
676 | { | ||
677 | struct tioce_common *soft = arg; | ||
678 | struct ia64_sal_retval ret_stuff; | ||
679 | ret_stuff.status = 0; | ||
680 | ret_stuff.v0 = 0; | ||
681 | |||
682 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | ||
683 | soft->ce_pcibus.bs_persist_segment, | ||
684 | soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0); | ||
685 | |||
686 | if (ret_stuff.v0) | ||
687 | panic("tioce_error_intr_handler: Fatal TIOCE error"); | ||
688 | |||
689 | return IRQ_HANDLED; | ||
690 | } | ||
691 | |||
692 | /** | ||
693 | * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range | ||
694 | * @ce_kern: TIOCE context to reserve ATEs for | ||
695 | * @base: starting bus address to reserve | ||
696 | * @limit: last bus address to reserve | ||
697 | * | ||
698 | * If base/limit falls within the range of bus space mapped through the | ||
699 | * M32 space, reserve the resources corresponding to the range. | ||
700 | */ | ||
701 | static void | ||
702 | tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit) | ||
703 | { | ||
704 | int ate_index, last_ate, ps; | ||
705 | struct tioce __iomem *ce_mmr; | ||
706 | |||
707 | ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base; | ||
708 | ps = ce_kern->ce_ate3240_pagesize; | ||
709 | ate_index = ATE_PAGE(base, ps); | ||
710 | last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1; | ||
711 | |||
712 | if (ate_index < 64) | ||
713 | ate_index = 64; | ||
714 | |||
715 | if (last_ate >= TIOCE_NUM_M3240_ATES) | ||
716 | last_ate = TIOCE_NUM_M3240_ATES - 1; | ||
717 | |||
718 | while (ate_index <= last_ate) { | ||
719 | u64 ate; | ||
720 | |||
721 | ate = ATE_MAKE(0xdeadbeef, ps, 0); | ||
722 | ce_kern->ce_ate3240_shadow[ate_index] = ate; | ||
723 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index], | ||
724 | ate); | ||
725 | ate_index++; | ||
726 | } | ||
727 | } | ||
728 | |||
729 | /** | ||
730 | * tioce_kern_init - init kernel structures related to a given TIOCE | ||
731 | * @tioce_common: ptr to a cached tioce_common struct that originated in prom | ||
732 | */ | ||
733 | static struct tioce_kernel * | ||
734 | tioce_kern_init(struct tioce_common *tioce_common) | ||
735 | { | ||
736 | int i; | ||
737 | int ps; | ||
738 | int dev; | ||
739 | u32 tmp; | ||
740 | unsigned int seg, bus; | ||
741 | struct tioce __iomem *tioce_mmr; | ||
742 | struct tioce_kernel *tioce_kern; | ||
743 | |||
744 | tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL); | ||
745 | if (!tioce_kern) { | ||
746 | return NULL; | ||
747 | } | ||
748 | |||
749 | tioce_kern->ce_common = tioce_common; | ||
750 | spin_lock_init(&tioce_kern->ce_lock); | ||
751 | INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); | ||
752 | tioce_common->ce_kernel_private = (u64) tioce_kern; | ||
753 | |||
754 | /* | ||
755 | * Determine the secondary bus number of the port2 logical PPB. | ||
756 | * This is used to decide whether a given pci device resides on | ||
757 | * port1 or port2. Note: We don't have enough plumbing set up | ||
758 | * here to use pci_read_config_xxx() so use raw_pci_read(). | ||
759 | */ | ||
760 | |||
761 | seg = tioce_common->ce_pcibus.bs_persist_segment; | ||
762 | bus = tioce_common->ce_pcibus.bs_persist_busnum; | ||
763 | |||
764 | raw_pci_read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); | ||
765 | tioce_kern->ce_port1_secondary = (u8) tmp; | ||
766 | |||
767 | /* | ||
768 | * Set PMU pagesize to the largest size available, and zero out | ||
769 | * the ATEs. | ||
770 | */ | ||
771 | |||
772 | tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base; | ||
773 | tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map, | ||
774 | CE_URE_PAGESIZE_MASK); | ||
775 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map, | ||
776 | CE_URE_256K_PAGESIZE); | ||
777 | ps = tioce_kern->ce_ate3240_pagesize = KB(256); | ||
778 | |||
779 | for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { | ||
780 | tioce_kern->ce_ate40_shadow[i] = 0; | ||
781 | tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0); | ||
782 | } | ||
783 | |||
784 | for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { | ||
785 | tioce_kern->ce_ate3240_shadow[i] = 0; | ||
786 | tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0); | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * Reserve ATEs corresponding to reserved address ranges. These | ||
791 | * include: | ||
792 | * | ||
793 | * Memory space covered by each PPB mem base/limit register | ||
794 | * Memory space covered by each PPB prefetch base/limit register | ||
795 | * | ||
796 | * These bus ranges are for pio (downstream) traffic only, and so | ||
797 | * cannot be used for DMA. | ||
798 | */ | ||
799 | |||
800 | for (dev = 1; dev <= 2; dev++) { | ||
801 | u64 base, limit; | ||
802 | |||
803 | /* mem base/limit */ | ||
804 | |||
805 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
806 | PCI_MEMORY_BASE, 2, &tmp); | ||
807 | base = (u64)tmp << 16; | ||
808 | |||
809 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
810 | PCI_MEMORY_LIMIT, 2, &tmp); | ||
811 | limit = (u64)tmp << 16; | ||
812 | limit |= 0xfffffUL; | ||
813 | |||
814 | if (base < limit) | ||
815 | tioce_reserve_m32(tioce_kern, base, limit); | ||
816 | |||
817 | /* | ||
818 | * prefetch mem base/limit. The tioce ppb's have 64-bit | ||
819 | * decoders, so read the upper portions w/o checking the | ||
820 | * attributes. | ||
821 | */ | ||
822 | |||
823 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
824 | PCI_PREF_MEMORY_BASE, 2, &tmp); | ||
825 | base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16; | ||
826 | |||
827 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
828 | PCI_PREF_BASE_UPPER32, 4, &tmp); | ||
829 | base |= (u64)tmp << 32; | ||
830 | |||
831 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
832 | PCI_PREF_MEMORY_LIMIT, 2, &tmp); | ||
833 | |||
834 | limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16; | ||
835 | limit |= 0xfffffUL; | ||
836 | |||
837 | raw_pci_read(seg, bus, PCI_DEVFN(dev, 0), | ||
838 | PCI_PREF_LIMIT_UPPER32, 4, &tmp); | ||
839 | limit |= (u64)tmp << 32; | ||
840 | |||
841 | if ((base < limit) && TIOCE_M32_ADDR(base)) | ||
842 | tioce_reserve_m32(tioce_kern, base, limit); | ||
843 | } | ||
844 | |||
845 | return tioce_kern; | ||
846 | } | ||
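The prefetchable-window reconstruction near the end of tioce_kern_init() follows the standard PCI bridge layout: bits 31:20 of base and limit come from the 16-bit config registers, the upper 32 bits from the *_UPPER32 registers, and the low 20 bits of the limit are implied ones. A stand-alone sketch with made-up register values; real code would read them from config space.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented config-space values, for illustration only. */
	uint32_t pref_base_lo  = 0xfff1;      /* PCI_PREF_MEMORY_BASE   */
	uint32_t pref_base_hi  = 0x00000040;  /* PCI_PREF_BASE_UPPER32  */
	uint32_t pref_limit_lo = 0xfff1;      /* PCI_PREF_MEMORY_LIMIT  */
	uint32_t pref_limit_hi = 0x00000040;  /* PCI_PREF_LIMIT_UPPER32 */

	uint64_t base  = ((uint64_t)(pref_base_lo & 0xfff0) << 16)
			 | ((uint64_t)pref_base_hi << 32);
	uint64_t limit = ((uint64_t)(pref_limit_lo & 0xfff0) << 16)
			 | ((uint64_t)pref_limit_hi << 32)
			 | 0xfffffULL;

	printf("prefetch window 0x%llx - 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)limit);
	return 0;
}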
847 | |||
848 | /** | ||
849 | * tioce_force_interrupt - implement altix force_interrupt() backend for CE | ||
850 | * @sn_irq_info: sn asic irq that we need an interrupt generated for | ||
851 | * | ||
852 | * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to | ||
853 | * force a secondary interrupt to be generated. This is to work around an | ||
854 | * asic issue where there is a small window of opportunity for a legacy device | ||
855 | * interrupt to be lost. | ||
856 | */ | ||
857 | static void | ||
858 | tioce_force_interrupt(struct sn_irq_info *sn_irq_info) | ||
859 | { | ||
860 | struct pcidev_info *pcidev_info; | ||
861 | struct tioce_common *ce_common; | ||
862 | struct tioce_kernel *ce_kern; | ||
863 | struct tioce __iomem *ce_mmr; | ||
864 | u64 force_int_val; | ||
865 | |||
866 | if (!sn_irq_info->irq_bridge) | ||
867 | return; | ||
868 | |||
869 | if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE) | ||
870 | return; | ||
871 | |||
872 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
873 | if (!pcidev_info) | ||
874 | return; | ||
875 | |||
876 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
877 | ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base; | ||
878 | ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private; | ||
879 | |||
880 | /* | ||
881 | * TIOCE Rev A workaround (PV 945826), force an interrupt by writing | ||
882 | * the TIO_INTx register directly (1/26/2006) | ||
883 | */ | ||
884 | if (ce_common->ce_rev == TIOCE_REV_A) { | ||
885 | u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit); | ||
886 | u64 status; | ||
887 | |||
888 | tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status); | ||
889 | if (status & int_bit_mask) { | ||
890 | u64 force_irq = (1 << 8) | sn_irq_info->irq_irq; | ||
891 | u64 ctalk = sn_irq_info->irq_xtalkaddr; | ||
892 | u64 nasid, offset; | ||
893 | |||
894 | nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT; | ||
895 | offset = (ctalk & CTALK_NODE_OFFSET); | ||
896 | HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq); | ||
897 | } | ||
898 | |||
899 | return; | ||
900 | } | ||
901 | |||
902 | /* | ||
903 | * irq_int_bit is originally set up by prom, and holds the interrupt | ||
904 | * bit shift (not mask) as defined by the bit definitions in the | ||
905 | * ce_adm_int mmr. These shifts are not the same for the | ||
906 | * ce_adm_force_int register, so do an explicit mapping here to make | ||
907 | * things clearer. | ||
908 | */ | ||
909 | |||
910 | switch (sn_irq_info->irq_int_bit) { | ||
911 | case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT: | ||
912 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT; | ||
913 | break; | ||
914 | case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT: | ||
915 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT; | ||
916 | break; | ||
917 | case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT: | ||
918 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT; | ||
919 | break; | ||
920 | case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT: | ||
921 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT; | ||
922 | break; | ||
923 | case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT: | ||
924 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT; | ||
925 | break; | ||
926 | case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT: | ||
927 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT; | ||
928 | break; | ||
929 | case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT: | ||
930 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT; | ||
931 | break; | ||
932 | case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT: | ||
933 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT; | ||
934 | break; | ||
935 | default: | ||
936 | return; | ||
937 | } | ||
938 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * tioce_target_interrupt - implement set_irq_affinity for tioce resident | ||
943 | * functions. Note: only applies to line interrupts, not MSI's. | ||
944 | * | ||
945 | * @sn_irq_info: SN IRQ context | ||
946 | * | ||
947 | * Given an sn_irq_info, set the associated CE device's interrupt destination | ||
948 | * register. Since the interrupt destination registers are on a per-ce-slot | ||
949 | * basis, this will retarget line interrupts for all functions downstream of | ||
950 | * the slot. | ||
951 | */ | ||
952 | static void | ||
953 | tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | ||
954 | { | ||
955 | struct pcidev_info *pcidev_info; | ||
956 | struct tioce_common *ce_common; | ||
957 | struct tioce_kernel *ce_kern; | ||
958 | struct tioce __iomem *ce_mmr; | ||
959 | int bit; | ||
960 | u64 vector; | ||
961 | |||
962 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
963 | if (!pcidev_info) | ||
964 | return; | ||
965 | |||
966 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
967 | ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base; | ||
968 | ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private; | ||
969 | |||
970 | bit = sn_irq_info->irq_int_bit; | ||
971 | |||
972 | tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit)); | ||
973 | vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; | ||
974 | vector |= sn_irq_info->irq_xtalkaddr; | ||
975 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector); | ||
976 | tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit)); | ||
977 | |||
978 | tioce_force_interrupt(sn_irq_info); | ||
979 | } | ||
980 | |||
981 | /** | ||
982 | * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus | ||
983 | * @prom_bussoft: Common prom/kernel struct representing the bus | ||
984 | * | ||
985 | * Replicates the tioce_common pointed to by @prom_bussoft in kernel | ||
986 | * space. Allocates and initializes a kernel-only area for a given CE, | ||
987 | * and sets up an irq for handling CE error interrupts. | ||
988 | * | ||
989 | * On successful setup, returns the kernel version of tioce_common back to | ||
990 | * the caller. | ||
991 | */ | ||
992 | static void * | ||
993 | tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | ||
994 | { | ||
995 | struct tioce_common *tioce_common; | ||
996 | struct tioce_kernel *tioce_kern; | ||
997 | struct tioce __iomem *tioce_mmr; | ||
998 | |||
999 | /* | ||
1000 | * Allocate kernel bus soft and copy from prom. | ||
1001 | */ | ||
1002 | |||
1003 | tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL); | ||
1004 | if (!tioce_common) | ||
1005 | return NULL; | ||
1006 | |||
1007 | memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); | ||
1008 | tioce_common->ce_pcibus.bs_base = (unsigned long) | ||
1009 | ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base), | ||
1010 | sizeof(struct tioce_common)); | ||
1011 | |||
1012 | tioce_kern = tioce_kern_init(tioce_common); | ||
1013 | if (tioce_kern == NULL) { | ||
1014 | kfree(tioce_common); | ||
1015 | return NULL; | ||
1016 | } | ||
1017 | |||
1018 | /* | ||
1019 | * Clear out any transient errors before registering the error | ||
1020 | * interrupt handler. | ||
1021 | */ | ||
1022 | |||
1023 | tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base; | ||
1024 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL); | ||
1025 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias, | ||
1026 | ~0ULL); | ||
1027 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL); | ||
1028 | |||
1029 | if (request_irq(SGI_PCIASIC_ERROR, | ||
1030 | tioce_error_intr_handler, | ||
1031 | IRQF_SHARED, "TIOCE error", (void *)tioce_common)) | ||
1032 | printk(KERN_WARNING | ||
1033 | "%s: Unable to get irq %d. " | ||
1034 | "Error interrupts won't be routed for " | ||
1035 | "TIOCE bus %04x:%02x\n", | ||
1036 | __func__, SGI_PCIASIC_ERROR, | ||
1037 | tioce_common->ce_pcibus.bs_persist_segment, | ||
1038 | tioce_common->ce_pcibus.bs_persist_busnum); | ||
1039 | |||
1040 | irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq); | ||
1041 | sn_set_err_irq_affinity(SGI_PCIASIC_ERROR); | ||
1042 | return tioce_common; | ||
1043 | } | ||
1044 | |||
1045 | static struct sn_pcibus_provider tioce_pci_interfaces = { | ||
1046 | .dma_map = tioce_dma, | ||
1047 | .dma_map_consistent = tioce_dma_consistent, | ||
1048 | .dma_unmap = tioce_dma_unmap, | ||
1049 | .bus_fixup = tioce_bus_fixup, | ||
1050 | .force_interrupt = tioce_force_interrupt, | ||
1051 | .target_interrupt = tioce_target_interrupt | ||
1052 | }; | ||
1053 | |||
1054 | /** | ||
1055 | * tioce_init_provider - init SN PCI provider ops for TIO CE | ||
1056 | */ | ||
1057 | int | ||
1058 | tioce_init_provider(void) | ||
1059 | { | ||
1060 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces; | ||
1061 | return 0; | ||
1062 | } | ||
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c index 32d6ea2e89f8..11478d2d863d 100644 --- a/arch/ia64/uv/kernel/setup.c +++ b/arch/ia64/uv/kernel/setup.c | |||
@@ -17,11 +17,9 @@ | |||
17 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 17 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
18 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); | 18 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); |
19 | 19 | ||
20 | #ifdef CONFIG_IA64_SGI_UV | ||
21 | int sn_prom_type; | 20 | int sn_prom_type; |
22 | long sn_coherency_id; | 21 | long sn_coherency_id; |
23 | EXPORT_SYMBOL_GPL(sn_coherency_id); | 22 | EXPORT_SYMBOL_GPL(sn_coherency_id); |
24 | #endif | ||
25 | 23 | ||
26 | struct redir_addr { | 24 | struct redir_addr { |
27 | unsigned long redirect; | 25 | unsigned long redirect; |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5f6158973289..06a16dc5cfb5 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -323,7 +323,7 @@ config ACPI_NUMA | |||
323 | bool "NUMA support" | 323 | bool "NUMA support" |
324 | depends on NUMA | 324 | depends on NUMA |
325 | depends on (X86 || IA64 || ARM64) | 325 | depends on (X86 || IA64 || ARM64) |
326 | default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64 | 326 | default y if IA64_GENERIC || ARM64 |
327 | 327 | ||
328 | config ACPI_CUSTOM_DSDT_FILE | 328 | config ACPI_CUSTOM_DSDT_FILE |
329 | string "Custom DSDT Table file to include" | 329 | string "Custom DSDT Table file to include" |