author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/sn
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/Makefile  14
-rw-r--r--  arch/ia64/sn/include/ioerror.h  81
-rw-r--r--  arch/ia64/sn/include/pci/pcibr_provider.h  149
-rw-r--r--  arch/ia64/sn/include/pci/pcibus_provider_defs.h  43
-rw-r--r--  arch/ia64/sn/include/pci/pcidev.h  54
-rw-r--r--  arch/ia64/sn/include/pci/pic.h  261
-rw-r--r--  arch/ia64/sn/include/pci/tiocp.h  256
-rw-r--r--  arch/ia64/sn/include/tio.h  37
-rw-r--r--  arch/ia64/sn/include/xtalk/hubdev.h  67
-rw-r--r--  arch/ia64/sn/include/xtalk/xbow.h  291
-rw-r--r--  arch/ia64/sn/include/xtalk/xwidgetdev.h  70
-rw-r--r--  arch/ia64/sn/kernel/Makefile  12
-rw-r--r--  arch/ia64/sn/kernel/bte.c  453
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c  198
-rw-r--r--  arch/ia64/sn/kernel/huberror.c  201
-rw-r--r--  arch/ia64/sn/kernel/idle.c  30
-rw-r--r--  arch/ia64/sn/kernel/io_init.c  411
-rw-r--r--  arch/ia64/sn/kernel/iomv.c  70
-rw-r--r--  arch/ia64/sn/kernel/irq.c  431
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c  108
-rw-r--r--  arch/ia64/sn/kernel/machvec.c  11
-rw-r--r--  arch/ia64/sn/kernel/mca.c  135
-rw-r--r--  arch/ia64/sn/kernel/setup.c  621
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile  13
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c  34
-rw-r--r--  arch/ia64/sn/kernel/sn2/io.c  101
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c  279
-rw-r--r--  arch/ia64/sn/kernel/sn2/ptc_deadlock.S  82
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c  295
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c  690
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c  149
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c  36
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c  63
-rw-r--r--  arch/ia64/sn/pci/Makefile  10
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c  363
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile  11
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c  188
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c  379
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c  170
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c  282
40 files changed, 7149 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/sn/Makefile b/arch/ia64/sn/Makefile
new file mode 100644
index 000000000000..a269f6d84c29
--- /dev/null
+++ b/arch/ia64/sn/Makefile
@@ -0,0 +1,14 @@
1# arch/ia64/sn/Makefile
2#
3# This file is subject to the terms and conditions of the GNU General Public
4# License. See the file "COPYING" in the main directory of this archive
5# for more details.
6#
7# Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved.
8#
9# Makefile for the sn ia64 subplatform
10#
11
12CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
13
14obj-y += kernel/ pci/
diff --git a/arch/ia64/sn/include/ioerror.h b/arch/ia64/sn/include/ioerror.h
new file mode 100644
index 000000000000..e68f2b0789a7
--- /dev/null
+++ b/arch/ia64/sn/include/ioerror.h
@@ -0,0 +1,81 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_IOERROR_H
9#define _ASM_IA64_SN_IOERROR_H
10
11/*
12 * IO error structure.
13 *
14 * This structure would expand to hold the information retrieved from
15 * all IO related error registers.
16 *
17 * This structure is defined to hold all system specific
18 * information related to a single error.
19 *
20 * This serves a couple of purposes.
21 * - Error handling often involves translating one form of address into
22 * another. So, instead of having different data structures at each level,
23 * we have a single structure, and the appropriate fields get filled in
24 * at each layer.
25 * - This provides a way to dump all error related information in any layer
26 * of error handling (debugging aid).
27 *
28 * A second possibility is to allow each layer to define its own error
29 * data structure, and fill in the proper fields. This has the advantage
30 * of isolating the layers.
31 * A big concern is the potential stack usage (and overflow) if each layer
32 * defines these structures on the stack (assuming we don't want to use kmalloc).
33 *
34 * Any layer wishing to pass extra information to a layer next to it in
35 * the error handling hierarchy can do so as a separate parameter.
36 */
37
38typedef struct io_error_s {
39 /* Bit fields indicating which structure fields are valid */
40 union {
41 struct {
42 unsigned ievb_errortype:1;
43 unsigned ievb_widgetnum:1;
44 unsigned ievb_widgetdev:1;
45 unsigned ievb_srccpu:1;
46 unsigned ievb_srcnode:1;
47 unsigned ievb_errnode:1;
48 unsigned ievb_sysioaddr:1;
49 unsigned ievb_xtalkaddr:1;
50 unsigned ievb_busspace:1;
51 unsigned ievb_busaddr:1;
52 unsigned ievb_vaddr:1;
53 unsigned ievb_memaddr:1;
54 unsigned ievb_epc:1;
55 unsigned ievb_ef:1;
56 unsigned ievb_tnum:1;
57 } iev_b;
58 unsigned iev_a;
59 } ie_v;
60
61 short ie_errortype; /* error type: extra info about error */
62 short ie_widgetnum; /* Widget number that's in error */
63 short ie_widgetdev; /* Device within widget in error */
64 cpuid_t ie_srccpu; /* CPU on srcnode generating error */
65 cnodeid_t ie_srcnode; /* Node which caused the error */
66 cnodeid_t ie_errnode; /* Node where error was noticed */
67 iopaddr_t ie_sysioaddr; /* Sys specific IO address */
68 iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
69 iopaddr_t ie_busspace; /* Bus specific address space */
70 iopaddr_t ie_busaddr; /* Bus specific address */
71 caddr_t ie_vaddr; /* Virtual address of error */
72 iopaddr_t ie_memaddr; /* Physical memory address */
73 caddr_t ie_epc; /* pc when error reported */
74 caddr_t ie_ef; /* eframe when error reported */
75 short ie_tnum; /* Xtalk TNUM field */
76} ioerror_t;
77
78#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
79#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
80
81#endif /* _ASM_IA64_SN_IOERROR_H */
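
A note on the valid-bit idiom above: IOERROR_INIT clears every validity bit in one store through the iev_a overlay, and IOERROR_SETVALUE both assigns a field and marks it valid via token pasting. A minimal standalone sketch of the pattern, with the kernel typedefs stubbed out and only two fields shown:

    #include <stdio.h>

    typedef unsigned long iopaddr_t;      /* stand-in for the kernel typedef */

    typedef struct io_error_s {
            union {
                    struct {
                            unsigned ievb_errortype:1;
                            unsigned ievb_xtalkaddr:1;
                    } iev_b;                /* individual validity bits */
                    unsigned iev_a;         /* all bits, for bulk clearing */
            } ie_v;
            short ie_errortype;
            iopaddr_t ie_xtalkaddr;
    } ioerror_t;

    #define IOERROR_INIT(e)         do { (e)->ie_v.iev_a = 0; } while (0)
    #define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); \
                                         (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)

    int main(void)
    {
            ioerror_t e;

            IOERROR_INIT(&e);                        /* every field starts invalid */
            IOERROR_SETVALUE(&e, xtalkaddr, 0x1000); /* one layer fills in its part */

            if (e.ie_v.iev_b.ievb_xtalkaddr)         /* a later layer checks validity */
                    printf("xtalk addr 0x%lx\n", (unsigned long)e.ie_xtalkaddr);
            return 0;
    }
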
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
new file mode 100644
index 000000000000..b1f05ffec70b
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcibr_provider.h
@@ -0,0 +1,149 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
9#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
10
11/* Workarounds */
12#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
13
14#define BUSTYPE_MASK 0x1
15
16/* Macros given a pcibus structure */
17#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
18#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
19 asic == PCIIO_ASIC_TYPE_TIOCP)
20#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
21
22
23/*
24 * The different PCI Bridge types supported on the SGI Altix platforms
25 */
26#define PCIBR_BRIDGETYPE_UNKNOWN -1
27#define PCIBR_BRIDGETYPE_PIC 2
28#define PCIBR_BRIDGETYPE_TIOCP 3
29
30/*
31 * Bridge 64bit Direct Map Attributes
32 */
33#define PCI64_ATTR_PREF (1ull << 59)
34#define PCI64_ATTR_PREC (1ull << 58)
35#define PCI64_ATTR_VIRTUAL (1ull << 57)
36#define PCI64_ATTR_BAR (1ull << 56)
37#define PCI64_ATTR_SWAP (1ull << 55)
38#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
39
40#define PCI32_LOCAL_BASE 0
41#define PCI32_MAPPED_BASE 0x40000000
42#define PCI32_DIRECT_BASE 0x80000000
43
44#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \
45 (uint64_t)(x) >= PCI32_MAPPED_BASE)
46#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE)
47
48
49/*
50 * Bridge PMU Address Translation Entry Attributes
51 */
52#define PCI32_ATE_V (0x1 << 0)
53#define PCI32_ATE_CO (0x1 << 1)
54#define PCI32_ATE_PREC (0x1 << 2)
55#define PCI32_ATE_PREF (0x1 << 3)
56#define PCI32_ATE_BAR (0x1 << 4)
57#define PCI32_ATE_ADDR_SHFT 12
58
59#define MINIMAL_ATES_REQUIRED(addr, size) \
60 (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
61
62#define MINIMAL_ATE_FLAG(addr, size) \
63 (MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0)
64
65/* bit 29 of the pci address is the SWAP bit */
66#define ATE_SWAPSHIFT 29
67#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
68#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
69
70/*
71 * I/O page size
72 */
73#if PAGE_SIZE < 16384
74#define IOPFNSHIFT 12 /* 4K per mapped page */
75#else
76#define IOPFNSHIFT 14 /* 16K per mapped page */
77#endif
78
79#define IOPGSIZE (1 << IOPFNSHIFT)
80#define IOPG(x) ((x) >> IOPFNSHIFT)
81#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
82
83#define PCIBR_DEV_SWAP_DIR (1ull << 19)
84#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
85
86/*
87 * PMU resources.
88 */
89struct ate_resource{
90 uint64_t *ate;
91 uint64_t num_ate;
92 uint64_t lowest_free_index;
93};
94
95struct pcibus_info {
96 struct pcibus_bussoft pbi_buscommon; /* common header */
97 uint32_t pbi_moduleid;
98 short pbi_bridge_type;
99 short pbi_bridge_mode;
100
101 struct ate_resource pbi_int_ate_resource;
102 uint64_t pbi_int_ate_size;
103
104 uint64_t pbi_dir_xbase;
105 char pbi_hub_xid;
106
107 uint64_t pbi_devreg[8];
108 spinlock_t pbi_lock;
109
110 uint32_t pbi_valid_devices;
111 uint32_t pbi_enabled_devices;
112};
113
114/*
115 * pcibus_info structure locking macros
116 */
117static inline unsigned long
118pcibr_lock(struct pcibus_info *pcibus_info)
119{
120 unsigned long flag;
121 spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
122 return(flag);
123}
124#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
125
126extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
127extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
128extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
129
130/*
131 * prototypes for the bridge asic register access routines in pcibr_reg.c
132 */
133extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t);
134extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t);
135extern uint64_t pcireg_tflush_get(struct pcibus_info *);
136extern uint64_t pcireg_intr_status_get(struct pcibus_info *);
137extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t);
138extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t);
139extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t);
140extern void pcireg_force_intr_set(struct pcibus_info *, int);
141extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int);
142extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t);
143extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int);
144extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
145extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
146extern int pcibr_ate_alloc(struct pcibus_info *, int);
147extern void pcibr_ate_free(struct pcibus_info *, int);
148extern void ate_write(struct pcibus_info *, int, int, uint64_t);
149#endif
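
The IOPG/IOPGOFF arithmetic above decides whether a DMA region needs one address translation entry or several: MINIMAL_ATES_REQUIRED is true exactly when the region, placed at its real page offset, crosses no more I/O-page boundaries than a same-sized region starting at offset 0 would. A standalone check of the math, assuming the 16 KB I/O page case:

    #include <stdio.h>
    #include <stdint.h>

    #define IOPFNSHIFT 14                    /* 16K per mapped page, as above */
    #define IOPGSIZE   (1 << IOPFNSHIFT)
    #define IOPG(x)    ((x) >> IOPFNSHIFT)
    #define IOPGOFF(x) ((x) & (IOPGSIZE - 1))

    #define MINIMAL_ATES_REQUIRED(addr, size) \
            (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))

    int main(void)
    {
            uint64_t addr = 0x3f00;          /* 0x100 bytes below a page boundary */

            /* fits within the page: prints 1 */
            printf("size 0x100 -> %d\n", (int)MINIMAL_ATES_REQUIRED(addr, 0x100));
            /* spills into the next page: prints 0 */
            printf("size 0x200 -> %d\n", (int)MINIMAL_ATES_REQUIRED(addr, 0x200));
            return 0;
    }
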
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
new file mode 100644
index 000000000000..07065615bbea
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
@@ -0,0 +1,43 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
9#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
10
11/*
12 * SN pci asic types. Do not ever renumber these or reuse values. The
13 * values must agree with what prom thinks they are.
14 */
15
16#define PCIIO_ASIC_TYPE_UNKNOWN 0
17#define PCIIO_ASIC_TYPE_PPB 1
18#define PCIIO_ASIC_TYPE_PIC 2
19#define PCIIO_ASIC_TYPE_TIOCP 3
20
21/*
22 * Common pciio bus provider data. There should be one of these as the
23 * first field in any pciio based provider soft structure (e.g. pcibr_soft,
24 * tioca_soft, etc).
25 */
26
27struct pcibus_bussoft {
28 uint32_t bs_asic_type; /* chipset type */
29 uint32_t bs_xid; /* xwidget id */
30 uint64_t bs_persist_busnum; /* Persistent Bus Number */
31 uint64_t bs_legacy_io; /* legacy io pio addr */
32 uint64_t bs_legacy_mem; /* legacy mem pio addr */
33 uint64_t bs_base; /* widget base */
34 struct xwidget_info *bs_xwidget_info;
35};
36
37/*
38 * DMA mapping flags
39 */
40
41#define SN_PCIDMA_CONSISTENT 0x0001
42
43#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
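
Because struct pcibus_bussoft must be the first field of every provider soft structure, code holding the common header pointer can recover the provider's own structure with a cast once bs_asic_type identifies the provider. A sketch of the idiom; the provider structure below is illustrative, not one from this patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PCIIO_ASIC_TYPE_PIC 2

    struct pcibus_bussoft {
            uint32_t bs_asic_type;           /* chipset type */
            /* ... remaining common fields ... */
    };

    struct example_soft {                    /* hypothetical provider soft */
            struct pcibus_bussoft common;    /* must be the first field */
            uint64_t private_state;
    };

    static uint64_t example_private(struct pcibus_bussoft *b)
    {
            /* Well defined only because 'common' is first: b and the
             * enclosing structure share the same address. */
            return ((struct example_soft *)b)->private_state;
    }

    int main(void)
    {
            struct example_soft s = { { PCIIO_ASIC_TYPE_PIC }, 42 };

            if (s.common.bs_asic_type == PCIIO_ASIC_TYPE_PIC)
                    printf("%lu\n", (unsigned long)example_private(&s.common));
            return 0;
    }
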
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/arch/ia64/sn/include/pci/pcidev.h
new file mode 100644
index 000000000000..81eb95d3bf47
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcidev.h
@@ -0,0 +1,54 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
9#define _ASM_IA64_SN_PCI_PCIDEV_H
10
11#include <linux/pci.h>
12
13extern struct sn_irq_info **sn_irq;
14
15#define SN_PCIDEV_INFO(pci_dev) \
16 ((struct pcidev_info *)(pci_dev)->sysdata)
17
18/*
19 * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
20 * this only works for root busses, not for busses represented by PPBs.
21 */
22
23#define SN_PCIBUS_BUSSOFT(pci_bus) \
24 ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
25
26/*
27 * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
28 * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
29 * to possible PPBs in the path.
30 */
31
32#define SN_PCIDEV_BUSSOFT(pci_dev) \
33 (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
34
35#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
36#define PCIIO_SLOT_NONE 255
37#define PCIIO_FUNC_NONE 255
38#define PCIIO_VENDOR_ID_NONE (-1)
39
40struct pcidev_info {
41 uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
42 uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
43
44 struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
45 struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
46 struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
47
48 struct sn_irq_info *pdi_sn_irq_info;
49};
50
51extern void sn_irq_fixup(struct pci_dev *pci_dev,
52 struct sn_irq_info *sn_irq_info);
53
54#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
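
SN_PCIDEV_INFO above is the standard sysdata idiom: fixup code plants a pcidev_info pointer in pci_dev->sysdata, and later code recovers it with a single cast. A standalone sketch with struct pci_dev stubbed out to just that field:

    #include <stdio.h>

    struct pci_dev { void *sysdata; };       /* stand-in for <linux/pci.h> */

    struct pcidev_info {
            unsigned long pdi_pio_mapped_addr[7];  /* 6 BARs plus 1 ROM */
    };

    #define SN_PCIDEV_INFO(pci_dev) ((struct pcidev_info *)(pci_dev)->sysdata)

    int main(void)
    {
            static struct pcidev_info info = {
                    .pdi_pio_mapped_addr = { 0xc0000000UL }
            };
            struct pci_dev dev = { .sysdata = &info };  /* set during fixup */

            printf("BAR0 PIO addr 0x%lx\n",
                   SN_PCIDEV_INFO(&dev)->pdi_pio_mapped_addr[0]);
            return 0;
    }
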
diff --git a/arch/ia64/sn/include/pci/pic.h b/arch/ia64/sn/include/pci/pic.h
new file mode 100644
index 000000000000..fd18acecb1e6
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pic.h
@@ -0,0 +1,261 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PCI_PIC_H
9#define _ASM_IA64_SN_PCI_PIC_H
10
11/*
12 * PIC AS DEVICE ZERO
13 * ------------------
14 *
15 * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
16 * be designated as 'device 0'. That is a departure from earlier SGI
17 * PCI bridges. Because of that we use config space 1 to access the
18 * config space of the first actual PCI device on the bus.
19 * Here's what the PIC manual says:
20 *
21 * The current PCI-X bus specification now defines that the parent
22 * hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC
23 * reduced the total number of devices from 8 to 4 and removed the
24 * device registers and windows, now only supporting devices 0,1,2, and
25 * 3. PIC did leave all 8 configuration space windows. The reason was
26 * there was nothing to gain by removing them. Herein lies the problem.
27 * The device numbering we use, 0 through 3, is unrelated to the device
28 * numbering which PCI-X requires in configuration space. In the past we
29 * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
30 * PCI-X requires we start at 1, not 0, and currently the PX brick
31 * does associate our:
32 *
33 * device 0 with configuration space window 1,
34 * device 1 with configuration space window 2,
35 * device 2 with configuration space window 3,
36 * device 3 with configuration space window 4.
37 *
38 * The net effect is that all config space accesses are off-by-one with
39 * relation to other per-slot accesses on the PIC.
40 * Here is a table that shows some of that:
41 *
42 * Internal Slot#
43 * |
44 * | 0 1 2 3
45 * ----------|---------------------------------------
46 * config | 0x21000 0x22000 0x23000 0x24000
47 * |
48 * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
49 * |
50 * odd rrb | n/a 0[1] n/a 1[1]
51 * |
52 * int dev | 00 01 10 11
53 * |
54 * ext slot# | 1 2 3 4
55 * ----------|---------------------------------------
56 */
57
58#define PIC_ATE_TARGETID_SHFT 8
59#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
60#define PIC_PCI64_ATTR_TARG_SHFT 60
61
62
63/*****************************************************************************
64 *********************** PIC MMR structure mapping ***************************
65 *****************************************************************************/
66
67/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
68 * of a 64-bit register. When writing PIC registers, always write the
69 * entire 64 bits.
70 */
71
72struct pic {
73
74 /* 0x000000-0x00FFFF -- Local Registers */
75
76 /* 0x000000-0x000057 -- Standard Widget Configuration */
77 uint64_t p_wid_id; /* 0x000000 */
78 uint64_t p_wid_stat; /* 0x000008 */
79 uint64_t p_wid_err_upper; /* 0x000010 */
80 uint64_t p_wid_err_lower; /* 0x000018 */
81 #define p_wid_err p_wid_err_lower
82 uint64_t p_wid_control; /* 0x000020 */
83 uint64_t p_wid_req_timeout; /* 0x000028 */
84 uint64_t p_wid_int_upper; /* 0x000030 */
85 uint64_t p_wid_int_lower; /* 0x000038 */
86 #define p_wid_int p_wid_int_lower
87 uint64_t p_wid_err_cmdword; /* 0x000040 */
88 uint64_t p_wid_llp; /* 0x000048 */
89 uint64_t p_wid_tflush; /* 0x000050 */
90
91 /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
92 uint64_t p_wid_aux_err; /* 0x000058 */
93 uint64_t p_wid_resp_upper; /* 0x000060 */
94 uint64_t p_wid_resp_lower; /* 0x000068 */
95 #define p_wid_resp p_wid_resp_lower
96 uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */
97 uint64_t p_wid_addr_lkerr; /* 0x000078 */
98
99 /* 0x000080-0x00008F -- PMU & MAP */
100 uint64_t p_dir_map; /* 0x000080 */
101 uint64_t _pad_000088; /* 0x000088 */
102
103 /* 0x000090-0x00009F -- SSRAM */
104 uint64_t p_map_fault; /* 0x000090 */
105 uint64_t _pad_000098; /* 0x000098 */
106
107 /* 0x0000A0-0x0000AF -- Arbitration */
108 uint64_t p_arb; /* 0x0000A0 */
109 uint64_t _pad_0000A8; /* 0x0000A8 */
110
111 /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
112 uint64_t p_ate_parity_err; /* 0x0000B0 */
113 uint64_t _pad_0000B8; /* 0x0000B8 */
114
115 /* 0x0000C0-0x0000FF -- PCI/GIO */
116 uint64_t p_bus_timeout; /* 0x0000C0 */
117 uint64_t p_pci_cfg; /* 0x0000C8 */
118 uint64_t p_pci_err_upper; /* 0x0000D0 */
119 uint64_t p_pci_err_lower; /* 0x0000D8 */
120 #define p_pci_err p_pci_err_lower
121 uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
122
123 /* 0x000100-0x0001FF -- Interrupt */
124 uint64_t p_int_status; /* 0x000100 */
125 uint64_t p_int_enable; /* 0x000108 */
126 uint64_t p_int_rst_stat; /* 0x000110 */
127 uint64_t p_int_mode; /* 0x000118 */
128 uint64_t p_int_device; /* 0x000120 */
129 uint64_t p_int_host_err; /* 0x000128 */
130 uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */
131 uint64_t p_err_int_view; /* 0x000170 */
132 uint64_t p_mult_int; /* 0x000178 */
133 uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */
134 uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */
135
136 /* 0x000200-0x000298 -- Device */
137 uint64_t p_device[4]; /* 0x0002{00,,,18} */
138 uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
139 uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */
140 uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
141 uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */
142 #define p_even_resp p_rrb_map[0] /* 0x000280 */
143 #define p_odd_resp p_rrb_map[1] /* 0x000288 */
144 uint64_t p_resp_status; /* 0x000290 */
145 uint64_t p_resp_clear; /* 0x000298 */
146
147 uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
148
149 /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
150 struct {
151 uint64_t upper; /* 0x0003{00,,,F0} */
152 uint64_t lower; /* 0x0003{08,,,F8} */
153 } p_buf_addr_match[16];
154
155 /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
156 struct {
157 uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
158 uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
159 uint64_t inflight; /* 0x000{410,,,5D0} */
160 uint64_t prefetch; /* 0x000{418,,,5D8} */
161 uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
162 uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
163 uint64_t max_latency; /* 0x000{430,,,5F0} */
164 uint64_t clear_all; /* 0x000{438,,,5F8} */
165 } p_buf_count[8];
166
167
168 /* 0x000600-0x0009FF -- PCI/X registers */
169 uint64_t p_pcix_bus_err_addr; /* 0x000600 */
170 uint64_t p_pcix_bus_err_attr; /* 0x000608 */
171 uint64_t p_pcix_bus_err_data; /* 0x000610 */
172 uint64_t p_pcix_pio_split_addr; /* 0x000618 */
173 uint64_t p_pcix_pio_split_attr; /* 0x000620 */
174 uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */
175 uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */
176 uint64_t p_pcix_timeout; /* 0x000638 */
177
178 uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */
179
180 /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
181 struct {
182 uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */
183 uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */
184 } p_pcix_read_buf_64[16];
185
186 struct {
187 uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */
188 uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */
189 uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */
190 uint64_t __pad1; /* 0x000{B18,,,BF8} */
191 } p_pcix_write_buf_64[8];
192
193 /* End of Local Registers -- Start of Address Map space */
194
195 char _pad_000c00[0x010000 - 0x000c00];
196
197 /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
198 uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */
199
200 /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
201 uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
202
203 char _pad_014000[0x18000 - 0x014000];
204
205 /* 0x18000-0x197F8 -- PIC Write Request Ram */
206 uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
207 uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
208 uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
209
210 char _pad_019800[0x20000 - 0x019800];
211
212 /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
213 union {
214 uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
215 uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
216 uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
217 uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
218 union {
219 uint8_t c[0x100 / 1];
220 uint16_t s[0x100 / 2];
221 uint32_t l[0x100 / 4];
222 uint64_t d[0x100 / 8];
223 } f[8];
224 } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
225
226 /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
227 union {
228 uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
229 uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
230 uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
231 uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
232 union {
233 uint8_t c[0x100 / 1];
234 uint16_t s[0x100 / 2];
235 uint32_t l[0x100 / 4];
236 uint64_t d[0x100 / 8];
237 } f[8];
238 } p_type1_cfg; /* 0x028000-0x029000 */
239
240 char _pad_029000[0x030000-0x029000];
241
242 /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
243 union {
244 uint8_t c[8 / 1];
245 uint16_t s[8 / 2];
246 uint32_t l[8 / 4];
247 uint64_t d[8 / 8];
248 } p_pci_iack; /* 0x030000-0x030007 */
249
250 char _pad_030007[0x040000-0x030008];
251
252 /* 0x040000-0x040007 -- PCIX Special Cycle */
253 union {
254 uint8_t c[8 / 1];
255 uint16_t s[8 / 2];
256 uint32_t l[8 / 4];
257 uint64_t d[8 / 8];
258 } p_pcix_cycle; /* 0x040000-0x040007 */
259};
260
261#endif /* _ASM_IA64_SN_PCI_PIC_H */
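
The off-by-one scheme described at the top of this header reduces to simple arithmetic: internal slot n (0 through 3) uses type-0 configuration window n + 1, each window 0x1000 bytes, with window 1 at offset 0x21000. A standalone sketch reproducing the "config" row of the table above:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t pic_cfg_window(int internal_slot)
    {
            /* window 0 belongs to the bridge itself (device 0 on the bus) */
            return 0x20000u + (uint64_t)(internal_slot + 1) * 0x1000;
    }

    int main(void)
    {
            for (int slot = 0; slot < 4; slot++)
                    printf("slot %d -> config space at 0x%llx\n", slot,
                           (unsigned long long)pic_cfg_window(slot));
            /* prints 0x21000, 0x22000, 0x23000, 0x24000 */
            return 0;
    }
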
diff --git a/arch/ia64/sn/include/pci/tiocp.h b/arch/ia64/sn/include/pci/tiocp.h
new file mode 100644
index 000000000000..f07c83b2bf6e
--- /dev/null
+++ b/arch/ia64/sn/include/pci/tiocp.h
@@ -0,0 +1,256 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PCI_TIOCP_H
9#define _ASM_IA64_SN_PCI_TIOCP_H
10
11#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
12#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
13
14
15/*****************************************************************************
16 *********************** TIOCP MMR structure mapping ***************************
17 *****************************************************************************/
18
19struct tiocp{
20
21 /* 0x000000-0x00FFFF -- Local Registers */
22
23 /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
24 uint64_t cp_id; /* 0x000000 */
25 uint64_t cp_stat; /* 0x000008 */
26 uint64_t cp_err_upper; /* 0x000010 */
27 uint64_t cp_err_lower; /* 0x000018 */
28 #define cp_err cp_err_lower
29 uint64_t cp_control; /* 0x000020 */
30 uint64_t cp_req_timeout; /* 0x000028 */
31 uint64_t cp_intr_upper; /* 0x000030 */
32 uint64_t cp_intr_lower; /* 0x000038 */
33 #define cp_intr cp_intr_lower
34 uint64_t cp_err_cmdword; /* 0x000040 */
35 uint64_t _pad_000048; /* 0x000048 */
36 uint64_t cp_tflush; /* 0x000050 */
37
38 /* 0x000058-0x00007F -- Bridge-specific Configuration */
39 uint64_t cp_aux_err; /* 0x000058 */
40 uint64_t cp_resp_upper; /* 0x000060 */
41 uint64_t cp_resp_lower; /* 0x000068 */
42 #define cp_resp cp_resp_lower
43 uint64_t cp_tst_pin_ctrl; /* 0x000070 */
44 uint64_t cp_addr_lkerr; /* 0x000078 */
45
46 /* 0x000080-0x00008F -- PMU & MAP */
47 uint64_t cp_dir_map; /* 0x000080 */
48 uint64_t _pad_000088; /* 0x000088 */
49
50 /* 0x000090-0x00009F -- SSRAM */
51 uint64_t cp_map_fault; /* 0x000090 */
52 uint64_t _pad_000098; /* 0x000098 */
53
54 /* 0x0000A0-0x0000AF -- Arbitration */
55 uint64_t cp_arb; /* 0x0000A0 */
56 uint64_t _pad_0000A8; /* 0x0000A8 */
57
58 /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
59 uint64_t cp_ate_parity_err; /* 0x0000B0 */
60 uint64_t _pad_0000B8; /* 0x0000B8 */
61
62 /* 0x0000C0-0x0000FF -- PCI/GIO */
63 uint64_t cp_bus_timeout; /* 0x0000C0 */
64 uint64_t cp_pci_cfg; /* 0x0000C8 */
65 uint64_t cp_pci_err_upper; /* 0x0000D0 */
66 uint64_t cp_pci_err_lower; /* 0x0000D8 */
67 #define cp_pci_err cp_pci_err_lower
68 uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
69
70 /* 0x000100-0x0001FF -- Interrupt */
71 uint64_t cp_int_status; /* 0x000100 */
72 uint64_t cp_int_enable; /* 0x000108 */
73 uint64_t cp_int_rst_stat; /* 0x000110 */
74 uint64_t cp_int_mode; /* 0x000118 */
75 uint64_t cp_int_device; /* 0x000120 */
76 uint64_t cp_int_host_err; /* 0x000128 */
77 uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */
78 uint64_t cp_err_int_view; /* 0x000170 */
79 uint64_t cp_mult_int; /* 0x000178 */
80 uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */
81 uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */
82
83 /* 0x000200-0x000298 -- Device */
84 uint64_t cp_device[4]; /* 0x0002{00,,,18} */
85 uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
86 uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
87 uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
88 uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */
89 #define cp_even_resp cp_rrb_map[0] /* 0x000280 */
90 #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
91 uint64_t cp_resp_status; /* 0x000290 */
92 uint64_t cp_resp_clear; /* 0x000298 */
93
94 uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
95
96 /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
97 struct {
98 uint64_t upper; /* 0x0003{00,,,F0} */
99 uint64_t lower; /* 0x0003{08,,,F8} */
100 } cp_buf_addr_match[16];
101
102 /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
103 struct {
104 uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
105 uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
106 uint64_t inflight; /* 0x000{410,,,5D0} */
107 uint64_t prefetch; /* 0x000{418,,,5D8} */
108 uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
109 uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
110 uint64_t max_latency; /* 0x000{430,,,5F0} */
111 uint64_t clear_all; /* 0x000{438,,,5F8} */
112 } cp_buf_count[8];
113
114
115 /* 0x000600-0x0009FF -- PCI/X registers */
116 uint64_t cp_pcix_bus_err_addr; /* 0x000600 */
117 uint64_t cp_pcix_bus_err_attr; /* 0x000608 */
118 uint64_t cp_pcix_bus_err_data; /* 0x000610 */
119 uint64_t cp_pcix_pio_split_addr; /* 0x000618 */
120 uint64_t cp_pcix_pio_split_attr; /* 0x000620 */
121 uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */
122 uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */
123 uint64_t cp_pcix_timeout; /* 0x000638 */
124
125 uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */
126
127 /* 0x000700-0x000737 -- Debug Registers */
128 uint64_t cp_ct_debug_ctl; /* 0x000700 */
129 uint64_t cp_br_debug_ctl; /* 0x000708 */
130 uint64_t cp_mux3_debug_ctl; /* 0x000710 */
131 uint64_t cp_mux4_debug_ctl; /* 0x000718 */
132 uint64_t cp_mux5_debug_ctl; /* 0x000720 */
133 uint64_t cp_mux6_debug_ctl; /* 0x000728 */
134 uint64_t cp_mux7_debug_ctl; /* 0x000730 */
135
136 uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */
137
138 /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
139 struct {
140 uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */
141 uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */
142 } cp_pcix_read_buf_64[16];
143
144 struct {
145 uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */
146 uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */
147 uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */
148 uint64_t __pad1; /* 0x000{B18,,,BF8} */
149 } cp_pcix_write_buf_64[8];
150
151 /* End of Local Registers -- Start of Address Map space */
152
153 char _pad_000c00[0x010000 - 0x000c00];
154
155 /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
156 uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
157
158 char _pad_012000[0x14000 - 0x012000];
159
160 /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
161 uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
162
163 char _pad_016000[0x18000 - 0x016000];
164
165 /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
166 uint64_t cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
167 uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
168 uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
169
170 char _pad_019800[0x1C000 - 0x019800];
171
172 /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
173 uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
174 uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
175 uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
176
177 char _pad_01F000[0x20000 - 0x01F000];
178
179 /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
180 char _pad_020000[0x021000 - 0x20000];
181
182 /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
183 union {
184 uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
185 uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
186 uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
187 uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
188 union {
189 uint8_t c[0x100 / 1];
190 uint16_t s[0x100 / 2];
191 uint32_t l[0x100 / 4];
192 uint64_t d[0x100 / 8];
193 } f[8];
194 } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
195
196 /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
197 union {
198 uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
199 uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
200 uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
201 uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
202 union {
203 uint8_t c[0x100 / 1];
204 uint16_t s[0x100 / 2];
205 uint32_t l[0x100 / 4];
206 uint64_t d[0x100 / 8];
207 } f[8];
208 } cp_type1_cfg; /* 0x028000-0x029000 */
209
210 char _pad_029000[0x030000-0x029000];
211
212 /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
213 union {
214 uint8_t c[8 / 1];
215 uint16_t s[8 / 2];
216 uint32_t l[8 / 4];
217 uint64_t d[8 / 8];
218 } cp_pci_iack; /* 0x030000-0x030007 */
219
220 char _pad_030007[0x040000-0x030008];
221
222 /* 0x040000-0x040007 -- PCIX Special Cycle */
223 union {
224 uint8_t c[8 / 1];
225 uint16_t s[8 / 2];
226 uint32_t l[8 / 4];
227 uint64_t d[8 / 8];
228 } cp_pcix_cycle; /* 0x040000-0x040007 */
229
230 char _pad_040007[0x200000-0x040008];
231
232 /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
233 union {
234 uint8_t c[0x100000 / 1];
235 uint16_t s[0x100000 / 2];
236 uint32_t l[0x100000 / 4];
237 uint64_t d[0x100000 / 8];
238 } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
239
240 #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
241
242 char _pad_800000[0xA00000-0x800000];
243
244 /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
245 union {
246 uint8_t c[0x100000 / 1];
247 uint16_t s[0x100000 / 2];
248 uint32_t l[0x100000 / 4];
249 uint64_t d[0x100000 / 8];
250 } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
251
252 #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
253
254};
255
256#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
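
The cp_devio(n) macro above remaps the four PCI devices onto the six 1 MB raw device windows; the index arithmetic suggests devices 0 and 1 each get a pair of adjacent windows (indices 0-1 and 2-3) while devices 2 and 3 get single windows (indices 4 and 5). A standalone check of the index math:

    #include <stdio.h>

    /* same expression as the cp_devio(n) macro above */
    static int cp_devio_index(int n)
    {
            return (n < 2) ? n * 2 : n + 2;
    }

    int main(void)
    {
            for (int n = 0; n < 4; n++)
                    printf("device %d -> cp_devio_raw[%d]\n", n, cp_devio_index(n));
            /* prints indices 0, 2, 4, 5 */
            return 0;
    }
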
diff --git a/arch/ia64/sn/include/tio.h b/arch/ia64/sn/include/tio.h
new file mode 100644
index 000000000000..0139124dd54a
--- /dev/null
+++ b/arch/ia64/sn/include/tio.h
@@ -0,0 +1,37 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_TIO_H
10#define _ASM_IA64_SN_TIO_H
11
12#define TIO_MMR_ADDR_MOD
13
14#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
15
16#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
17#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
18
19#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
20#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
21#define TIO_ITTE_OFFSET_SHIFT 0
22
23#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
24#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
25#define TIO_ITTE_WIDGET_SHIFT 12
26#define TIO_ITTE_VALID_MASK 0x1
27#define TIO_ITTE_VALID_SHIFT 16
28
29
30#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
31 REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
32 (((((addr) >> TIO_BWIN_SIZE_BITS) & \
33 TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
34 (((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
35 (( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
36
37#endif /* _ASM_IA64_SN_TIO_H */
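
TIO_ITTE_PUT packs a big-window offset, a widget number and a valid bit into one word before REMOTE_HUB_S stores it. The packing can be checked in isolation; TIO_BWIN_SIZE_BITS is defined elsewhere in the tree, so the value used below (29) is an assumption for this sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define TIO_BWIN_SIZE_BITS    29   /* assumed for the sketch */

    #define TIO_ITTE_OFFSET_BITS  8
    #define TIO_ITTE_OFFSET_MASK  ((1 << TIO_ITTE_OFFSET_BITS) - 1)
    #define TIO_ITTE_OFFSET_SHIFT 0
    #define TIO_ITTE_WIDGET_BITS  2
    #define TIO_ITTE_WIDGET_MASK  ((1 << TIO_ITTE_WIDGET_BITS) - 1)
    #define TIO_ITTE_WIDGET_SHIFT 12
    #define TIO_ITTE_VALID_MASK   0x1
    #define TIO_ITTE_VALID_SHIFT  16

    /* same field packing as TIO_ITTE_PUT, minus the register store */
    static uint64_t tio_itte_value(uint64_t addr, int widget, int valid)
    {
            return ((((addr >> TIO_BWIN_SIZE_BITS) & TIO_ITTE_OFFSET_MASK)
                     << TIO_ITTE_OFFSET_SHIFT) |
                    ((uint64_t)(widget & TIO_ITTE_WIDGET_MASK)
                     << TIO_ITTE_WIDGET_SHIFT)) |
                   ((uint64_t)(valid & TIO_ITTE_VALID_MASK)
                    << TIO_ITTE_VALID_SHIFT);
    }

    int main(void)
    {
            printf("ITTE = 0x%llx\n",          /* 0x11002 for these inputs */
                   (unsigned long long)tio_itte_value(0x40000000ULL, 1, 1));
            return 0;
    }
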
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
new file mode 100644
index 000000000000..868e7ecae84b
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -0,0 +1,67 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
9#define _ASM_IA64_SN_XTALK_HUBDEV_H
10
11#define HUB_WIDGET_ID_MAX 0xf
12#define DEV_PER_WIDGET (2*2*8)
13#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
14#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
15#define IIO_ITTE_WIDGET_SHIFT 8
16
17/*
18 * Use the top big window as a surrogate for the first small window
19 */
20#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
21#define IIO_NUM_ITTES 7
22#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
23
24struct sn_flush_device_list {
25 int sfdl_bus;
26 int sfdl_slot;
27 int sfdl_pin;
28 struct bar_list {
29 unsigned long start;
30 unsigned long end;
31 } sfdl_bar_list[6];
32 unsigned long sfdl_force_int_addr;
33 unsigned long sfdl_flush_value;
34 volatile unsigned long *sfdl_flush_addr;
35 uint64_t sfdl_persistent_busnum;
36 struct pcibus_info *sfdl_pcibus_info;
37 spinlock_t sfdl_flush_lock;
38};
39
40/*
41 * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
42 */
43struct sn_flush_nasid_entry {
44 struct sn_flush_device_list **widget_p; /* Used as an array of wid_num */
45 uint64_t iio_itte[8];
46};
47
48struct hubdev_info {
49 geoid_t hdi_geoid;
50 short hdi_nasid;
51 short hdi_peer_nasid; /* Dual Porting Peer */
52
53 struct sn_flush_nasid_entry hdi_flush_nasid_list;
54 struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
55
56
57 void *hdi_nodepda;
58 void *hdi_node_vertex;
59 void *hdi_xtalk_vertex;
60};
61
62extern void hubdev_init_node(nodepda_t *, cnodeid_t);
63extern void hub_error_init(struct hubdev_info *);
64extern void ice_error_init(struct hubdev_info *);
65
66
67#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
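
As the comment above says, widget_p behaves like a two-dimensional array indexed first by widget number and then by device, so a lookup is a plain two-level index. A compilable sketch with the structures trimmed and allocation elided:

    #include <stdio.h>

    #define DEV_PER_WIDGET (2 * 2 * 8)

    struct sn_flush_device_list { int sfdl_bus; /* ... */ };

    struct sn_flush_nasid_entry {
            struct sn_flush_device_list **widget_p;
            /* ... */
    };

    /* widget_p[wid_num] points at an array of DEV_PER_WIDGET entries */
    static struct sn_flush_device_list *
    flush_device(struct sn_flush_nasid_entry *e, int wid_num, int device)
    {
            return &e->widget_p[wid_num][device];
    }

    int main(void)
    {
            static struct sn_flush_device_list row[DEV_PER_WIDGET];
            struct sn_flush_device_list *widgets[] = { row, row };
            struct sn_flush_nasid_entry e = { .widget_p = widgets };

            flush_device(&e, 1, 3)->sfdl_bus = 7;
            printf("bus = %d\n", row[3].sfdl_bus);
            return 0;
    }
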
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h
new file mode 100644
index 000000000000..ec56b3432f17
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/xbow.h
@@ -0,0 +1,291 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8#ifndef _ASM_IA64_SN_XTALK_XBOW_H
9#define _ASM_IA64_SN_XTALK_XBOW_H
10
11#define XBOW_PORT_8 0x8
12#define XBOW_PORT_C 0xc
13#define XBOW_PORT_F 0xf
14
15#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
16#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
17
18#define XBOW_CREDIT 4
19
20#define MAX_XBOW_NAME 16
21
22/* Register set for each xbow link */
23typedef volatile struct xb_linkregs_s {
24/*
25 * we access these through synergy unswizzled space, so the address
26 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
27 * That's why we put the register first and filler second.
28 */
29 uint32_t link_ibf;
30 uint32_t filler0; /* filler for proper alignment */
31 uint32_t link_control;
32 uint32_t filler1;
33 uint32_t link_status;
34 uint32_t filler2;
35 uint32_t link_arb_upper;
36 uint32_t filler3;
37 uint32_t link_arb_lower;
38 uint32_t filler4;
39 uint32_t link_status_clr;
40 uint32_t filler5;
41 uint32_t link_reset;
42 uint32_t filler6;
43 uint32_t link_aux_status;
44 uint32_t filler7;
45} xb_linkregs_t;
46
47typedef volatile struct xbow_s {
48 /* standard widget configuration 0x000000-0x000057 */
49 struct widget_cfg xb_widget; /* 0x000000 */
50
51 /* helper fieldnames for accessing bridge widget */
52
53#define xb_wid_id xb_widget.w_id
54#define xb_wid_stat xb_widget.w_status
55#define xb_wid_err_upper xb_widget.w_err_upper_addr
56#define xb_wid_err_lower xb_widget.w_err_lower_addr
57#define xb_wid_control xb_widget.w_control
58#define xb_wid_req_timeout xb_widget.w_req_timeout
59#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
60#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
61#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
62#define xb_wid_llp xb_widget.w_llp_cfg
63#define xb_wid_stat_clr xb_widget.w_tflush
64
65/*
66 * we access these through synergy unswizzled space, so the address
67 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
68 * That's why we put the register first and filler second.
69 */
70 /* xbow-specific widget configuration 0x000058-0x0000FF */
71 uint32_t xb_wid_arb_reload; /* 0x00005C */
72 uint32_t _pad_000058;
73 uint32_t xb_perf_ctr_a; /* 0x000064 */
74 uint32_t _pad_000060;
75 uint32_t xb_perf_ctr_b; /* 0x00006c */
76 uint32_t _pad_000068;
77 uint32_t xb_nic; /* 0x000074 */
78 uint32_t _pad_000070;
79
80 /* Xbridge only */
81 uint32_t xb_w0_rst_fnc; /* 0x00007C */
82 uint32_t _pad_000078;
83 uint32_t xb_l8_rst_fnc; /* 0x000084 */
84 uint32_t _pad_000080;
85 uint32_t xb_l9_rst_fnc; /* 0x00008c */
86 uint32_t _pad_000088;
87 uint32_t xb_la_rst_fnc; /* 0x000094 */
88 uint32_t _pad_000090;
89 uint32_t xb_lb_rst_fnc; /* 0x00009c */
90 uint32_t _pad_000098;
91 uint32_t xb_lc_rst_fnc; /* 0x0000a4 */
92 uint32_t _pad_0000a0;
93 uint32_t xb_ld_rst_fnc; /* 0x0000ac */
94 uint32_t _pad_0000a8;
95 uint32_t xb_le_rst_fnc; /* 0x0000b4 */
96 uint32_t _pad_0000b0;
97 uint32_t xb_lf_rst_fnc; /* 0x0000bc */
98 uint32_t _pad_0000b8;
99 uint32_t xb_lock; /* 0x0000c4 */
100 uint32_t _pad_0000c0;
101 uint32_t xb_lock_clr; /* 0x0000cc */
102 uint32_t _pad_0000c8;
103 /* end of Xbridge only */
104 uint32_t _pad_0000d0[12];
105
106 /* Link Specific Registers, port 8..15 0x000100-0x000300 */
107 xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
108#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
109
110} xbow_t;
111
112#define XB_FLAGS_EXISTS 0x1 /* device exists */
113#define XB_FLAGS_MASTER 0x2
114#define XB_FLAGS_SLAVE 0x0
115#define XB_FLAGS_GBR 0x4
116#define XB_FLAGS_16BIT 0x8
117#define XB_FLAGS_8BIT 0x0
118
119/* is widget port number valid? (based on version 7.0 of xbow spec) */
120#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
121
122/* whether to use upper or lower arbitration register, given source widget id */
123#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
124#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
125
126/* offset of arbitration register, given source widget id */
127#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
128
129#define XBOW_WID_ID WIDGET_ID
130#define XBOW_WID_STAT WIDGET_STATUS
131#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
132#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
133#define XBOW_WID_CONTROL WIDGET_CONTROL
134#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
135#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
136#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
137#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
138#define XBOW_WID_LLP WIDGET_LLP_CFG
139#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
140#define XBOW_WID_ARB_RELOAD 0x5c
141#define XBOW_WID_PERF_CTR_A 0x64
142#define XBOW_WID_PERF_CTR_B 0x6c
143#define XBOW_WID_NIC 0x74
144
145/* Xbridge only */
146#define XBOW_W0_RST_FNC 0x00007C
147#define XBOW_L8_RST_FNC 0x000084
148#define XBOW_L9_RST_FNC 0x00008c
149#define XBOW_LA_RST_FNC 0x000094
150#define XBOW_LB_RST_FNC 0x00009c
151#define XBOW_LC_RST_FNC 0x0000a4
152#define XBOW_LD_RST_FNC 0x0000ac
153#define XBOW_LE_RST_FNC 0x0000b4
154#define XBOW_LF_RST_FNC 0x0000bc
155#define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \
156 (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
157 ((x) == 0) ? XBOW_W0_RST_FNC : 0
158#define XBOW_LOCK 0x0000c4
159#define XBOW_LOCK_CLR 0x0000cc
160/* End of Xbridge only */
161
162/* used only in ide, but defined here within the reserved portion */
163/* of the widget0 address space (before 0xf4) */
164#define XBOW_WID_UNDEF 0xe4
165
166/* xbow link register set base, legal value for x is 0x8..0xf */
167#define XB_LINK_BASE 0x100
168#define XB_LINK_OFFSET 0x40
169#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
170
171#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
172#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
173#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
174#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
175#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
176#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
177#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
178#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
179
180/* link_control(x) */
181#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
182 /* reserved: 0x40000000 */
183#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
184#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */
185#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */
186#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */
187#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */
188#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */
189#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */
190#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */
191 /* reserved: 0x0000fe00 */
192#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
193#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
194#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
195#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
196#define XB_CTRL_RCV_IE 0x00000010 /* receive */
197#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
198 /* reserved: 0x00000004 */
199#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */
200#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
201
202/* link_status(x) */
203#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
204 /* reserved: 0x7ff80000 */
205#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
206#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
207#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
208#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
209#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
210#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
211#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
212#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
213#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
214 /* reserved: 0x00000004 */
215#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
216#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
217
218/* link_aux_status(x) */
219#define XB_AUX_STAT_RCV_CNT 0xff000000
220#define XB_AUX_STAT_XMT_CNT 0x00ff0000
221#define XB_AUX_STAT_TOUT_DST 0x0000ff00
222#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
223#define XB_AUX_STAT_PRESENT 0x00000020
224#define XB_AUX_STAT_PORT_WIDTH 0x00000010
225 /* reserved: 0x0000000f */
226
227/*
228 * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
229 * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
230 */
231#define XB_ARB_GBR_MSK 0x1f
232#define XB_ARB_RR_MSK 0x7
233#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
234#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
235#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
236#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
237
238/* XBOW_WID_STAT */
239#define XB_WID_STAT_LINK_INTR_SHFT (24)
240#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
241#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
242#define XB_WID_STAT_WIDGET0_INTR 0x00800000
243#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
244#define XB_WID_STAT_REG_ACC_ERR 0x00000020
245#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
246#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
247#define XB_WID_STAT_XTALK_ERR 0x00000004
248#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
249#define XB_WID_STAT_MULTI_ERR 0x00000001
250
251#define XB_WID_STAT_SRCID_SHFT 6
252
253/* XBOW_WID_CONTROL */
254#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
255#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
256#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
257#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
258
259/* XBOW_WID_INT_UPPER */
260/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
261
262/* XBOW WIDGET part number, in the ID register */
263#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
264#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
265#define XBOW_WIDGET_MFGR_NUM 0x0
266#define XXBOW_WIDGET_MFGR_NUM 0x0
267#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
268
269#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
270#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
271#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
272#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
273#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
274
275#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
276#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
277
278/* XBOW_WID_ARB_RELOAD */
279#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
280
281#define IS_XBRIDGE_XBOW(wid) \
282 (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
283 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
284
285#define IS_PIC_XBOW(wid) \
286 (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
287 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
288
289#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
290
291#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
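
Each link arbitration register above packs one byte per widget: five bits of GBR count followed by three bits of round-robin count, with the byte selected by the low two bits of the widget id. A standalone check of the shift and mask macros:

    #include <stdio.h>
    #include <stdint.h>

    #define XB_ARB_GBR_MSK     0x1f
    #define XB_ARB_RR_MSK      0x7
    #define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
    #define XB_ARB_RR_SHFT(x)  (((x) & 0x3) * 8 + 5)
    #define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
    #define XB_ARB_RR_CNT(reg,x)  ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)

    int main(void)
    {
            /* widget 0x9 lands in byte 1 of the upper arbitration register */
            uint32_t reg = (0x11u << XB_ARB_GBR_SHFT(0x9)) |
                           (0x5u  << XB_ARB_RR_SHFT(0x9));

            printf("widget 0x9: gbr=%u rr=%u\n",   /* gbr=17 rr=5 */
                   (unsigned)XB_ARB_GBR_CNT(reg, 0x9),
                   (unsigned)XB_ARB_RR_CNT(reg, 0x9));
            return 0;
    }
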
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h
new file mode 100644
index 000000000000..c5f4bc5cc033
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/xwidgetdev.h
@@ -0,0 +1,70 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
9#define _ASM_IA64_SN_XTALK_XWIDGET_H
10
11/* WIDGET_ID */
12#define WIDGET_REV_NUM 0xf0000000
13#define WIDGET_PART_NUM 0x0ffff000
14#define WIDGET_MFG_NUM 0x00000ffe
15#define WIDGET_REV_NUM_SHFT 28
16#define WIDGET_PART_NUM_SHFT 12
17#define WIDGET_MFG_NUM_SHFT 1
18
19#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
20#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
21#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
22#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
23 XWIDGET_REV_NUM(widgetid))
24#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
25
26/* widget configuration registers */
27struct widget_cfg{
28 uint32_t w_id; /* 0x04 */
29 uint32_t w_pad_0; /* 0x00 */
30 uint32_t w_status; /* 0x0c */
31 uint32_t w_pad_1; /* 0x08 */
32 uint32_t w_err_upper_addr; /* 0x14 */
33 uint32_t w_pad_2; /* 0x10 */
34 uint32_t w_err_lower_addr; /* 0x1c */
35 uint32_t w_pad_3; /* 0x18 */
36 uint32_t w_control; /* 0x24 */
37 uint32_t w_pad_4; /* 0x20 */
38 uint32_t w_req_timeout; /* 0x2c */
39 uint32_t w_pad_5; /* 0x28 */
40 uint32_t w_intdest_upper_addr; /* 0x34 */
41 uint32_t w_pad_6; /* 0x30 */
42 uint32_t w_intdest_lower_addr; /* 0x3c */
43 uint32_t w_pad_7; /* 0x38 */
44 uint32_t w_err_cmd_word; /* 0x44 */
45 uint32_t w_pad_8; /* 0x40 */
46 uint32_t w_llp_cfg; /* 0x4c */
47 uint32_t w_pad_9; /* 0x48 */
48 uint32_t w_tflush; /* 0x54 */
49 uint32_t w_pad_10; /* 0x50 */
50};
51
52/*
53 * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
54 */
55struct xwidget_hwid{
56 int mfg_num;
57 int rev_num;
58 int part_num;
59};
60
61struct xwidget_info{
62
63 struct xwidget_hwid xwi_hwid; /* Widget Identification */
64 char xwi_masterxid; /* Hub's Widget Port Number */
65 void *xwi_hubinfo; /* Hub's provider private info */
66 uint64_t *xwi_hub_provider; /* prom provider functions */
67 void *xwi_vertex;
68};
69
70#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
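
The WIDGET_ID masks and shifts above decode one 32-bit w_id register into revision, part and manufacturer fields. A standalone sketch that decodes a made-up Xbridge-style id (rev 2, part 0xd000, mfg 0):

    #include <stdio.h>
    #include <stdint.h>

    #define WIDGET_REV_NUM       0xf0000000
    #define WIDGET_PART_NUM      0x0ffff000
    #define WIDGET_MFG_NUM       0x00000ffe
    #define WIDGET_REV_NUM_SHFT  28
    #define WIDGET_PART_NUM_SHFT 12
    #define WIDGET_MFG_NUM_SHFT  1

    #define XWIDGET_PART_NUM(id) (((id) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
    #define XWIDGET_REV_NUM(id)  (((id) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
    #define XWIDGET_MFG_NUM(id)  (((id) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)

    int main(void)
    {
            uint32_t id = (2u << WIDGET_REV_NUM_SHFT) |
                          (0xd000u << WIDGET_PART_NUM_SHFT);

            printf("part=0x%lx rev=%lu mfg=0x%lx\n",
                   (unsigned long)XWIDGET_PART_NUM(id),
                   (unsigned long)XWIDGET_REV_NUM(id),
                   (unsigned long)XWIDGET_MFG_NUM(id));
            return 0;   /* part=0xd000 rev=2 mfg=0x0 */
    }
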
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
new file mode 100644
index 000000000000..6c7f4d9e8ea0
--- /dev/null
+++ b/arch/ia64/sn/kernel/Makefile
@@ -0,0 +1,12 @@
1# arch/ia64/sn/kernel/Makefile
2#
3# This file is subject to the terms and conditions of the GNU General Public
4# License. See the file "COPYING" in the main directory of this archive
5# for more details.
6#
7# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
8#
9
10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
11 huberror.o io_init.o iomv.o klconflib.o sn2/
12obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
new file mode 100644
index 000000000000..ce0bc4085eae
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte.c
@@ -0,0 +1,453 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <asm/sn/nodepda.h>
12#include <asm/sn/addrs.h>
13#include <asm/sn/arch.h>
14#include <asm/sn/sn_cpuid.h>
15#include <asm/sn/pda.h>
16#include <asm/sn/shubio.h>
17#include <asm/nodedata.h>
18#include <asm/delay.h>
19
20#include <linux/bootmem.h>
21#include <linux/string.h>
22#include <linux/sched.h>
23
24#include <asm/sn/bte.h>
25
26#ifndef L1_CACHE_MASK
27#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
28#endif
29
30/* two interfaces on two btes */
31#define MAX_INTERFACES_TO_TRY 4
32
33static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
34{
35 nodepda_t *tmp_nodepda;
36
37 tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
38 return &tmp_nodepda->bte_if[interface];
39
40}
41
42/************************************************************************
43 * Block Transfer Engine copy related functions.
44 *
45 ***********************************************************************/
46
47/*
48 * bte_copy(src, dest, len, mode, notification)
49 *
50 * Use the block transfer engine to move kernel memory from src to dest
51 * using the assigned mode.
52 *
53 * Parameters:
54 * src - physical address of the transfer source.
55 * dest - physical address of the transfer destination.
56 * len - number of bytes to transfer from source to dest.
57 * mode - hardware defined. See reference information
58 * for IBCT0/1 in the SHUB Programmers Reference
59 * notification - kernel virtual address of the notification cache
60 * line. If NULL, the default is used and
61 * the bte_copy is synchronous.
62 *
63 * NOTE: This function requires src, dest, and len to
64 * be cacheline aligned.
65 */
66bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
67{
68 u64 transfer_size;
69 u64 transfer_stat;
70 struct bteinfo_s *bte;
71 bte_result_t bte_status;
72 unsigned long irq_flags;
73 unsigned long itc_end = 0;
74 struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
75 int bte_if_index;
76 int bte_pri, bte_sec;
77
78 BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
79 src, dest, len, mode, notification));
80
81 if (len == 0) {
82 return BTE_SUCCESS;
83 }
84
85 BUG_ON((len & L1_CACHE_MASK) ||
86 (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
87 BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
88
89 /* CPU 0 (per node) tries bte0 first, CPU 1 tries bte1 first */
90 if (cpuid_to_subnode(smp_processor_id()) == 0) {
91 bte_pri = 0;
92 bte_sec = 1;
93 } else {
94 bte_pri = 1;
95 bte_sec = 0;
96 }
97
98 if (mode & BTE_USE_DEST) {
99 /* try remote then local */
100 btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri);
101 btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec);
102 if (mode & BTE_USE_ANY) {
103 btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri);
104 btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec);
105 } else {
106 btes_to_try[2] = NULL;
107 btes_to_try[3] = NULL;
108 }
109 } else {
110 /* try local then remote */
111 btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri);
112 btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec);
113 if (mode & BTE_USE_ANY) {
114 btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri);
115 btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec);
116 } else {
117 btes_to_try[2] = NULL;
118 btes_to_try[3] = NULL;
119 }
120 }
121
122retry_bteop:
123 do {
124 local_irq_save(irq_flags);
125
126 bte_if_index = 0;
127
128 /* Attempt to lock one of the BTE interfaces. */
129 while (bte_if_index < MAX_INTERFACES_TO_TRY) {
130 bte = btes_to_try[bte_if_index++];
131
132 if (bte == NULL) {
133 continue;
134 }
135
136 if (spin_trylock(&bte->spinlock)) {
137 if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
138 (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
139 /* Got the lock but BTE still busy */
140 spin_unlock(&bte->spinlock);
141 } else {
142 /* we got the lock and it's not busy */
143 break;
144 }
145 }
146 bte = NULL;
147 }
148
149 if (bte != NULL) {
150 break;
151 }
152
153 local_irq_restore(irq_flags);
154
155 if (!(mode & BTE_WACQUIRE)) {
156 return BTEFAIL_NOTAVAIL;
157 }
158 } while (1);
159
160 if (notification == NULL) {
161 /* User does not want to be notified. */
162 bte->most_rcnt_na = &bte->notify;
163 } else {
164 bte->most_rcnt_na = notification;
165 }
166
167 /* Calculate the number of cache lines to transfer. */
168 transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
169
170 /* Initialize the notification to a known value. */
171 *bte->most_rcnt_na = BTE_WORD_BUSY;
172
173 /* Set the status reg busy bit and transfer length */
174 BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
175 BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
176
177 /* Set the source and destination registers */
178 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
179 BTE_SRC_STORE(bte, TO_PHYS(src));
180 BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
181 BTE_DEST_STORE(bte, TO_PHYS(dest));
182
183 /* Set the notification register */
184 BTE_PRINTKV(("IBNA = 0x%lx)\n",
185 TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
186 BTE_NOTIF_STORE(bte,
187 TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
188
189 /* Initiate the transfer */
190 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
191 BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
192
193 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
194
195 spin_unlock_irqrestore(&bte->spinlock, irq_flags);
196
197 if (notification != NULL) {
198 return BTE_SUCCESS;
199 }
200
201 while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
202 if (ia64_get_itc() > itc_end) {
203 BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
204 NASID_GET(bte->bte_base_addr), bte->bte_num,
205 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
206 bte->bte_error_count++;
207 bte->bh_error = IBLS_ERROR;
208 bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
209 *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
210 goto retry_bteop;
211 }
212 }
213
214 BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
215 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
216
217 if (transfer_stat & IBLS_ERROR) {
218 bte_status = transfer_stat & ~IBLS_ERROR;
219 } else {
220 bte_status = BTE_SUCCESS;
221 }
222 *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
223
224 BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
225 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
226
227 return bte_status;
228}
229
230EXPORT_SYMBOL(bte_copy);
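A minimal usage sketch of the interface above (not part of the original file): it assumes src_pa and dst_pa are valid, L1-cacheline-aligned physical addresses, and that the BTE_NOTIFY transfer-mode bit comes from <asm/sn/bte.h> alongside the BTE_WACQUIRE bit tested in bte_copy() itself.

static bte_result_t example_sync_page_copy(u64 src_pa, u64 dst_pa)
{
	/*
	 * A NULL notification pointer makes bte_copy() spin until the
	 * transfer completes; BTE_WACQUIRE keeps retrying until some
	 * BTE interface is free.  src, dest, and len must all be
	 * cacheline aligned, per the NOTE in the function header.
	 */
	return bte_copy(src_pa, dst_pa, PAGE_SIZE,
			(BTE_NOTIFY | BTE_WACQUIRE), NULL);
}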
231
232/*
233 * bte_unaligned_copy(src, dest, len, mode)
234 *
235 * Use the block transfer engine to move kernel
236 * memory from src to dest using the assigned mode.
237 *
238 * Parameters:
239 * src - physical address of the transfer source.
240 * dest - physical address of the transfer destination.
241 * len - number of bytes to transfer from source to dest.
242 * mode - hardware defined. See reference information
243 * for IBCT0/1 in the SGI documentation.
244 *
245 * NOTE: If the source, dest, and len are all cache line aligned,
246 *       then it would be _FAR_ preferable to use bte_copy instead.
247 */
248bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
249{
250 int destFirstCacheOffset;
251 u64 headBteSource;
252 u64 headBteLen;
253 u64 headBcopySrcOffset;
254 u64 headBcopyDest;
255 u64 headBcopyLen;
256 u64 footBteSource;
257 u64 footBteLen;
258 u64 footBcopyDest;
259 u64 footBcopyLen;
260 bte_result_t rv;
261 char *bteBlock, *bteBlock_unaligned;
262
263 if (len == 0) {
264 return BTE_SUCCESS;
265 }
266
267 /* temporary buffer used during unaligned transfers */
268 bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
269 GFP_KERNEL | GFP_DMA);
270 if (bteBlock_unaligned == NULL) {
271 return BTEFAIL_NOTAVAIL;
272 }
273 bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
274
275 headBcopySrcOffset = src & L1_CACHE_MASK;
276 destFirstCacheOffset = dest & L1_CACHE_MASK;
277
278 /*
279 * At this point, the transfer is broken into
280 * (up to) three sections. The first section is
281 * from the start address to the first physical
282 * cache line, the second is from the first physical
283 * cache line to the last complete cache line,
284 * and the third is from the last cache line to the
285 * end of the buffer. The first and third sections
286 * are handled by bte copying into a temporary buffer
287 * and then bcopy'ing the necessary section into the
288 * final location. The middle section is handled with
289 * a standard bte copy.
290 *
291 * One nasty exception to the above rule is when the
292	 * source and destination are not symmetrically
293	 * mis-aligned.  If the source offset from the first
294	 * cache line is different from the destination offset,
295	 * we make the first section be the entire transfer
296	 * and then bcopy the entire block into place.
297 */
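	/*
	 * Worked example (illustrative, assuming ia64's 128-byte L1
	 * cache lines): src offset 8, dest offset 8, len 300.  The
	 * head covers bytes 0..119 (one line bte_copy'd into the
	 * temporary buffer, then memcpy'd into place), the middle
	 * covers bytes 120..247 (one full line, direct bte_copy),
	 * and the foot covers bytes 248..299 (again staged through
	 * the temporary buffer).
	 */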
298 if (headBcopySrcOffset == destFirstCacheOffset) {
299
300 /*
301 * Both the source and destination are the same
302 * distance from a cache line boundary so we can
303 * use the bte to transfer the bulk of the
304 * data.
305 */
306 headBteSource = src & ~L1_CACHE_MASK;
307 headBcopyDest = dest;
308 if (headBcopySrcOffset) {
309 headBcopyLen =
310 (len >
311 (L1_CACHE_BYTES -
312 headBcopySrcOffset) ? L1_CACHE_BYTES
313 - headBcopySrcOffset : len);
314 headBteLen = L1_CACHE_BYTES;
315 } else {
316 headBcopyLen = 0;
317 headBteLen = 0;
318 }
319
320 if (len > headBcopyLen) {
321 footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
322 footBteLen = L1_CACHE_BYTES;
323
324 footBteSource = src + len - footBcopyLen;
325 footBcopyDest = dest + len - footBcopyLen;
326
327 if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
328 /*
329			 * We have two contiguous bcopy
330 * blocks. Merge them.
331 */
332 headBcopyLen += footBcopyLen;
333 headBteLen += footBteLen;
334 } else if (footBcopyLen > 0) {
335 rv = bte_copy(footBteSource,
336 ia64_tpa((unsigned long)bteBlock),
337 footBteLen, mode, NULL);
338 if (rv != BTE_SUCCESS) {
339 kfree(bteBlock_unaligned);
340 return rv;
341 }
342
343 memcpy(__va(footBcopyDest),
344 (char *)bteBlock, footBcopyLen);
345 }
346 } else {
347 footBcopyLen = 0;
348 footBteLen = 0;
349 }
350
351 if (len > (headBcopyLen + footBcopyLen)) {
352 /* now transfer the middle. */
353 rv = bte_copy((src + headBcopyLen),
354 (dest +
355 headBcopyLen),
356 (len - headBcopyLen -
357 footBcopyLen), mode, NULL);
358 if (rv != BTE_SUCCESS) {
359 kfree(bteBlock_unaligned);
360 return rv;
361 }
362
363 }
364 } else {
365
366 /*
367	 * The transfer is not symmetric; we will
368 * allocate a buffer large enough for all the
369 * data, bte_copy into that buffer and then
370 * bcopy to the destination.
371 */
372
373 /* Add the leader from source */
374 headBteLen = len + (src & L1_CACHE_MASK);
375 /* Add the trailing bytes from footer. */
376 headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
377 headBteSource = src & ~L1_CACHE_MASK;
378 headBcopySrcOffset = src & L1_CACHE_MASK;
379 headBcopyDest = dest;
380 headBcopyLen = len;
381 }
382
383 if (headBcopyLen > 0) {
384 rv = bte_copy(headBteSource,
385 ia64_tpa((unsigned long)bteBlock), headBteLen,
386 mode, NULL);
387 if (rv != BTE_SUCCESS) {
388 kfree(bteBlock_unaligned);
389 return rv;
390 }
391
392 memcpy(__va(headBcopyDest), ((char *)bteBlock +
393 headBcopySrcOffset), headBcopyLen);
394 }
395 kfree(bteBlock_unaligned);
396 return BTE_SUCCESS;
397}
398
399EXPORT_SYMBOL(bte_unaligned_copy);
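For contrast with the aligned path, a hedged call sketch (the wrapper name is hypothetical; BTE_NOTIFY is assumed from <asm/sn/bte.h>). Note there is no notification argument: this path is always synchronous.

static bte_result_t example_unaligned_copy(u64 src_pa, u64 dst_pa, u64 nbytes)
{
	/* Arbitrary alignment is fine here, at the cost of the
	 * temporary bounce buffer described above. */
	return bte_unaligned_copy(src_pa, dst_pa, nbytes, BTE_NOTIFY);
}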
400
401/************************************************************************
402 * Block Transfer Engine initialization functions.
403 *
404 ***********************************************************************/
405
406/*
407 * bte_init_node(nodepda, cnode)
408 *
409 * Initialize the nodepda structure with BTE base addresses and
410 * spinlocks.
411 */
412void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
413{
414 int i;
415
416 /*
417 * Indicate that all the block transfer engines on this node
418 * are available.
419 */
420
421 /*
422 * Allocate one bte_recover_t structure per node. It holds
423	 * the recovery lock for the node. All the bte interface structures
424 * will point at this one bte_recover structure to get the lock.
425 */
426 spin_lock_init(&mynodepda->bte_recovery_lock);
427 init_timer(&mynodepda->bte_recovery_timer);
428 mynodepda->bte_recovery_timer.function = bte_error_handler;
429 mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
430
431 for (i = 0; i < BTES_PER_NODE; i++) {
432 /* Which link status register should we use? */
433 unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
434 mynodepda->bte_if[i].bte_base_addr = (u64 *)
435 REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
436
437 /*
438 * Initialize the notification and spinlock
439 * so the first transfer can occur.
440 */
441 mynodepda->bte_if[i].most_rcnt_na =
442 &(mynodepda->bte_if[i].notify);
443 mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE;
444 spin_lock_init(&mynodepda->bte_if[i].spinlock);
445
446 mynodepda->bte_if[i].bte_cnode = cnode;
447 mynodepda->bte_if[i].bte_error_count = 0;
448 mynodepda->bte_if[i].bte_num = i;
449 mynodepda->bte_if[i].cleanup_active = 0;
450 mynodepda->bte_if[i].bh_error = 0;
451 }
452
453}
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
new file mode 100644
index 000000000000..fd104312c6bd
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -0,0 +1,198 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <asm/sn/sn_sal.h>
11#include "ioerror.h"
12#include <asm/sn/addrs.h>
13#include <asm/sn/shubio.h>
14#include <asm/sn/geo.h>
15#include "xtalk/xwidgetdev.h"
16#include "xtalk/hubdev.h"
17#include <asm/sn/bte.h>
18#include <asm/param.h>
19
20/*
21 * Bte error handling is done in two parts. The first captures
22 * any crb related errors. Since there can be multiple crbs per
23 * interface and multiple interfaces active, we need to wait until
24 * all active crbs are completed. This is the first job of the
25 * second part error handler. When all bte related CRBs are cleanly
26 * completed, it resets the interfaces and gets them ready for new
27 * transfers to be queued.
28 */
29
30void bte_error_handler(unsigned long);
31
32/*
33 * Wait until all BTE related CRBs are completed
34 * and then reset the interfaces.
35 */
36void bte_error_handler(unsigned long _nodepda)
37{
38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
39 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
40 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
41 nasid_t nasid;
42 int i;
43 int valid_crbs;
44 unsigned long irq_flags;
45 volatile u64 *notify;
46 bte_result_t bh_error;
47 ii_imem_u_t imem; /* II IMEM Register */
48 ii_icrb0_d_u_t icrbd; /* II CRB Register D */
49 ii_ibcr_u_t ibcr;
50 ii_icmr_u_t icmr;
51 ii_ieclr_u_t ieclr;
52
53 BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
54 smp_processor_id()));
55
56 spin_lock_irqsave(recovery_lock, irq_flags);
57
58 if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
59 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
60 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
61 smp_processor_id()));
62 spin_unlock_irqrestore(recovery_lock, irq_flags);
63 return;
64 }
65 /*
66 * Lock all interfaces on this node to prevent new transfers
67 * from being queued.
68 */
69 for (i = 0; i < BTES_PER_NODE; i++) {
70 if (err_nodepda->bte_if[i].cleanup_active) {
71 continue;
72 }
73 spin_lock(&err_nodepda->bte_if[i].spinlock);
74 BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
75 smp_processor_id(), i));
76 err_nodepda->bte_if[i].cleanup_active = 1;
77 }
78
79 /* Determine information about our hub */
80 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
81
82 /*
83 * A BTE transfer can use multiple CRBs. We need to make sure
84 * that all the BTE CRBs are complete (or timed out) before
85 * attempting to clean up the error. Resetting the BTE while
86 * there are still BTE CRBs active will hang the BTE.
87 * We should look at all the CRBs to see if they are allocated
88 * to the BTE and see if they are still active. When none
89 * are active, we can continue with the cleanup.
90 *
91 * We also want to make sure that the local NI port is up.
92 * When a router resets the NI port can go down, while it
93 * goes through the LLP handshake, but then comes back up.
94 */
95 icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
96 if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
97 /*
98 * There are errors which still need to be cleaned up by
99 * hubiio_crb_error_handler
100 */
101		mod_timer(recovery_timer, jiffies + (HZ * 5));
102 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
103 smp_processor_id()));
104 spin_unlock_irqrestore(recovery_lock, irq_flags);
105 return;
106 }
107 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
108
109 valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
110
111 for (i = 0; i < IIO_NUM_CRBS; i++) {
112 if (!((1 << i) & valid_crbs)) {
113 /* This crb was not marked as valid, ignore */
114 continue;
115 }
116 icrbd.ii_icrb0_d_regval =
117 REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
118 if (icrbd.d_bteop) {
119				mod_timer(recovery_timer, jiffies + (HZ * 5));
120 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
121 err_nodepda, smp_processor_id(),
122 i));
123 spin_unlock_irqrestore(recovery_lock,
124 irq_flags);
125 return;
126 }
127 }
128 }
129
130 BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
131 /* Reenable both bte interfaces */
132 imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
133 imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
134 REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
135
136 /* Clear BTE0/1 error bits */
137 ieclr.ii_ieclr_regval = 0;
138 if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
139 ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
140 if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
141 ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
142 REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
143
144 /* Reinitialize both BTE state machines. */
145 ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
146 ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
147 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
148
149 for (i = 0; i < BTES_PER_NODE; i++) {
150 bh_error = err_nodepda->bte_if[i].bh_error;
151 if (bh_error != BTE_SUCCESS) {
152 /* There is an error which needs to be notified */
153 notify = err_nodepda->bte_if[i].most_rcnt_na;
154 BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
155 err_nodepda->bte_if[i].bte_cnode,
156 err_nodepda->bte_if[i].bte_num,
157 IBLS_ERROR | (u64) bh_error));
158 *notify = IBLS_ERROR | bh_error;
159 err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
160 }
161
162 err_nodepda->bte_if[i].cleanup_active = 0;
163 BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
164 smp_processor_id(), i));
165 spin_unlock(&err_nodepda->bte_if[i].spinlock);
166 }
167
168 del_timer(recovery_timer);
169
170 spin_unlock_irqrestore(recovery_lock, irq_flags);
171}
172
173/*
174 * First part error handler. This is called whenever any error CRB interrupt
175 * is generated by the II.
176 */
177void
178bte_crb_error_handler(cnodeid_t cnode, int btenum,
179 int crbnum, ioerror_t * ioe, int bteop)
180{
181 struct bteinfo_s *bte;
182
183
184 bte = &(NODEPDA(cnode)->bte_if[btenum]);
185
186 /*
187 * The caller has already figured out the error type, we save that
188	 * in the bte handle structure for the thread exercising the
189 * interface to consume.
190 */
191 bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
192 bte->bte_error_count++;
193
194 BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
195 bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
196 bte_error_handler((unsigned long) NODEPDA(cnode));
197}
198
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
new file mode 100644
index 000000000000..2bdf684c5066
--- /dev/null
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -0,0 +1,201 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <asm/delay.h>
13#include <asm/sn/sn_sal.h>
14#include "ioerror.h"
15#include <asm/sn/addrs.h>
16#include <asm/sn/shubio.h>
17#include <asm/sn/geo.h>
18#include "xtalk/xwidgetdev.h"
19#include "xtalk/hubdev.h"
20#include <asm/sn/bte.h>
21
22void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
23extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *,
24 int);
25static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
26{
27 struct hubdev_info *hubdev_info;
28 struct ia64_sal_retval ret_stuff;
29 nasid_t nasid;
30
31 ret_stuff.status = 0;
32 ret_stuff.v0 = 0;
33 hubdev_info = (struct hubdev_info *)arg;
34 nasid = hubdev_info->hdi_nasid;
35 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
36 (u64) nasid, 0, 0, 0, 0, 0, 0);
37
38 if ((int)ret_stuff.v0)
39		panic("hub_eint_handler(): Fatal TIO Error");
40
41 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
42 (void)hubiio_crb_error_handler(hubdev_info);
43
44 return IRQ_HANDLED;
45}
46
47/*
48 * Free the hub CRB "crbnum" which encountered an error.
49 * Assumption is, error handling was successfully done,
50 * and we now want to return the CRB back to Hub for normal usage.
51 *
52 * In order to free the CRB, all that's needed is to de-allocate it
53 *
54 * Assumption:
55 * No other processor is mucking around with the hub control register.
56 * So, upper layer has to single thread this.
57 */
58void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
59{
60 ii_icrb0_b_u_t icrbb;
61
62 /*
63 * The hardware does NOT clear the mark bit, so it must get cleared
64 * here to be sure the error is not processed twice.
65 */
66 icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
67 IIO_ICRB_B(crbnum));
68 icrbb.b_mark = 0;
69 REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
70 icrbb.ii_icrb0_b_regval);
71 /*
72	 * Deallocate the register and wait until the hub indicates it's done.
73 */
74 REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
75 while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
76 udelay(1);
77
78}
79
80/*
81 * hubiio_crb_error_handler
82 *
83 * This routine gets invoked when a hub gets an error
84 * interrupt. So, the routine is running in interrupt context
85 * at error interrupt level.
86 * Action:
87 *	It's responsible for identifying ALL the CRBs that are marked
88 *	with an error, and processing them.
89 *
90 *	For each CRB found marked with an error, map it to the reason
91 *	the error occurred, and invoke the appropriate error handler.
92 *
93 * XXX Be aware of the information in the context register.
94 *
95 * NOTE:
96 * Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt
97 * handler can be run on any node. (not necessarily the node
98 *	handler can be run on any node. (not necessarily the node
99 */
100
101void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
102{
103 nasid_t nasid;
104 ii_icrb0_a_u_t icrba; /* II CRB Register A */
105 ii_icrb0_b_u_t icrbb; /* II CRB Register B */
106 ii_icrb0_c_u_t icrbc; /* II CRB Register C */
107 ii_icrb0_d_u_t icrbd; /* II CRB Register D */
108	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
109 int i;
110 int num_errors = 0; /* Num of errors handled */
111 ioerror_t ioerror;
112
113 nasid = hubdev_info->hdi_nasid;
114
115 /*
116 * XXX - Add locking for any recovery actions
117 */
118 /*
119 * Scan through all CRBs in the Hub, and handle the errors
120 * in any of the CRBs marked.
121 */
122 for (i = 0; i < IIO_NUM_CRBS; i++) {
123 /* Check this crb entry to see if it is in error. */
124 icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
125
126 if (icrbb.b_mark == 0) {
127 continue;
128 }
129
130 icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
131
132 IOERROR_INIT(&ioerror);
133
134 /* read other CRB error registers. */
135 icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
136 icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
137 icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
138
139 IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
140
141 /* Check if this error is due to BTE operation,
142 * and handle it separately.
143 */
144 if (icrbd.d_bteop ||
145 ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
146 icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
147 (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
148 icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
149
150 int bte_num;
151
152 if (icrbd.d_bteop)
153 bte_num = icrbc.c_btenum;
154 else /* b_initiator bit 2 gives BTE number */
155 bte_num = (icrbb.b_initiator & 0x4) >> 2;
156
157 hubiio_crb_free(hubdev_info, i);
158
159 bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
160 i, &ioerror, icrbd.d_bteop);
161 num_errors++;
162 continue;
163 }
164 }
165}
166
167/*
168 * Function : hub_error_init
169 * Purpose : initialize the error handling requirements for a given hub.
170 * Parameters : cnode, the compact nodeid.
171 * Assumptions	: Called only once per hub, either by a local cpu or by a
172 *			  remote cpu when this hub is headless (cpuless).
173 * Returns : None
174 */
175void hub_error_init(struct hubdev_info *hubdev_info)
176{
177 if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
178 "SN_hub_error", (void *)hubdev_info))
179		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
180 hubdev_info);
181 return;
182}
183
184
185/*
186 * Function : ice_error_init
187 * Purpose : initialize the error handling requirements for a given tio.
188 * Parameters : cnode, the compact nodeid.
189 * Assumptions : Called only once per tio.
190 * Returns : None
191 */
192void ice_error_init(struct hubdev_info *hubdev_info)
193{
194 if (request_irq
195 (SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
196 (void *)hubdev_info))
197		printk(KERN_ERR "ice_error_init: request_irq() error hubdev_info 0x%p\n",
198 hubdev_info);
199 return;
200}
201
diff --git a/arch/ia64/sn/kernel/idle.c b/arch/ia64/sn/kernel/idle.c
new file mode 100644
index 000000000000..49d178f022b5
--- /dev/null
+++ b/arch/ia64/sn/kernel/idle.c
@@ -0,0 +1,30 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <asm/sn/leds.h>
10
11void snidle(int state)
12{
13 if (state) {
14 if (pda->idle_flag == 0) {
15 /*
16 * Turn the activity LED off.
17 */
18 set_led_bits(0, LED_CPU_ACTIVITY);
19 }
20
21 pda->idle_flag = 1;
22 } else {
23 /*
24 * Turn the activity LED on.
25 */
26 set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
27
28 pda->idle_flag = 0;
29 }
30}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
new file mode 100644
index 000000000000..001880812b7c
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -0,0 +1,411 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/bootmem.h>
10#include <linux/nodemask.h>
11#include <asm/sn/types.h>
12#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h>
14#include "pci/pcibus_provider_defs.h"
15#include "pci/pcidev.h"
16#include "pci/pcibr_provider.h"
17#include "xtalk/xwidgetdev.h"
18#include <asm/sn/geo.h>
19#include "xtalk/hubdev.h"
20#include <asm/sn/io.h>
21#include <asm/sn/simulator.h>
22
23char master_baseio_wid;
24nasid_t master_nasid = INVALID_NASID; /* Partition Master */
25
26struct slab_info {
27 struct hubdev_info hubdev;
28};
29
30struct brick {
31 moduleid_t id; /* Module ID of this module */
32 struct slab_info slab_info[MAX_SLABS + 1];
33};
34
35int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
36
37/*
38 * Retrieve the DMA Flush List for the given nasid. This list is needed
39 * to implement the WAR - Flush DMA data on PIO Reads.
40 */
41static inline uint64_t
42sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
43{
44
45 struct ia64_sal_retval ret_stuff;
46 ret_stuff.status = 0;
47 ret_stuff.v0 = 0;
48
49 SAL_CALL_NOLOCK(ret_stuff,
50 (u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
51 (u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
52 0);
53 return ret_stuff.v0;
54
55}
56
57/*
58 * Retrieve the hub device info structure for the given nasid.
59 */
60static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
61{
62
63 struct ia64_sal_retval ret_stuff;
64 ret_stuff.status = 0;
65 ret_stuff.v0 = 0;
66
67 SAL_CALL_NOLOCK(ret_stuff,
68 (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
69 (u64) handle, (u64) address, 0, 0, 0, 0, 0);
70 return ret_stuff.v0;
71}
72
73/*
74 * Retrieve the pci bus information given the bus number.
75 */
76static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
77{
78
79 struct ia64_sal_retval ret_stuff;
80 ret_stuff.status = 0;
81 ret_stuff.v0 = 0;
82
83 SAL_CALL_NOLOCK(ret_stuff,
84 (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
85 (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
86 return ret_stuff.v0;
87}
88
89/*
90 * Retrieve the pci device information given the bus and device|function number.
91 */
92static inline uint64_t
93sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
94 u64 sn_irq_info)
95{
96 struct ia64_sal_retval ret_stuff;
97 ret_stuff.status = 0;
98 ret_stuff.v0 = 0;
99
100 SAL_CALL_NOLOCK(ret_stuff,
101 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
102 (u64) segment, (u64) bus_number, (u64) devfn,
103 (u64) pci_dev,
104 sn_irq_info, 0, 0);
105 return ret_stuff.v0;
106}
107
108/*
109 * sn_alloc_pci_sysdata() - This routine allocates a pci controller
110 * which is expected as the pci_dev and pci_bus sysdata by the Linux
111 * PCI infrastructure.
112 */
113static inline struct pci_controller *sn_alloc_pci_sysdata(void)
114{
115 struct pci_controller *pci_sysdata;
116
117 pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
118 if (!pci_sysdata)
119 BUG();
120
121 memset(pci_sysdata, 0, sizeof(*pci_sysdata));
122 return pci_sysdata;
123}
124
125/*
126 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
127 * each node in the system.
128 */
129static void sn_fixup_ionodes(void)
130{
131
132 struct sn_flush_device_list *sn_flush_device_list;
133 struct hubdev_info *hubdev;
134 uint64_t status;
135 uint64_t nasid;
136 int i, widget;
137
138 for (i = 0; i < numionodes; i++) {
139 hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
140 nasid = cnodeid_to_nasid(i);
141 status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
142 if (status)
143 continue;
144
145 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
146 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
147
148 if (!hubdev->hdi_flush_nasid_list.widget_p)
149 continue;
150
151 hubdev->hdi_flush_nasid_list.widget_p =
152 kmalloc((HUB_WIDGET_ID_MAX + 1) *
153 sizeof(struct sn_flush_device_list *), GFP_KERNEL);
154
155 memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
156 (HUB_WIDGET_ID_MAX + 1) *
157 sizeof(struct sn_flush_device_list *));
158
159 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
160 sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
161 sizeof(struct
162 sn_flush_device_list),
163 GFP_KERNEL);
164 memset(sn_flush_device_list, 0x0,
165 DEV_PER_WIDGET *
166 sizeof(struct sn_flush_device_list));
167
168 status =
169 sal_get_widget_dmaflush_list(nasid, widget,
170 (uint64_t)
171 __pa
172 (sn_flush_device_list));
173 if (status) {
174 kfree(sn_flush_device_list);
175 continue;
176 }
177
178 hubdev->hdi_flush_nasid_list.widget_p[widget] =
179 sn_flush_device_list;
180 }
181
182 if (!(i & 1))
183 hub_error_init(hubdev);
184 else
185 ice_error_init(hubdev);
186 }
187
188}
189
190/*
191 * sn_pci_fixup_slot() - This routine sets up a slot's resources
192 * consistent with the Linux PCI abstraction layer. Resources acquired
193 * from our PCI provider include PIO maps to BAR space and interrupt
194 * objects.
195 */
196static void sn_pci_fixup_slot(struct pci_dev *dev)
197{
198 int idx;
199 int segment = 0;
200 uint64_t size;
201 struct sn_irq_info *sn_irq_info;
202 struct pci_dev *host_pci_dev;
203 int status = 0;
204
205 dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
206	if (!SN_PCIDEV_INFO(dev))
207 BUG(); /* Cannot afford to run out of memory */
208 memset(SN_PCIDEV_INFO(dev), 0, sizeof(struct pcidev_info));
209
210 sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
211	if (!sn_irq_info)
212 BUG(); /* Cannot afford to run out of memory */
213 memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
214
215 /* Call to retrieve pci device information needed by kernel. */
216 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
217 dev->devfn,
218 (u64) __pa(SN_PCIDEV_INFO(dev)),
219 (u64) __pa(sn_irq_info));
220 if (status)
221		BUG();	/* Cannot get platform pci device information */
222
223 /* Copy over PIO Mapped Addresses */
224 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
225 unsigned long start, end, addr;
226
227 if (!SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx])
228 continue;
229
230 start = dev->resource[idx].start;
231 end = dev->resource[idx].end;
232 size = end - start;
233 addr = SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx];
234 addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
235 dev->resource[idx].start = addr;
236 dev->resource[idx].end = addr + size;
237 if (dev->resource[idx].flags & IORESOURCE_IO)
238 dev->resource[idx].parent = &ioport_resource;
239 else
240 dev->resource[idx].parent = &iomem_resource;
241 }
242
243 /* set up host bus linkages */
244 host_pci_dev =
245 pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
246 SN_PCIDEV_INFO(dev)->
247 pdi_slot_host_handle & 0xffffffff);
248 SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
249 SN_PCIDEV_INFO(host_pci_dev);
250 SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
251 SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
252
253 /* Only set up IRQ stuff if this device has a host bus context */
254 if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
255 SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
256 dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
257 sn_irq_fixup(dev, sn_irq_info);
258 }
259}
260
261/*
262 * sn_pci_controller_fixup() - This routine sets up a bus's resources
263 * consistent with the Linux PCI abstraction layer.
264 */
265static void sn_pci_controller_fixup(int segment, int busnum)
266{
267 int status = 0;
268 int nasid, cnode;
269 struct pci_bus *bus;
270 struct pci_controller *controller;
271 struct pcibus_bussoft *prom_bussoft_ptr;
272 struct hubdev_info *hubdev_info;
273 void *provider_soft;
274
275 status =
276 sal_get_pcibus_info((u64) segment, (u64) busnum,
277 (u64) ia64_tpa(&prom_bussoft_ptr));
278 if (status > 0) {
279 return; /* bus # does not exist */
280 }
281
282 prom_bussoft_ptr = __va(prom_bussoft_ptr);
283 controller = sn_alloc_pci_sysdata();
284	/* sn_alloc_pci_sysdata() BUGs on allocation failure, so controller is non-NULL here */
285
286 bus = pci_scan_bus(busnum, &pci_root_ops, controller);
287 if (bus == NULL) {
288 return; /* error, or bus already scanned */
289 }
290
291 /*
292 * Per-provider fixup. Copies the contents from prom to local
293 * area and links SN_PCIBUS_BUSSOFT().
294 *
295 * Note: Provider is responsible for ensuring that prom_bussoft_ptr
296 * represents an asic-type that it can handle.
297 */
298
299 if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
300 return; /* no further fixup necessary */
301 }
302
303 provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
304 if (provider_soft == NULL) {
305 return; /* fixup failed or not applicable */
306 }
307
308 /*
309 * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
310 * after this point.
311 */
312
313 bus->sysdata = controller;
314 PCI_CONTROLLER(bus)->platform_data = provider_soft;
315
316 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
317 cnode = nasid_to_cnodeid(nasid);
318 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
319 SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
320 &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
321}
322
323/*
324 * Ugly hack to get PCI setup until we have a proper ACPI namespace.
325 */
326
327#define PCI_BUSES_TO_SCAN 256
328
329static int __init sn_pci_init(void)
330{
331 int i = 0;
332 struct pci_dev *pci_dev = NULL;
333 extern void sn_init_cpei_timer(void);
334#ifdef CONFIG_PROC_FS
335 extern void register_sn_procfs(void);
336#endif
337
338 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
339 return 0;
340
341 /*
342 * This is needed to avoid bounce limit checks in the blk layer
343 */
344 ia64_max_iommu_merge_mask = ~PAGE_MASK;
345 sn_fixup_ionodes();
346 sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
347	if (!sn_irq)
348		BUG();	/* Cannot afford to run out of memory. */
349 memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
350
351 sn_init_cpei_timer();
352
353#ifdef CONFIG_PROC_FS
354 register_sn_procfs();
355#endif
356
357 for (i = 0; i < PCI_BUSES_TO_SCAN; i++) {
358 sn_pci_controller_fixup(0, i);
359 }
360
361 /*
362 * Generic Linux PCI Layer has created the pci_bus and pci_dev
363	 * structures - time for us to add our SN platform specific
364 * information.
365 */
366
367 while ((pci_dev =
368 pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
369 sn_pci_fixup_slot(pci_dev);
370 }
371
372 sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
373
374 return 0;
375}
376
377/*
378 * hubdev_init_node() - Creates the HUB data structure and links it to the
379 * node's own NODE-specific data area.
380 */
381void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
382{
383
384 struct hubdev_info *hubdev_info;
385
386 if (node >= num_online_nodes()) /* Headless/memless IO nodes */
387 hubdev_info =
388 (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
389 sizeof(struct
390 hubdev_info));
391 else
392 hubdev_info =
393 (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
394 sizeof(struct
395 hubdev_info));
396 npda->pdinfo = (void *)hubdev_info;
397
398}
399
400geoid_t
401cnodeid_get_geoid(cnodeid_t cnode)
402{
403
404 struct hubdev_info *hubdev;
405
406 hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
407 return hubdev->hdi_geoid;
408
409}
410
411subsys_initcall(sn_pci_init);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
new file mode 100644
index 000000000000..fec6d8b8237b
--- /dev/null
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -0,0 +1,70 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/module.h>
10#include <asm/io.h>
11#include <asm/delay.h>
12#include <asm/sn/nodepda.h>
13#include <asm/sn/simulator.h>
14#include <asm/sn/pda.h>
15#include <asm/sn/sn_cpuid.h>
16#include <asm/sn/shub_mmr.h>
17
18/**
19 * sn_io_addr - convert an in/out port to an i/o address
20 * @port: port to convert
21 *
22 * Legacy in/out instructions are converted to ld/st instructions
23 * on IA64. This routine will convert a port number into a valid
24 * SN i/o address. Used by sn_in*() and sn_out*().
25 */
26void *sn_io_addr(unsigned long port)
27{
28 if (!IS_RUNNING_ON_SIMULATOR()) {
29 /* On sn2, legacy I/O ports don't point at anything */
30 if (port < (64 * 1024))
31 return NULL;
32 return ((void *)(port | __IA64_UNCACHED_OFFSET));
33 } else {
34 /* but the simulator uses them... */
35 unsigned long addr;
36
37 /*
38 * word align port, but need more than 10 bits
39 * for accessing registers in bedrock local block
40 * (so we don't do port&0xfff)
41 */
42 addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
43 if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
44 addr |= port;
45 return (void *)addr;
46 }
47}
48
49EXPORT_SYMBOL(sn_io_addr);
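For illustration only, a sketch of how an inb-style accessor might sit on top of sn_io_addr(); the real sn_in*() routines live in the SN io headers, and example_sn_inb is a hypothetical name.

static inline unsigned int example_sn_inb(unsigned long port)
{
	volatile unsigned char *addr = sn_io_addr(port);

	/* A NULL return means the port does not map to anything. */
	return addr ? *addr : (unsigned int)-1;
}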
50
51/**
52 * __sn_mmiowb - I/O space memory barrier
53 *
54 * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
55 * for details.
56 *
57 * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
58 * See PV 871084 for details about the WAR for the zero value.
59 *
60 */
61void __sn_mmiowb(void)
62{
63 volatile unsigned long *adr = pda->pio_write_status_addr;
64 unsigned long val = pda->pio_write_status_val;
65
66 while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
67 cpu_relax();
68}
69
70EXPORT_SYMBOL(__sn_mmiowb);
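A hedged usage sketch (the function and register names are illustrative): drivers normally reach this routine through the generic mmiowb() macro after posting PIO writes, typically before dropping a lock that another CPU could take to touch the same device.

static void example_ordered_reg_write(volatile u32 *reg, u32 val,
				      spinlock_t *lock)
{
	spin_lock(lock);
	*reg = val;		/* posted PIO write to the device */
	__sn_mmiowb();		/* drain SHub PIO write status first */
	spin_unlock(lock);
}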
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
new file mode 100644
index 000000000000..3be44724f6c8
--- /dev/null
+++ b/arch/ia64/sn/kernel/irq.c
@@ -0,0 +1,431 @@
1/*
2 * Platform dependent support for SGI SN
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
9 */
10
11#include <linux/irq.h>
12#include <asm/sn/intr.h>
13#include <asm/sn/addrs.h>
14#include <asm/sn/arch.h>
15#include "xtalk/xwidgetdev.h"
16#include "pci/pcibus_provider_defs.h"
17#include "pci/pcidev.h"
18#include "pci/pcibr_provider.h"
19#include <asm/sn/shub_mmr.h>
20#include <asm/sn/sn_sal.h>
21
22static void force_interrupt(int irq);
23static void register_intr_pda(struct sn_irq_info *sn_irq_info);
24static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
25
26extern int sn_force_interrupt_flag;
27extern int sn_ioif_inited;
28struct sn_irq_info **sn_irq;
29
30static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
31 u64 sn_irq_info,
32 int req_irq, nasid_t req_nasid,
33 int req_slice)
34{
35 struct ia64_sal_retval ret_stuff;
36 ret_stuff.status = 0;
37 ret_stuff.v0 = 0;
38
39 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
40 (u64) SAL_INTR_ALLOC, (u64) local_nasid,
41 (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
42 (u64) req_nasid, (u64) req_slice);
43 return ret_stuff.status;
44}
45
46static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
47 struct sn_irq_info *sn_irq_info)
48{
49 struct ia64_sal_retval ret_stuff;
50 ret_stuff.status = 0;
51 ret_stuff.v0 = 0;
52
53 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
54 (u64) SAL_INTR_FREE, (u64) local_nasid,
55 (u64) local_widget, (u64) sn_irq_info->irq_irq,
56 (u64) sn_irq_info->irq_cookie, 0, 0);
57}
58
59static unsigned int sn_startup_irq(unsigned int irq)
60{
61 return 0;
62}
63
64static void sn_shutdown_irq(unsigned int irq)
65{
66}
67
68static void sn_disable_irq(unsigned int irq)
69{
70}
71
72static void sn_enable_irq(unsigned int irq)
73{
74}
75
76static void sn_ack_irq(unsigned int irq)
77{
78 uint64_t event_occurred, mask = 0;
79 int nasid;
80
81 irq = irq & 0xff;
82 nasid = get_nasid();
83 event_occurred =
84 HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
85 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
86 mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
87 }
88 if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
89 mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
90 }
91 if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
92 mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
93 }
94 if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
95 mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
96 }
97 HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
98 mask);
99 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
100
101 move_irq(irq);
102}
103
104static void sn_end_irq(unsigned int irq)
105{
106 int nasid;
107 int ivec;
108 uint64_t event_occurred;
109
110 ivec = irq & 0xff;
111 if (ivec == SGI_UART_VECTOR) {
112 nasid = get_nasid();
113 event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
114 (nasid, SH_EVENT_OCCURRED));
115 /* If the UART bit is set here, we may have received an
116 * interrupt from the UART that the driver missed. To
117 * make sure, we IPI ourselves to force us to look again.
118 */
119 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
120 platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
121 IA64_IPI_DM_INT, 0);
122 }
123 }
124 __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
125 if (sn_force_interrupt_flag)
126 force_interrupt(irq);
127}
128
129static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
130{
131 struct sn_irq_info *sn_irq_info = sn_irq[irq];
132 struct sn_irq_info *tmp_sn_irq_info;
133 int cpuid, cpuphys;
134 nasid_t t_nasid; /* nasid to target */
135 int t_slice; /* slice to target */
136
137 /* allocate a temp sn_irq_info struct to get new target info */
138 tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
139 if (!tmp_sn_irq_info)
140 return;
141
142 cpuid = first_cpu(mask);
143 cpuphys = cpu_physical_id(cpuid);
144 t_nasid = cpuid_to_nasid(cpuid);
145 t_slice = cpuid_to_slice(cpuid);
146
147 while (sn_irq_info) {
148 int status;
149 int local_widget;
150 uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
151 nasid_t local_nasid = NASID_GET(bridge);
152
153 if (!bridge)
154 break; /* irq is not a device interrupt */
155
156 if (local_nasid & 1)
157 local_widget = TIO_SWIN_WIDGETNUM(bridge);
158 else
159 local_widget = SWIN_WIDGETNUM(bridge);
160
161 /* Free the old PROM sn_irq_info structure */
162 sn_intr_free(local_nasid, local_widget, sn_irq_info);
163
164 /* allocate a new PROM sn_irq_info struct */
165 status = sn_intr_alloc(local_nasid, local_widget,
166 __pa(tmp_sn_irq_info), irq, t_nasid,
167 t_slice);
168
169 if (status == 0) {
170 /* Update kernels sn_irq_info with new target info */
171 unregister_intr_pda(sn_irq_info);
172 sn_irq_info->irq_cpuid = cpuid;
173 sn_irq_info->irq_nasid = t_nasid;
174 sn_irq_info->irq_slice = t_slice;
175 sn_irq_info->irq_xtalkaddr =
176 tmp_sn_irq_info->irq_xtalkaddr;
177 sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
178 register_intr_pda(sn_irq_info);
179
180 if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
181 pcibr_change_devices_irq(sn_irq_info);
182 }
183
184 sn_irq_info = sn_irq_info->irq_next;
185
186#ifdef CONFIG_SMP
187 set_irq_affinity_info((irq & 0xff), cpuphys, 0);
188#endif
189 } else {
190			break;	/* sn_intr_alloc failed; abort the affinity change */
191 }
192 }
193 kfree(tmp_sn_irq_info);
194}
195
196struct hw_interrupt_type irq_type_sn = {
197 "SN hub",
198 sn_startup_irq,
199 sn_shutdown_irq,
200 sn_enable_irq,
201 sn_disable_irq,
202 sn_ack_irq,
203 sn_end_irq,
204 sn_set_affinity_irq
205};
206
207unsigned int sn_local_vector_to_irq(u8 vector)
208{
209 return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
210}
211
212void sn_irq_init(void)
213{
214 int i;
215 irq_desc_t *base_desc = irq_desc;
216
217 for (i = 0; i < NR_IRQS; i++) {
218 if (base_desc[i].handler == &no_irq_type) {
219 base_desc[i].handler = &irq_type_sn;
220 }
221 }
222}
223
224static void register_intr_pda(struct sn_irq_info *sn_irq_info)
225{
226 int irq = sn_irq_info->irq_irq;
227 int cpu = sn_irq_info->irq_cpuid;
228
229 if (pdacpu(cpu)->sn_last_irq < irq) {
230 pdacpu(cpu)->sn_last_irq = irq;
231 }
232
233 if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
234 pdacpu(cpu)->sn_first_irq = irq;
235 }
236}
237
238static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
239{
240 int irq = sn_irq_info->irq_irq;
241 int cpu = sn_irq_info->irq_cpuid;
242 struct sn_irq_info *tmp_irq_info;
243 int i, foundmatch;
244
245 if (pdacpu(cpu)->sn_last_irq == irq) {
246 foundmatch = 0;
247 for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
248 tmp_irq_info = sn_irq[i];
249 while (tmp_irq_info) {
250 if (tmp_irq_info->irq_cpuid == cpu) {
251 foundmatch++;
252 break;
253 }
254 tmp_irq_info = tmp_irq_info->irq_next;
255 }
256 if (foundmatch) {
257 break;
258 }
259 }
260 pdacpu(cpu)->sn_last_irq = i;
261 }
262
263 if (pdacpu(cpu)->sn_first_irq == irq) {
264 foundmatch = 0;
265 for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
266 tmp_irq_info = sn_irq[i];
267 while (tmp_irq_info) {
268 if (tmp_irq_info->irq_cpuid == cpu) {
269 foundmatch++;
270 break;
271 }
272 tmp_irq_info = tmp_irq_info->irq_next;
273 }
274 if (foundmatch) {
275 break;
276 }
277 }
278 pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
279 }
280}
281
282struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
283 nasid_t nasid, int slice)
284{
285 struct sn_irq_info *sn_irq_info;
286 int status;
287
288 sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
289 if (sn_irq_info == NULL)
290 return NULL;
291
292 memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
293
294 status =
295 sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
296 nasid, slice);
297
298 if (status) {
299 kfree(sn_irq_info);
300 return NULL;
301 } else {
302 return sn_irq_info;
303 }
304}
305
306void sn_irq_free(struct sn_irq_info *sn_irq_info)
307{
308 uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
309 nasid_t local_nasid = NASID_GET(bridge);
310 int local_widget;
311
312 if (local_nasid & 1) /* tio check */
313 local_widget = TIO_SWIN_WIDGETNUM(bridge);
314 else
315 local_widget = SWIN_WIDGETNUM(bridge);
316
317 sn_intr_free(local_nasid, local_widget, sn_irq_info);
318
319 kfree(sn_irq_info);
320}
321
322void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
323{
324 nasid_t nasid = sn_irq_info->irq_nasid;
325 int slice = sn_irq_info->irq_slice;
326 int cpu = nasid_slice_to_cpuid(nasid, slice);
327
328 sn_irq_info->irq_cpuid = cpu;
329 sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
330
331 /* link it into the sn_irq[irq] list */
332 sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
333 sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
334
335 (void)register_intr_pda(sn_irq_info);
336}
337
338static void force_interrupt(int irq)
339{
340 struct sn_irq_info *sn_irq_info;
341
342 if (!sn_ioif_inited)
343 return;
344 sn_irq_info = sn_irq[irq];
345 while (sn_irq_info) {
346 if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
347 (sn_irq_info->irq_bridge != NULL)) {
348 pcibr_force_interrupt(sn_irq_info);
349 }
350 sn_irq_info = sn_irq_info->irq_next;
351 }
352}
353
354/*
355 * Check for lost interrupts. If the PIC int_status reg. says that
356 * an interrupt has been sent, but not handled, and the interrupt
357 * is not pending in either the cpu irr regs or in the soft irr regs,
358 * and the interrupt is not in service, then the interrupt may have
359 * been lost. Force an interrupt on that pin. It is possible that
360 * the interrupt is in flight, so we may generate a spurious interrupt,
361 * but we should never miss a real lost interrupt.
362 */
363static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
364{
365 uint64_t regval;
366 int irr_reg_num;
367 int irr_bit;
368 uint64_t irr_reg;
369 struct pcidev_info *pcidev_info;
370 struct pcibus_info *pcibus_info;
371
372 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
373 if (!pcidev_info)
374 return;
375
376 pcibus_info =
377 (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
378 pdi_pcibus_info;
379 regval = pcireg_intr_status_get(pcibus_info);
380
381 irr_reg_num = irq_to_vector(irq) / 64;
382 irr_bit = irq_to_vector(irq) % 64;
383 switch (irr_reg_num) {
384 case 0:
385 irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
386 break;
387 case 1:
388 irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
389 break;
390 case 2:
391 irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
392 break;
393 case 3:
394 irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
395 break;
396 }
397 if (!test_bit(irr_bit, &irr_reg)) {
398 if (!test_bit(irq, pda->sn_soft_irr)) {
399 if (!test_bit(irq, pda->sn_in_service_ivecs)) {
400 regval &= 0xff;
401 if (sn_irq_info->irq_int_bit & regval &
402 sn_irq_info->irq_last_intr) {
403 regval &=
404 ~(sn_irq_info->
405 irq_int_bit & regval);
406 pcibr_force_interrupt(sn_irq_info);
407 }
408 }
409 }
410 }
411 sn_irq_info->irq_last_intr = regval;
412}
413
414void sn_lb_int_war_check(void)
415{
416 int i;
417
418 if (!sn_ioif_inited || pda->sn_first_irq == 0)
419 return;
420 for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
421 struct sn_irq_info *sn_irq_info = sn_irq[i];
422 while (sn_irq_info) {
423 /* Only call for PCI bridges that are fully initialized. */
424 if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
425 (sn_irq_info->irq_bridge != NULL)) {
426 sn_check_intr(i, sn_irq_info);
427 }
428 sn_irq_info = sn_irq_info->irq_next;
429 }
430 }
431}
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
new file mode 100644
index 000000000000..0f11a3299cd2
--- /dev/null
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -0,0 +1,108 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/ctype.h>
11#include <linux/string.h>
12#include <linux/kernel.h>
13#include <asm/sn/types.h>
14#include <asm/sn/module.h>
15#include <asm/sn/l1.h>
16
17char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
18/*
19 * Format a module id for printing.
20 *
21 * There are three possible formats:
22 *
23 * MODULE_FORMAT_BRIEF is the brief 6-character format, including
24 * the actual brick-type as recorded in the
25 * moduleid_t, eg. 002c15 for a C-brick, or
26 * 101#17 for a PX-brick.
27 *
28 * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
29 *		or rack/101/bay/17 (note that the brick
30 * type does not appear in this format).
31 *
32 * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
33 * ensures that the module id provided appears
34 * exactly as it would on the LCD display of
35 * the corresponding brick, eg. still 002c15
36 * for a C-brick, but 101p17 for a PX-brick.
37 *
38 * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
39 * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
40 * decided that all callers should assume the returned string should be what
41 * is displayed on the brick L1 LCD.
42 */
43void
44format_module_id(char *buffer, moduleid_t m, int fmt)
45{
46 int rack, position;
47 unsigned char brickchar;
48
49 rack = MODULE_GET_RACK(m);
50 brickchar = MODULE_GET_BTCHAR(m);
51
52 /* Be sure we use the same brick type character as displayed
53 * on the brick's LCD
54 */
55 switch (brickchar)
56 {
57 case L1_BRICKTYPE_GA:
58 case L1_BRICKTYPE_OPUS_TIO:
59 brickchar = L1_BRICKTYPE_C;
60 break;
61
62 case L1_BRICKTYPE_PX:
63 case L1_BRICKTYPE_PE:
64 case L1_BRICKTYPE_PA:
65 case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
66 * if that makes more sense
67 */
68 brickchar = L1_BRICKTYPE_P;
69 break;
70
71 case L1_BRICKTYPE_IX:
72 case L1_BRICKTYPE_IA:
73
74 brickchar = L1_BRICKTYPE_I;
75 break;
76 }
77
78 position = MODULE_GET_BPOS(m);
79
80 if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
81 /* Brief module number format, eg. 002c15 */
82
83 /* Decompress the rack number */
84 *buffer++ = '0' + RACK_GET_CLASS(rack);
85 *buffer++ = '0' + RACK_GET_GROUP(rack);
86 *buffer++ = '0' + RACK_GET_NUM(rack);
87
88 /* Add the brick type */
89 *buffer++ = brickchar;
90 }
91 else if (fmt == MODULE_FORMAT_LONG) {
92 /* Fuller hwgraph format, eg. rack/002/bay/15 */
93
94 strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
95
96 *buffer++ = '0' + RACK_GET_CLASS(rack);
97 *buffer++ = '0' + RACK_GET_GROUP(rack);
98 *buffer++ = '0' + RACK_GET_NUM(rack);
99
100 strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
101 }
102
103 /* Add the bay position, using at least two digits */
104 if (position < 10)
105 *buffer++ = '0';
106 sprintf(buffer, "%d", position);
107
108}
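A small usage sketch (illustrative; example_print_module is not in the original source) showing both output formats described above:

static void example_print_module(moduleid_t m)
{
	char buf[32];	/* large enough for either format */

	format_module_id(buf, m, MODULE_FORMAT_BRIEF);
	printk(KERN_INFO "module %s", buf);	/* e.g. "002c15" */

	format_module_id(buf, m, MODULE_FORMAT_LONG);
	printk(KERN_INFO " (%s)\n", buf);	/* e.g. "rack/002/bay/15" */
}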
diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c
new file mode 100644
index 000000000000..02bb9155840c
--- /dev/null
+++ b/arch/ia64/sn/kernel/machvec.c
@@ -0,0 +1,11 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#define MACHVEC_PLATFORM_NAME sn2
10#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
11#include <asm/machvec_init.h>
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
new file mode 100644
index 000000000000..857774bb2c9a
--- /dev/null
+++ b/arch/ia64/sn/kernel/mca.c
@@ -0,0 +1,135 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/timer.h>
12#include <linux/vmalloc.h>
13#include <asm/mca.h>
14#include <asm/sal.h>
15#include <asm/sn/sn_sal.h>
16
17/*
18 * Interval for calling SAL to poll for errors that do NOT cause error
19 * interrupts. SAL will raise a CPEI if any errors are present that
20 * need to be logged.
21 */
22#define CPEI_INTERVAL (5*HZ)
23
24struct timer_list sn_cpei_timer;
25void sn_init_cpei_timer(void);
26
27/* Printing oemdata from mca uses data that is not passed through SAL; it is
28 * global. Only one user at a time.
29 */
30static DECLARE_MUTEX(sn_oemdata_mutex);
31static u8 **sn_oemdata;
32static u64 *sn_oemdata_size, sn_oemdata_bufsize;
33
34/*
35 * print_hook
36 *
37 * This function is the callback routine that SAL calls to log error
38 * info for platform errors. buf is appended to sn_oemdata, resizing as
39 * required.
40 */
41static int print_hook(const char *fmt, ...)
42{
43 char buf[400];
44 int len;
45 va_list args;
46 va_start(args, fmt);
47 vsnprintf(buf, sizeof(buf), fmt, args);
48 va_end(args);
49 len = strlen(buf);
50 while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
51 u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
52 if (!newbuf) {
53 printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
54 __FUNCTION__);
55 return 0;
56 }
57 memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
58 vfree(*sn_oemdata);
59 *sn_oemdata = newbuf;
60 }
61 memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
62 *sn_oemdata_size += len;
63 return 0;
64}
65
66static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
67{
68 /*
69 * this function's sole purpose is to call SAL when we receive
70 * a CE interrupt from SHUB or when the timer routine decides
71 * we need to call SAL to check for CEs.
72 */
73
74 /* CALL SAL_LOG_CE */
75
76 ia64_sn_plat_cpei_handler();
77}
78
79static void sn_cpei_timer_handler(unsigned long dummy)
80{
81 sn_cpei_handler(-1, NULL, NULL);
82 mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
83}
84
85void sn_init_cpei_timer(void)
86{
87 init_timer(&sn_cpei_timer);
88 sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
89 sn_cpei_timer.function = sn_cpei_timer_handler;
90 add_timer(&sn_cpei_timer);
91}
92
93static int
94sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
95 u64 * oemdata_size)
96{
97 down(&sn_oemdata_mutex);
98 sn_oemdata = oemdata;
99 sn_oemdata_size = oemdata_size;
100 sn_oemdata_bufsize = 0;
101 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
102 up(&sn_oemdata_mutex);
103 return 0;
104}
105
106/* Callback when userspace salinfo wants to decode oem data via the platform
107 * kernel and/or prom.
108 */
109int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
110{
111 efi_guid_t guid = *(efi_guid_t *)sect_header;
112 int valid = 0;
113 *oemdata_size = 0;
114 vfree(*oemdata);
115 *oemdata = NULL;
116 if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
117 sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
118 valid = psei->valid.oem_data;
119 } else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
120 sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header;
121 valid = mdei->valid.oem_data;
122 }
123 if (valid)
124 return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
125 else
126 return 0;
127}
128
129static int __init sn_salinfo_init(void)
130{
131 salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
132 return 0;
133}
134
135module_init(sn_salinfo_init)
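
The print_hook() callback above grows *sn_oemdata in fixed 1000-byte steps,
copying the old contents forward on each resize and appending each chunk with
its trailing NUL (which the size count deliberately excludes, so the next
append starts on top of it). A minimal user-space sketch of the same
append-with-regrow pattern, assuming malloc/free in place of vmalloc/vfree;
all names here are illustrative, not from the kernel:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *buf;			/* stands in for *sn_oemdata */
static size_t buf_size, buf_cap;	/* *sn_oemdata_size, sn_oemdata_bufsize */

static int append(const char *chunk)
{
	size_t len = strlen(chunk);

	/* Grow in fixed steps until the chunk plus its NUL fits. */
	while (buf_size + len + 1 > buf_cap) {
		char *newbuf = malloc(buf_cap += 1000);
		if (!newbuf)
			return -1;
		if (buf)
			memcpy(newbuf, buf, buf_size);
		free(buf);
		buf = newbuf;
	}
	/* Copy the NUL too, but do not count it, exactly as print_hook()
	 * does: the next append overwrites it. */
	memcpy(buf + buf_size, chunk, len + 1);
	buf_size += len;
	return 0;
}

int main(void)
{
	append("OEM record 1\n");
	append("OEM record 2\n");
	printf("%s", buf);
	free(buf);
	return 0;
}
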
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
new file mode 100644
index 000000000000..f0306b516afb
--- /dev/null
+++ b/arch/ia64/sn/kernel/setup.c
@@ -0,0 +1,621 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/delay.h>
13#include <linux/kernel.h>
14#include <linux/kdev_t.h>
15#include <linux/string.h>
16#include <linux/tty.h>
17#include <linux/console.h>
18#include <linux/timex.h>
19#include <linux/sched.h>
20#include <linux/ioport.h>
21#include <linux/mm.h>
22#include <linux/serial.h>
23#include <linux/irq.h>
24#include <linux/bootmem.h>
25#include <linux/mmzone.h>
26#include <linux/interrupt.h>
27#include <linux/acpi.h>
28#include <linux/compiler.h>
29#include <linux/sched.h>
30#include <linux/root_dev.h>
31#include <linux/nodemask.h>
32
33#include <asm/io.h>
34#include <asm/sal.h>
35#include <asm/machvec.h>
36#include <asm/system.h>
37#include <asm/processor.h>
38#include <asm/sn/arch.h>
39#include <asm/sn/addrs.h>
40#include <asm/sn/pda.h>
41#include <asm/sn/nodepda.h>
42#include <asm/sn/sn_cpuid.h>
43#include <asm/sn/simulator.h>
44#include <asm/sn/leds.h>
45#include <asm/sn/bte.h>
46#include <asm/sn/shub_mmr.h>
47#include <asm/sn/clksupport.h>
48#include <asm/sn/sn_sal.h>
49#include <asm/sn/geo.h>
50#include "xtalk/xwidgetdev.h"
51#include "xtalk/hubdev.h"
52#include <asm/sn/klconfig.h>
53
54
55DEFINE_PER_CPU(struct pda_s, pda_percpu);
56
57#define MAX_PHYS_MEMORY (1UL << 49) /* 512 TB */
58
59lboard_t *root_lboard[MAX_COMPACT_NODES];
60
61extern void bte_init_node(nodepda_t *, cnodeid_t);
62
63extern void sn_timer_init(void);
64extern unsigned long last_time_offset;
65extern void (*ia64_mark_idle) (int);
66extern void snidle(int);
67extern unsigned char acpi_kbd_controller_present;
68
69unsigned long sn_rtc_cycles_per_second;
70EXPORT_SYMBOL(sn_rtc_cycles_per_second);
71
72DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
73EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
74
75partid_t sn_partid = -1;
76EXPORT_SYMBOL(sn_partid);
77char sn_system_serial_number_string[128];
78EXPORT_SYMBOL(sn_system_serial_number_string);
79u64 sn_partition_serial_number;
80EXPORT_SYMBOL(sn_partition_serial_number);
81u8 sn_partition_id;
82EXPORT_SYMBOL(sn_partition_id);
83u8 sn_system_size;
84EXPORT_SYMBOL(sn_system_size);
85u8 sn_sharing_domain_size;
86EXPORT_SYMBOL(sn_sharing_domain_size);
87u8 sn_coherency_id;
88EXPORT_SYMBOL(sn_coherency_id);
89u8 sn_region_size;
90EXPORT_SYMBOL(sn_region_size);
91
92short physical_node_map[MAX_PHYSNODE_ID];
93
94EXPORT_SYMBOL(physical_node_map);
95
96int numionodes;
97
98static void sn_init_pdas(char **);
99static void scan_for_ionodes(void);
100
101static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
102
103/*
104 * The format of "screen_info" is strange, and due to early i386-setup
105 * code. This is just enough to make the console code think we're on a
106 * VGA color display.
107 */
108struct screen_info sn_screen_info = {
109 .orig_x = 0,
110 .orig_y = 0,
111 .orig_video_mode = 3,
112 .orig_video_cols = 80,
113 .orig_video_ega_bx = 3,
114 .orig_video_lines = 25,
115 .orig_video_isVGA = 1,
116 .orig_video_points = 16
117};
118
119/*
120 * This is here so we can use the CMOS detection in ide-probe.c to
121 * determine what drives are present. In theory, we don't need this
122 * as the auto-detection could be done via ide-probe.c:do_probe() but
123 * in practice that would be much slower, which is painful when
124 * running in the simulator. Note that passing zeroes in DRIVE_INFO
125 * is sufficient (the IDE driver will autodetect the drive geometry).
126 */
127#ifdef CONFIG_IA64_GENERIC
128extern char drive_info[4 * 16];
129#else
130char drive_info[4 * 16];
131#endif
132
133/*
134 * Get nasid of current cpu early in boot before nodepda is initialized
135 */
136static int
137boot_get_nasid(void)
138{
139 int nasid;
140
141 if (ia64_sn_get_sapic_info(get_sapicid(), &nasid, NULL, NULL))
142 BUG();
143 return nasid;
144}
145
146/*
147 * This routine can only be used during init, since
148 * smp_boot_data is an init data structure.
149 * We have to use smp_boot_data.cpu_phys_id to find
150 * the physical id of the processor because the normal
151 * cpu_physical_id() relies on data structures that
152 * may not be initialized yet.
153 */
154
155static int __init pxm_to_nasid(int pxm)
156{
157 int i;
158 int nid;
159
160 nid = pxm_to_nid_map[pxm];
161 for (i = 0; i < num_node_memblks; i++) {
162 if (node_memblk[i].nid == nid) {
163 return NASID_GET(node_memblk[i].start_paddr);
164 }
165 }
166 return -1;
167}
168
169/**
170 * early_sn_setup - early setup routine for SN platforms
171 *
172 * Sets up an initial console to aid debugging. Intended primarily
173 * for bringup. See start_kernel() in init/main.c.
174 */
175
176void __init early_sn_setup(void)
177{
178 efi_system_table_t *efi_systab;
179 efi_config_table_t *config_tables;
180 struct ia64_sal_systab *sal_systab;
181 struct ia64_sal_desc_entry_point *ep;
182 char *p;
183 int i, j;
184
185 /*
186 * Parse enough of the SAL tables to locate the SAL entry point. Since console
187 * IO on SN2 is done via SAL calls, early_printk won't work without this.
188 *
189 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
190 * Any changes to those files may have to be made here as well.
191 */
192 efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
193 config_tables = __va(efi_systab->tables);
194 for (i = 0; i < efi_systab->nr_tables; i++) {
195 if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
196 0) {
197 sal_systab = __va(config_tables[i].table);
198 p = (char *)(sal_systab + 1);
199 for (j = 0; j < sal_systab->entry_count; j++) {
200 if (*p == SAL_DESC_ENTRY_POINT) {
201 ep = (struct ia64_sal_desc_entry_point
202 *)p;
203 ia64_sal_handler_init(__va
204 (ep->sal_proc),
205 __va(ep->gp));
206 return;
207 }
208 p += SAL_DESC_SIZE(*p);
209 }
210 }
211 }
212 /* Uh-oh, SAL not available?? */
213 printk(KERN_ERR "failed to find SAL entry point\n");
214}
215
216extern int platform_intr_list[];
217extern nasid_t master_nasid;
218static int shub_1_1_found __initdata;
219
220/*
221 * sn_check_for_wars
222 *
223 * Set flag for enabling SHUB-specific WARs (workarounds).
224 */
225
226static inline int __init is_shub_1_1(int nasid)
227{
228 unsigned long id;
229 int rev;
230
231 if (is_shub2())
232 return 0;
233 id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
234 rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
235 return rev <= 2;
236}
237
238static void __init sn_check_for_wars(void)
239{
240 int cnode;
241
242 if (is_shub2()) {
243 /* none yet */
244 } else {
245 for_each_online_node(cnode) {
246 if (is_shub_1_1(cnodeid_to_nasid(cnode)))
247 sn_hub_info->shub_1_1_found = 1;
248 }
249 }
250}
251
252/**
253 * sn_setup - SN platform setup routine
254 * @cmdline_p: kernel command line
255 *
256 * Handles platform setup for SN machines. This includes determining
257 * the RTC frequency (via a SAL call), initializing secondary CPUs, and
258 * setting up per-node data areas. The console is also initialized here.
259 */
260void __init sn_setup(char **cmdline_p)
261{
262 long status, ticks_per_sec, drift;
263 int pxm;
264 int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
265 extern void sn_cpu_init(void);
266
267 /*
268 * If the generic code has enabled VGA console support, let's
269 * get rid of it again. This is a kludge for the fact that ACPI
270 * currently has no way of informing us whether legacy VGA is
271 * available or not.
272 */
273#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
274 if (conswitchp == &vga_con) {
275 printk(KERN_DEBUG "SGI: Disabling VGA console\n");
276#ifdef CONFIG_DUMMY_CONSOLE
277 conswitchp = &dummy_con;
278#else
279 conswitchp = NULL;
280#endif /* CONFIG_DUMMY_CONSOLE */
281 }
282#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
283
284 MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
285
286 memset(physical_node_map, -1, sizeof(physical_node_map));
287 for (pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++)
288 if (pxm_to_nid_map[pxm] != -1)
289 physical_node_map[pxm_to_nasid(pxm)] =
290 pxm_to_nid_map[pxm];
291
292 /*
293 * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
294 * support here so we don't have to listen to failed keyboard probe
295 * messages.
296 */
297 if ((major < 2 || (major == 2 && minor <= 9)) &&
298 acpi_kbd_controller_present) {
299 printk(KERN_INFO "Disabling legacy keyboard support as PROM "
300 "is too old and doesn't provide an FADT\n");
301 acpi_kbd_controller_present = 0;
302 }
303
304 printk("SGI SAL version %x.%02x\n", major, minor);
305
306 /*
307 * Confirm the SAL we're running on is recent enough...
308 */
309 if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR &&
310 minor < SN_SAL_MIN_MINOR)) {
311 printk(KERN_ERR "This kernel needs SGI SAL version >= "
312 "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
313 panic("PROM version too old\n");
314 }
315
316 master_nasid = boot_get_nasid();
317
318 status =
319 ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
320 &drift);
321 if (status != 0 || ticks_per_sec < 100000) {
322 printk(KERN_WARNING
323 "unable to determine platform RTC clock frequency, guessing.\n");
324 /* PROM gives a wrong value for the clock frequency, so guess */
325 sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
326 } else
327 sn_rtc_cycles_per_second = ticks_per_sec;
328
329 platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
330
331 /*
332 * we set the default root device to /dev/hda
333 * to make simulation easy
334 */
335 ROOT_DEV = Root_HDA1;
336
337 /*
338 * Create the PDAs and NODEPDAs for all the cpus.
339 */
340 sn_init_pdas(cmdline_p);
341
342 ia64_mark_idle = &snidle;
343
344 /*
345 * For the bootcpu, we do this here. All other cpus will make the
346 * call as part of cpu_init in slave cpu initialization.
347 */
348 sn_cpu_init();
349
350#ifdef CONFIG_SMP
351 init_smp_config();
352#endif
353 screen_info = sn_screen_info;
354
355 sn_timer_init();
356}
357
358/**
359 * sn_init_pdas - setup node data areas
360 *
361 * One time setup for Node Data Area. Called by sn_setup().
362 */
363static void __init sn_init_pdas(char **cmdline_p)
364{
365 cnodeid_t cnode;
366
367 memset(pda->cnodeid_to_nasid_table, -1,
368 sizeof(pda->cnodeid_to_nasid_table));
369 for_each_online_node(cnode)
370 pda->cnodeid_to_nasid_table[cnode] =
371 pxm_to_nasid(nid_to_pxm_map[cnode]);
372
373 numionodes = num_online_nodes();
374 scan_for_ionodes();
375
376 /*
377 * Allocate & initialize the nodepda for each node.
378 */
379 for_each_online_node(cnode) {
380 nodepdaindr[cnode] =
381 alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
382 memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
383 memset(nodepdaindr[cnode]->phys_cpuid, -1,
384 sizeof(nodepdaindr[cnode]->phys_cpuid));
385 }
386
387 /*
388 * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
389 */
390 for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
391 nodepdaindr[cnode] =
392 alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
393 memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
394 }
395
396 /*
397 * Now copy the array of nodepda pointers to each nodepda.
398 */
399 for (cnode = 0; cnode < numionodes; cnode++)
400 memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
401 sizeof(nodepdaindr));
402
403 /*
404 * Set up IO related platform-dependent nodepda fields.
405 * The following routine actually sets up the hubinfo struct
406 * in nodepda.
407 */
408 for_each_online_node(cnode) {
409 bte_init_node(nodepdaindr[cnode], cnode);
410 }
411
412 /*
413 * Initialize the per node hubdev. This includes IO Nodes and
414 * headless/memless nodes.
415 */
416 for (cnode = 0; cnode < numionodes; cnode++) {
417 hubdev_init_node(nodepdaindr[cnode], cnode);
418 }
419}
420
421/**
422 * sn_cpu_init - initialize per-cpu data areas
423 * @cpuid: cpuid of the caller
424 *
425 * Called during cpu initialization on each cpu as it starts.
426 * Currently, initializes the per-cpu data area for SNIA.
427 * Also sets up a few fields in the nodepda. Also known as
428 * platform_cpu_init() by the ia64 machvec code.
429 */
430void __init sn_cpu_init(void)
431{
432 int cpuid;
433 int cpuphyid;
434 int nasid;
435 int subnode;
436 int slice;
437 int cnode;
438 int i;
439 static int wars_have_been_checked;
440
441 memset(pda, 0, sizeof(*pda));
442 if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
443 &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
444 &sn_coherency_id, &sn_region_size))
445 BUG();
446 sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
447
448 /*
449 * The boot cpu makes this call again after platform initialization is
450 * complete.
451 */
452 if (nodepdaindr[0] == NULL)
453 return;
454
455 cpuid = smp_processor_id();
456 cpuphyid = get_sapicid();
457
458 if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
459 BUG();
460
461 for (i=0; i < MAX_NUMNODES; i++) {
462 if (nodepdaindr[i]) {
463 nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
464 nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
465 nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
466 }
467 }
468
469 cnode = nasid_to_cnodeid(nasid);
470
471 pda->p_nodepda = nodepdaindr[cnode];
472 pda->led_address =
473 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
474 pda->led_state = LED_ALWAYS_SET;
475 pda->hb_count = HZ / 2;
476 pda->hb_state = 0;
477 pda->idle_flag = 0;
478
479 if (cpuid != 0) {
480 memcpy(pda->cnodeid_to_nasid_table,
481 pdacpu(0)->cnodeid_to_nasid_table,
482 sizeof(pda->cnodeid_to_nasid_table));
483 }
484
485 /*
486 * Check for WARs.
487 * Only needs to be done once, on BSP.
488 * Has to be done after the loop above, because it uses pda->cnodeid_to_nasid_table.
489 * Has to be done before assignment below.
490 */
491 if (!wars_have_been_checked) {
492 sn_check_for_wars();
493 wars_have_been_checked = 1;
494 }
495 sn_hub_info->shub_1_1_found = shub_1_1_found;
496
497 /*
498 * Set up addresses of PIO/MEM write status registers.
499 */
500 {
501 u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
502 u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
503 SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
504 u64 *pio;
505 pio = is_shub1() ? pio1 : pio2;
506 pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
507 pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
508 }
509
510 /*
511 * WAR addresses for SHUB 1.x.
512 */
513 if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
514 int buddy_nasid;
515 buddy_nasid =
516 cnodeid_to_nasid(numa_node_id() ==
517 num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
518 pda->pio_shub_war_cam_addr =
519 (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
520 SH1_PI_CAM_CONTROL);
521 }
522}
523
524/*
525 * Scan klconfig for ionodes. Add the nasids to the
526 * physical_node_map and the pda and increment numionodes.
527 */
528
529static void __init scan_for_ionodes(void)
530{
531 int nasid = 0;
532 lboard_t *brd;
533
534 /* Setup ionodes with memory */
535 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
536 char *klgraph_header;
537 cnodeid_t cnodeid;
538
539 if (physical_node_map[nasid] == -1)
540 continue;
541
542 cnodeid = -1;
543 klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
544 if (!klgraph_header) {
545 if (IS_RUNNING_ON_SIMULATOR())
546 continue;
547 BUG(); /* All nodes must have klconfig tables! */
548 }
549 cnodeid = nasid_to_cnodeid(nasid);
550 root_lboard[cnodeid] = (lboard_t *)
551 NODE_OFFSET_TO_LBOARD((nasid),
552 ((kl_config_hdr_t
553 *) (klgraph_header))->
554 ch_board_info);
555 }
556
557 /* Scan headless/memless IO Nodes. */
558 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
559 /* if there's no nasid, don't try to read the klconfig on the node */
560 if (physical_node_map[nasid] == -1)
561 continue;
562 brd = find_lboard_any((lboard_t *)
563 root_lboard[nasid_to_cnodeid(nasid)],
564 KLTYPE_SNIA);
565 if (brd) {
566 brd = KLCF_NEXT_ANY(brd); /* Skip this node's lboard */
567 if (!brd)
568 continue;
569 }
570
571 brd = find_lboard_any(brd, KLTYPE_SNIA);
572
573 while (brd) {
574 pda->cnodeid_to_nasid_table[numionodes] =
575 brd->brd_nasid;
576 physical_node_map[brd->brd_nasid] = numionodes;
577 root_lboard[numionodes] = brd;
578 numionodes++;
579 brd = KLCF_NEXT_ANY(brd);
580 if (!brd)
581 break;
582
583 brd = find_lboard_any(brd, KLTYPE_SNIA);
584 }
585 }
586
587 /* Scan for TIO nodes. */
588 for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
589 /* if there's no nasid, don't try to read the klconfig on the node */
590 if (physical_node_map[nasid] == -1)
591 continue;
592 brd = find_lboard_any((lboard_t *)
593 root_lboard[nasid_to_cnodeid(nasid)],
594 KLTYPE_TIO);
595 while (brd) {
596 pda->cnodeid_to_nasid_table[numionodes] =
597 brd->brd_nasid;
598 physical_node_map[brd->brd_nasid] = numionodes;
599 root_lboard[numionodes] = brd;
600 numionodes++;
601 brd = KLCF_NEXT_ANY(brd);
602 if (!brd)
603 break;
604
605 brd = find_lboard_any(brd, KLTYPE_TIO);
606 }
607 }
608
609}
610
611int
612nasid_slice_to_cpuid(int nasid, int slice)
613{
614 long cpu;
615
616 for (cpu=0; cpu < NR_CPUS; cpu++)
617 if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
618 return cpu;
619
620 return -1;
621}
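
setup.c keeps the node numbering in both directions: physical_node_map[]
takes a hardware NASID to a compact node id, and pda->cnodeid_to_nasid_table[]
maps back, with -1 marking "no node here". A hedged sketch of that round-trip
using plain arrays in place of the per-cpu pda; the sizes and helper name are
illustrative only:

#include <assert.h>
#include <string.h>

#define MAX_PHYSNODE_ID		128
#define MAX_COMPACT_NODES	64

static short physical_node_map[MAX_PHYSNODE_ID];	/* nasid -> cnodeid */
static short cnodeid_to_nasid_table[MAX_COMPACT_NODES];	/* cnodeid -> nasid */

static void register_node(short nasid, short cnodeid)
{
	physical_node_map[nasid] = cnodeid;
	cnodeid_to_nasid_table[cnodeid] = nasid;
}

int main(void)
{
	/* -1 bytes give -1 shorts, the same trick sn_setup() relies on. */
	memset(physical_node_map, -1, sizeof(physical_node_map));
	memset(cnodeid_to_nasid_table, -1, sizeof(cnodeid_to_nasid_table));

	/* NASIDs of nodes with memory are even (see scan_for_ionodes). */
	register_node(0, 0);
	register_node(2, 1);

	assert(physical_node_map[cnodeid_to_nasid_table[1]] == 1);
	assert(physical_node_map[4] == -1);	/* no node at NASID 4 */
	return 0;
}
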
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
new file mode 100644
index 000000000000..170bde4549da
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -0,0 +1,13 @@
1# arch/ia64/sn/kernel/sn2/Makefile
2#
3# This file is subject to the terms and conditions of the GNU General Public
4# License. See the file "COPYING" in the main directory of this archive
5# for more details.
6#
7# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
8#
9# sn2 specific kernel files
10#
11
12obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
13 prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
new file mode 100644
index 000000000000..bc3cfa17cd0f
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -0,0 +1,34 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
7 *
8 */
9#include <linux/module.h>
10#include <asm/pgalloc.h>
11
12/**
13 * sn_flush_all_caches - flush a range of addresses from all caches (incl. L4)
14 * @flush_addr: identity mapped region 7 address to start flushing
15 * @bytes: number of bytes to flush
16 *
17 * Flush a range of addresses from all caches including L4.
18 * All addresses fully or partially contained within
19 * @flush_addr to @flush_addr + @bytes are flushed
20 * from all caches.
21 */
22void
23sn_flush_all_caches(long flush_addr, long bytes)
24{
25 flush_icache_range(flush_addr, flush_addr+bytes);
26 /*
27 * The last call may have returned before the caches
28 * were actually flushed, so we call it again to make
29 * sure.
30 */
31 flush_icache_range(flush_addr, flush_addr+bytes);
32 mb();
33}
34EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
new file mode 100644
index 000000000000..a12c0586de38
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -0,0 +1,101 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
7 *
8 * The generic kernel requires function pointers to these routines, so
9 * we wrap the inlines from asm/ia64/sn/sn2/io.h here.
10 */
11
12#include <asm/sn/io.h>
13
14#ifdef CONFIG_IA64_GENERIC
15
16#undef __sn_inb
17#undef __sn_inw
18#undef __sn_inl
19#undef __sn_outb
20#undef __sn_outw
21#undef __sn_outl
22#undef __sn_readb
23#undef __sn_readw
24#undef __sn_readl
25#undef __sn_readq
26#undef __sn_readb_relaxed
27#undef __sn_readw_relaxed
28#undef __sn_readl_relaxed
29#undef __sn_readq_relaxed
30
31unsigned int __sn_inb(unsigned long port)
32{
33 return ___sn_inb(port);
34}
35
36unsigned int __sn_inw(unsigned long port)
37{
38 return ___sn_inw(port);
39}
40
41unsigned int __sn_inl(unsigned long port)
42{
43 return ___sn_inl(port);
44}
45
46void __sn_outb(unsigned char val, unsigned long port)
47{
48 ___sn_outb(val, port);
49}
50
51void __sn_outw(unsigned short val, unsigned long port)
52{
53 ___sn_outw(val, port);
54}
55
56void __sn_outl(unsigned int val, unsigned long port)
57{
58 ___sn_outl(val, port);
59}
60
61unsigned char __sn_readb(void __iomem *addr)
62{
63 return ___sn_readb(addr);
64}
65
66unsigned short __sn_readw(void __iomem *addr)
67{
68 return ___sn_readw(addr);
69}
70
71unsigned int __sn_readl(void __iomem *addr)
72{
73 return ___sn_readl(addr);
74}
75
76unsigned long __sn_readq(void __iomem *addr)
77{
78 return ___sn_readq(addr);
79}
80
81unsigned char __sn_readb_relaxed(void __iomem *addr)
82{
83 return ___sn_readb_relaxed(addr);
84}
85
86unsigned short __sn_readw_relaxed(void __iomem *addr)
87{
88 return ___sn_readw_relaxed(addr);
89}
90
91unsigned int __sn_readl_relaxed(void __iomem *addr)
92{
93 return ___sn_readl_relaxed(addr);
94}
95
96unsigned long __sn_readq_relaxed(void __iomem *addr)
97{
98 return ___sn_readq_relaxed(addr);
99}
100
101#endif
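
The #undef trick above is what turns the header's inline accessors into real
symbols: the generic machvec tables need function addresses, which an inline
behind a macro alias cannot provide. A hedged, self-contained sketch of the
pattern, with made-up ___example names standing in for the real inlines in the
header included above:

/* In the header, the fast path is an inline behind a macro alias ... */
static inline unsigned int ___example_inb(unsigned long port)
{
	return (unsigned int)port;	/* stand-in for the real MMIO access */
}
#define __example_inb ___example_inb

/* ... and the one .c file that must hand out a function pointer
 * removes the alias and emits an out-of-line wrapper. */
#undef __example_inb

unsigned int __example_inb(unsigned long port)
{
	return ___example_inb(port);	/* call through to the inline */
}

/* A machvec-style operations table can now take its address. */
unsigned int (*const example_inb_op)(unsigned long) = __example_inb;
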
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
new file mode 100644
index 000000000000..81c63b2f8ae9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -0,0 +1,279 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * Module to export the system's Firmware Interface Tables, including
9 * PROM revision numbers and banners, in /proc
10 */
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/proc_fs.h>
15#include <linux/nodemask.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <asm/sn/sn_sal.h>
19#include <asm/sn/sn_cpuid.h>
20#include <asm/sn/addrs.h>
21
22MODULE_DESCRIPTION("PROM version reporting for /proc");
23MODULE_AUTHOR("Chad Talbott");
24MODULE_LICENSE("GPL");
25
26/* Standard Intel FIT entry types */
27#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
28#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
29/* Entries 0x02 through 0x0D reserved by Intel */
30#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
31#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
32#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
33#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
34/* OEM-defined entries range from 0x10 to 0x7E. */
35#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
36#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
37#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
38#define FIT_ENTRY_EFI 0x1F /* EFI entry */
39#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
40#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
41
42#define FIT_MAJOR_SHIFT (32 + 8)
43#define FIT_MAJOR_MASK ((1 << 8) - 1)
44#define FIT_MINOR_SHIFT 32
45#define FIT_MINOR_MASK ((1 << 8) - 1)
46
47#define FIT_MAJOR(q) \
48 ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
49#define FIT_MINOR(q) \
50 ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
51
52#define FIT_TYPE_SHIFT (32 + 16)
53#define FIT_TYPE_MASK ((1 << 7) - 1)
54
55#define FIT_TYPE(q) \
56 ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
57
58struct fit_type_map_t {
59 unsigned char type;
60 const char *name;
61};
62
63static const struct fit_type_map_t fit_entry_types[] = {
64 {FIT_ENTRY_FIT_HEADER, "FIT Header"},
65 {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
66 {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
67 {FIT_ENTRY_PAL_A, "PAL_A"},
68 {FIT_ENTRY_PAL_B, "PAL_B"},
69 {FIT_ENTRY_SAL_A, "SAL_A"},
70 {FIT_ENTRY_SAL_B, "SAL_B"},
71 {FIT_ENTRY_SALRUNTIME, "SAL runtime"},
72 {FIT_ENTRY_EFI, "EFI"},
73 {FIT_ENTRY_VMLINUX, "Embedded Linux"},
74 {FIT_ENTRY_FPSWA, "Embedded FPSWA"},
75 {FIT_ENTRY_UNUSED, "Unused"},
76 {0xff, "Error"},
77};
78
79static const char *fit_type_name(unsigned char type)
80{
81 struct fit_type_map_t const *mapp;
82
83 for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
84 if (type == mapp->type)
85 return mapp->name;
86
87 if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
88 return "OEM type";
89 if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
90 return "Reserved";
91
92 return "Unknown type";
93}
94
95static int
96get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
97 char *banner, int banlen)
98{
99 return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
100}
101
102
103/*
104 * These two routines display the FIT table for each node.
105 */
106static int dump_fit_entry(char *page, unsigned long *fentry)
107{
108 unsigned type;
109
110 type = FIT_TYPE(fentry[1]);
111 return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
112 type,
113 fit_type_name(type),
114 FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
115 fentry[0],
116 /* multiply by sixteen to get the size in bytes */
117 (unsigned)(fentry[1] & 0xffffff) * 16);
118}
119
120
121/*
122 * We assume that the fit table will be small enough that we can print
123 * the whole thing into one page. (This is true for our default 16kB
124 * pages -- each entry is about 60 chars wide when printed.) I read
125 * somewhere that the maximum size of the FIT is 128 entries, so we're
126 * OK except for 4kB pages (and no one is going to do that on SN
127 * anyway).
128 */
129static int
130dump_fit(char *page, unsigned long nasid)
131{
132 unsigned long fentry[2];
133 int index;
134 char *p;
135
136 p = page;
137 for (index=0;;index++) {
138 BUG_ON(index * 60 > PAGE_SIZE);
139 if (get_fit_entry(nasid, index, fentry, NULL, 0))
140 break;
141 p += dump_fit_entry(p, fentry);
142 }
143
144 return p - page;
145}
146
147static int
148dump_version(char *page, unsigned long nasid)
149{
150 unsigned long fentry[2];
151 char banner[128];
152 int index;
153 int len;
154
155 for (index = 0; ; index++) {
156 if (get_fit_entry(nasid, index, fentry, banner,
157 sizeof(banner)))
158 return 0;
159 if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
160 break;
161 }
162
163 len = sprintf(page, "%x.%02x\n", FIT_MAJOR(fentry[1]),
164 FIT_MINOR(fentry[1]));
165 page += len;
166
167 if (banner[0])
168 len += snprintf(page, PAGE_SIZE-len, "%s\n", banner);
169
170 return len;
171}
172
173/* same as in proc_misc.c */
174static int
175proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
176 int len)
177{
178 if (len <= off + count)
179 *eof = 1;
180 *start = page + off;
181 len -= off;
182 if (len > count)
183 len = count;
184 if (len < 0)
185 len = 0;
186 return len;
187}
188
189static int
190read_version_entry(char *page, char **start, off_t off, int count, int *eof,
191 void *data)
192{
193 int len = 0;
194
195 /* data holds the NASID of the node */
196 len = dump_version(page, (unsigned long)data);
197 len = proc_calc_metrics(page, start, off, count, eof, len);
198 return len;
199}
200
201static int
202read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
203 void *data)
204{
205 int len = 0;
206
207 /* data holds the NASID of the node */
208 len = dump_fit(page, (unsigned long)data);
209 len = proc_calc_metrics(page, start, off, count, eof, len);
210
211 return len;
212}
213
214/* module entry points */
215int __init prominfo_init(void);
216void __exit prominfo_exit(void);
217
218module_init(prominfo_init);
219module_exit(prominfo_exit);
220
221static struct proc_dir_entry **proc_entries;
222static struct proc_dir_entry *sgi_prominfo_entry;
223
224#define NODE_NAME_LEN 11
225
226int __init prominfo_init(void)
227{
228 struct proc_dir_entry **entp;
229 struct proc_dir_entry *p;
230 cnodeid_t cnodeid;
231 unsigned long nasid;
232 char name[NODE_NAME_LEN];
233
234 if (!ia64_platform_is("sn2"))
235 return 0;
236
237 proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
238 GFP_KERNEL);
239
240 sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
241
242 entp = proc_entries;
243 for_each_online_node(cnodeid) {
244 sprintf(name, "node%d", cnodeid);
245 *entp = proc_mkdir(name, sgi_prominfo_entry);
246 nasid = cnodeid_to_nasid(cnodeid);
247 p = create_proc_read_entry(
248 "fit", 0, *entp, read_fit_entry,
249 (void *)nasid);
250 if (p)
251 p->owner = THIS_MODULE;
252 p = create_proc_read_entry(
253 "version", 0, *entp, read_version_entry,
254 (void *)nasid);
255 if (p)
256 p->owner = THIS_MODULE;
257 entp++;
258 }
259
260 return 0;
261}
262
263void __exit prominfo_exit(void)
264{
265 struct proc_dir_entry **entp;
266 unsigned cnodeid;
267 char name[NODE_NAME_LEN];
268
269 entp = proc_entries;
270 for_each_online_node(cnodeid) {
271 remove_proc_entry("fit", *entp);
272 remove_proc_entry("version", *entp);
273 sprintf(name, "node%d", cnodeid);
274 remove_proc_entry(name, sgi_prominfo_entry);
275 entp++;
276 }
277 remove_proc_entry("sgi_prominfo", NULL);
278 kfree(proc_entries);
279}
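
The FIT_* macros above unpack a single 64-bit FIT word: the entry type sits
in bits 48..54, the major revision in bits 40..47, the minor revision in bits
32..39, and the low 24 bits hold the size in 16-byte units. A small
stand-alone sketch that builds such a word and decodes it the same way
dump_fit_entry() does; the sample value is fabricated:

#include <stdio.h>

#define FIT_MAJOR_SHIFT	(32 + 8)
#define FIT_MAJOR_MASK	((1 << 8) - 1)
#define FIT_MINOR_SHIFT	32
#define FIT_MINOR_MASK	((1 << 8) - 1)
#define FIT_TYPE_SHIFT	(32 + 16)
#define FIT_TYPE_MASK	((1 << 7) - 1)

#define FIT_MAJOR(q) ((unsigned)((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
#define FIT_MINOR(q) ((unsigned)((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
#define FIT_TYPE(q)  ((unsigned)((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)

int main(void)
{
	/* Fabricated SAL_A entry: type 0x10, revision 4.05, 0x30 units */
	unsigned long long q = (0x10ULL << FIT_TYPE_SHIFT) |
			       (0x04ULL << FIT_MAJOR_SHIFT) |
			       (0x05ULL << FIT_MINOR_SHIFT) |
			       0x30ULL;

	/* Prints: type 10 rev 4.05 size 768 bytes */
	printf("type %02x rev %x.%02x size %u bytes\n",
	       FIT_TYPE(q), FIT_MAJOR(q), FIT_MINOR(q),
	       (unsigned)(q & 0xffffff) * 16);
	return 0;
}
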
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
new file mode 100644
index 000000000000..7947312801ec
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -0,0 +1,82 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <asm/sn/shub_mmr.h>
10
11#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
12#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
13#define ALIAS_OFFSET (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
14
15
16 .global sn2_ptc_deadlock_recovery_core
17 .proc sn2_ptc_deadlock_recovery_core
18
19sn2_ptc_deadlock_recovery_core:
20 .regstk 6,0,0,0
21
22 ptc0 = in0
23 data0 = in1
24 ptc1 = in2
25 data1 = in3
26 piowc = in4
27 zeroval = in5
28 piowcphy = r30
29 psrsave = r2
30 scr1 = r16
31 scr2 = r17
32 mask = r18
33
34
35 extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
36 dep piowcphy=-1,piowcphy,63,1
37 movl mask=WRITECOUNTMASK
38
391:
40 add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
41 mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
42 st8.rel [scr2]=scr1;;
43
445: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
45 and scr2=scr1,mask;; // mask of writecount bits
46 cmp.ne p6,p0=zeroval,scr2
47(p6) br.cond.sptk 5b
48
49
50
51 ////////////// BEGIN PHYSICAL MODE ////////////////////
52 mov psrsave=psr // Disable IC (no PMIs)
53 rsm psr.i | psr.dt | psr.ic;;
54 srlz.i;;
55
56 st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
57
585: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
59 and scr2=scr1,mask;; // mask of writecount bits
60 cmp.ne p6,p0=zeroval,scr2
61(p6) br.cond.sptk 5b;;
62
63 tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
64(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
65
66(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
67
685: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
69 and scr2=scr1,mask;; // mask of writecount bits
70 cmp.ne p6,p0=zeroval,scr2
71(p6) br.cond.sptk 5b
72
73 tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
74
75 mov psr.l=psrsave;; // Reenable IC
76 srlz.i;;
77 ////////////// END PHYSICAL MODE ////////////////////
78
79(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
80
81 br.ret.sptk rp
82 .endp sn2_ptc_deadlock_recovery_core
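
For readers who do not speak ia64 assembly, a hedged C rendering of the
recovery loop's control flow. The accessors stand in for the uncached
ld8.acq/st8.rel MMR operations, the bit positions and masks are illustrative,
and the real sequence also drops into physical mode with interrupts and
translation disabled, which C cannot express:

/* Stand-ins for the uncached MMR loads/stores in the assembly. */
static unsigned long mmr_read(volatile unsigned long *a) { return *a; }
static void mmr_write(volatile unsigned long *a, unsigned long v) { *a = v; }

#define DEADLOCK	(1UL << 28)	/* illustrative bit position */
#define WRITECOUNT	0x3fUL		/* illustrative mask */

/* Loop "5:" in the assembly: spin until pending PIO writes drain. */
static unsigned long wait_pios(volatile unsigned long *piowc,
			       unsigned long zeroval)
{
	unsigned long status;

	do {
		status = mmr_read(piowc);
	} while ((status & WRITECOUNT) != zeroval);
	return status;
}

void recovery(volatile unsigned long *ptc0, unsigned long data0,
	      volatile unsigned long *ptc1, unsigned long data1,
	      volatile unsigned long *piowc,
	      volatile unsigned long *piowc_alias, unsigned long zeroval)
{
	unsigned long status;

	do {
		/* "1:": clear DEADLOCK/WRITE_ERROR/MULTI_WRITE_ERROR */
		mmr_write(piowc_alias, 7);
		wait_pios(piowc, zeroval);

		mmr_write(ptc0, data0);		/* write PTC0 ... */
		status = wait_pios(piowc, zeroval);

		/* ... and only write PTC1 if PTC0 did not deadlock. */
		if (!(status & DEADLOCK) && ptc1) {
			mmr_write(ptc1, data1);
			status = wait_pios(piowc, zeroval);
		}
	} while (status & DEADLOCK);		/* "(p8) br.cond.spnt 1b" */
}
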
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
new file mode 100644
index 000000000000..7af05a7ac743
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -0,0 +1,295 @@
1/*
2 * SN2 Platform specific SMP Support
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/spinlock.h>
14#include <linux/threads.h>
15#include <linux/sched.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/mmzone.h>
20#include <linux/module.h>
21#include <linux/bitops.h>
22#include <linux/nodemask.h>
23
24#include <asm/processor.h>
25#include <asm/irq.h>
26#include <asm/sal.h>
27#include <asm/system.h>
28#include <asm/delay.h>
29#include <asm/io.h>
30#include <asm/smp.h>
31#include <asm/tlb.h>
32#include <asm/numa.h>
33#include <asm/hw_irq.h>
34#include <asm/current.h>
35#include <asm/sn/sn_cpuid.h>
36#include <asm/sn/sn_sal.h>
37#include <asm/sn/addrs.h>
38#include <asm/sn/shub_mmr.h>
39#include <asm/sn/nodepda.h>
40#include <asm/sn/rw_mmr.h>
41
42void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0,
43 volatile unsigned long *, unsigned long data1);
44
45static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
46
47static unsigned long sn2_ptc_deadlock_count;
48
49static inline unsigned long wait_piowc(void)
50{
51 volatile unsigned long *piows, zeroval;
52 unsigned long ws;
53
54 piows = pda->pio_write_status_addr;
55 zeroval = pda->pio_write_status_val;
56 do {
57 cpu_relax();
58 } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
59 return ws;
60}
61
62void sn_tlb_migrate_finish(struct mm_struct *mm)
63{
64 if (mm == current->mm)
65 flush_tlb_mm(mm);
66}
67
68/**
69 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
70 * @start: start of virtual address range
71 * @end: end of virtual address range
72 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
73 *
74 * Purges the translation caches of all processors of the given virtual address
75 * range.
76 *
77 * Note:
78 * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
79 * - cpu_vm_mask is converted into a nodemask of the nodes containing the
80 * cpus in cpu_vm_mask.
81 * - if only one bit is set in cpu_vm_mask & it is the current cpu,
82 * then only the local TLB needs to be flushed. This flushing can be done
83 * using ptc.l. This is the common case & avoids the global spinlock.
84 * - if multiple cpus have loaded the context, then flushing has to be
85 * done with ptc.g/MMRs under protection of the global ptc_lock.
86 */
87
88void
89sn2_global_tlb_purge(unsigned long start, unsigned long end,
90 unsigned long nbits)
91{
92 int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
93 volatile unsigned long *ptc0, *ptc1;
94 unsigned long flags = 0, data0 = 0, data1 = 0;
95 struct mm_struct *mm = current->active_mm;
96 short nasids[MAX_NUMNODES], nix;
97 nodemask_t nodes_flushed;
98
99 nodes_clear(nodes_flushed);
100 i = 0;
101
102 for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
103 cnode = cpu_to_node(cpu);
104 node_set(cnode, nodes_flushed);
105 lcpu = cpu;
106 i++;
107 }
108
109 preempt_disable();
110
111 if (likely(i == 1 && lcpu == smp_processor_id())) {
112 do {
113 ia64_ptcl(start, nbits << 2);
114 start += (1UL << nbits);
115 } while (start < end);
116 ia64_srlz_i();
117 preempt_enable();
118 return;
119 }
120
121 if (atomic_read(&mm->mm_users) == 1) {
122 flush_tlb_mm(mm);
123 preempt_enable();
124 return;
125 }
126
127 nix = 0;
128 for_each_node_mask(cnode, nodes_flushed)
129 nasids[nix++] = cnodeid_to_nasid(cnode);
130
131 shub1 = is_shub1();
132 if (shub1) {
133 data0 = (1UL << SH1_PTC_0_A_SHFT) |
134 (nbits << SH1_PTC_0_PS_SHFT) |
135 ((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
136 (1UL << SH1_PTC_0_START_SHFT);
137 ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
138 ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
139 } else {
140 data0 = (1UL << SH2_PTC_A_SHFT) |
141 (nbits << SH2_PTC_PS_SHFT) |
142 (1UL << SH2_PTC_START_SHFT);
143 ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
144 ((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
145 ptc1 = NULL;
146 }
147
148
149 mynasid = get_nasid();
150
151 spin_lock_irqsave(&sn2_global_ptc_lock, flags);
152
153 do {
154 if (shub1)
155 data1 = start | (1UL << SH1_PTC_1_START_SHFT);
156 else
157 data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
158 for (i = 0; i < nix; i++) {
159 nasid = nasids[i];
160 if (unlikely(nasid == mynasid)) {
161 ia64_ptcga(start, nbits << 2);
162 ia64_srlz_i();
163 } else {
164 ptc0 = CHANGE_NASID(nasid, ptc0);
165 if (ptc1)
166 ptc1 = CHANGE_NASID(nasid, ptc1);
167 pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
168 data1);
169 flushed = 1;
170 }
171 }
172
173 if (flushed
174 && (wait_piowc() &
175 SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
176 sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
177 }
178
179 start += (1UL << nbits);
180
181 } while (start < end);
182
183 spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
184
185 preempt_enable();
186}
187
188/*
189 * sn2_ptc_deadlock_recovery
190 *
191 * Recover from PTC deadlock conditions. Recovery requires stepping through each
192 * TLB flush transaction. The recovery sequence is somewhat tricky & is
193 * coded in assembly language.
194 */
195void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
196 volatile unsigned long *ptc1, unsigned long data1)
197{
198 extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
199 volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
200 int cnode, mycnode, nasid;
201 volatile unsigned long *piows;
202 volatile unsigned long zeroval;
203
204 sn2_ptc_deadlock_count++;
205
206 piows = pda->pio_write_status_addr;
207 zeroval = pda->pio_write_status_val;
208
209 mycnode = numa_node_id();
210
211 for_each_online_node(cnode) {
212 if (is_headless_node(cnode) || cnode == mycnode)
213 continue;
214 nasid = cnodeid_to_nasid(cnode);
215 ptc0 = CHANGE_NASID(nasid, ptc0);
216 if (ptc1)
217 ptc1 = CHANGE_NASID(nasid, ptc1);
218 sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
219 }
220}
221
222/**
223 * sn_send_IPI_phys - send an IPI to a Nasid and slice
224 * @nasid: nasid to receive the interrupt (may be outside partition)
225 * @physid: physical cpuid to receive the interrupt.
226 * @vector: command to send
227 * @delivery_mode: delivery mechanism
228 *
229 * Sends an IPI (interprocessor interrupt) to the processor specified by
230 * @physid
231 *
232 * @delivery_mode can be one of the following
233 *
234 * %IA64_IPI_DM_INT - pend an interrupt
235 * %IA64_IPI_DM_PMI - pend a PMI
236 * %IA64_IPI_DM_NMI - pend an NMI
237 * %IA64_IPI_DM_INIT - pend an INIT interrupt
238 */
239void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
240{
241 long val;
242 unsigned long flags = 0;
243 volatile long *p;
244
245 p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
246 val = (1UL << SH_IPI_INT_SEND_SHFT) |
247 (physid << SH_IPI_INT_PID_SHFT) |
248 ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
249 ((long)vector << SH_IPI_INT_IDX_SHFT) |
250 (0x000feeUL << SH_IPI_INT_BASE_SHFT);
251
252 mb();
253 if (enable_shub_wars_1_1()) {
254 spin_lock_irqsave(&sn2_global_ptc_lock, flags);
255 }
256 pio_phys_write_mmr(p, val);
257 if (enable_shub_wars_1_1()) {
258 wait_piowc();
259 spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
260 }
261
262}
263
264EXPORT_SYMBOL(sn_send_IPI_phys);
265
266/**
267 * sn2_send_IPI - send an IPI to a processor
268 * @cpuid: target of the IPI
269 * @vector: command to send
270 * @delivery_mode: delivery mechanism
271 * @redirect: redirect the IPI?
272 *
273 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
274 * @cpuid. @vector specifies the command to send, while @delivery_mode can
275 * be one of the following
276 *
277 * %IA64_IPI_DM_INT - pend an interrupt
278 * %IA64_IPI_DM_PMI - pend a PMI
279 * %IA64_IPI_DM_NMI - pend an NMI
280 * %IA64_IPI_DM_INIT - pend an INIT interrupt
281 */
282void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
283{
284 long physid;
285 int nasid;
286
287 physid = cpu_physical_id(cpuid);
288 nasid = cpuid_to_nasid(cpuid);
289
290 /* the following is used only when starting cpus at boot time */
291 if (unlikely(nasid == -1))
292 ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
293
294 sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
295}
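
Both the ptc.l fast path and the MMR loop in sn2_global_tlb_purge() walk the
range in strides of 1<<nbits bytes. A tiny sketch of just that arithmetic,
with fabricated numbers (a 64kB range at nbits=14, i.e. 16kB per purge):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x2000000000000000UL;	/* fabricated */
	unsigned long end = start + (64UL << 10);	/* 64kB range */
	unsigned long nbits = 14;			/* 16kB per op */
	int ops = 0;

	do {
		/* the kernel issues ia64_ptcl() or an MMR write here */
		start += 1UL << nbits;
		ops++;
	} while (start < end);

	printf("%d purge operations\n", ops);		/* prints 4 */
	return 0;
}
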
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
new file mode 100644
index 000000000000..197356460ee1
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -0,0 +1,690 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
7 *
8 * SGI Altix topology and hardware performance monitoring API.
9 * Mark Goodwin <markgw@sgi.com>.
10 *
11 * Creates /proc/sgi_sn/sn_topology (read-only) to export
12 * info about Altix nodes, routers, CPUs and NumaLink
13 * interconnection/topology.
14 *
15 * Also creates a dynamic misc device named "sn_hwperf"
16 * that supports an ioctl interface to call down into SAL
17 * to discover hw objects, topology and to read/write
18 * memory mapped registers, e.g. for performance monitoring.
19 * The "sn_hwperf" device is registered only after the procfs
20 * file is first opened, i.e. only if/when it's needed.
21 *
22 * This API is used by SGI Performance Co-Pilot and other
23 * tools, see http://oss.sgi.com/projects/pcp
24 */
25
26#include <linux/fs.h>
27#include <linux/slab.h>
28#include <linux/vmalloc.h>
29#include <linux/seq_file.h>
30#include <linux/miscdevice.h>
31#include <linux/cpumask.h>
32#include <linux/smp_lock.h>
33#include <linux/nodemask.h>
34#include <asm/processor.h>
35#include <asm/topology.h>
36#include <asm/smp.h>
37#include <asm/semaphore.h>
38#include <asm/segment.h>
39#include <asm/uaccess.h>
40#include <asm/sal.h>
41#include <asm/sn/io.h>
42#include <asm/sn/sn_sal.h>
43#include <asm/sn/module.h>
44#include <asm/sn/geo.h>
45#include <asm/sn/sn2/sn_hwperf.h>
46
47static void *sn_hwperf_salheap = NULL;
48static int sn_hwperf_obj_cnt = 0;
49static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
50static int sn_hwperf_init(void);
51static DECLARE_MUTEX(sn_hwperf_init_mutex);
52
53static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
54{
55 int e;
56 u64 sz;
57 struct sn_hwperf_object_info *objbuf = NULL;
58
59 if ((e = sn_hwperf_init()) < 0) {
60 printk("sn_hwperf_init failed: err %d\n", e);
61 goto out;
62 }
63
64 sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
65 if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
66 printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
67 e = -ENOMEM;
68 goto out;
69 }
70
71 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
72 0, sz, (u64) objbuf, 0, 0, NULL);
73 if (e != SN_HWPERF_OP_OK) {
74 e = -EINVAL;
75 vfree(objbuf);
76 }
77
78out:
79 *nobj = sn_hwperf_obj_cnt;
80 *ret = objbuf;
81 return e;
82}
83
84static int sn_hwperf_geoid_to_cnode(char *location)
85{
86 int cnode;
87 geoid_t geoid;
88 moduleid_t module_id;
89 char type;
90 int rack, slot, slab;
91 int this_rack, this_slot, this_slab;
92
93 if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
94 return -1;
95
96 for (cnode = 0; cnode < numionodes; cnode++) {
97 geoid = cnodeid_get_geoid(cnode);
98 module_id = geo_module(geoid);
99 this_rack = MODULE_GET_RACK(module_id);
100 this_slot = MODULE_GET_BPOS(module_id);
101 this_slab = geo_slab(geoid);
102 if (rack == this_rack && slot == this_slot && slab == this_slab)
103 break;
104 }
105
106 return cnode < numionodes ? cnode : -1;
107}
108
109static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
110{
111 if (!obj->sn_hwp_this_part)
112 return -1;
113 return sn_hwperf_geoid_to_cnode(obj->location);
114}
115
116static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
117 struct sn_hwperf_object_info *objs)
118{
119 int ordinal;
120 struct sn_hwperf_object_info *p;
121
122 for (ordinal=0, p=objs; p != obj; p++) {
123 if (SN_HWPERF_FOREIGN(p))
124 continue;
125 if (SN_HWPERF_SAME_OBJTYPE(p, obj))
126 ordinal++;
127 }
128
129 return ordinal;
130}
131
132static const char *slabname_node = "node"; /* SHub asic */
133static const char *slabname_ionode = "ionode"; /* TIO asic */
134static const char *slabname_router = "router"; /* NL3R or NL4R */
135static const char *slabname_other = "other"; /* unknown asic */
136
137static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
138 struct sn_hwperf_object_info *objs, int *ordinal)
139{
140 int isnode;
141 const char *slabname = slabname_other;
142
143 if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
144 slabname = isnode ? slabname_node : slabname_ionode;
145 *ordinal = sn_hwperf_obj_to_cnode(obj);
146 }
147 else {
148 *ordinal = sn_hwperf_generic_ordinal(obj, objs);
149 if (SN_HWPERF_IS_ROUTER(obj))
150 slabname = slabname_router;
151 }
152
153 return slabname;
154}
155
156static int sn_topology_show(struct seq_file *s, void *d)
157{
158 int sz;
159 int pt;
160 int e;
161 int i;
162 int j;
163 const char *slabname;
164 int ordinal;
165 cpumask_t cpumask;
166 char slice;
167 struct cpuinfo_ia64 *c;
168 struct sn_hwperf_port_info *ptdata;
169 struct sn_hwperf_object_info *p;
170 struct sn_hwperf_object_info *obj = d; /* this object */
171 struct sn_hwperf_object_info *objs = s->private; /* all objects */
172
173 if (obj == objs) {
174 seq_printf(s, "# sn_topology version 1\n");
175 seq_printf(s, "# objtype ordinal location partition"
176 " [attribute value [, ...]]\n");
177 }
178
179 if (SN_HWPERF_FOREIGN(obj)) {
180 /* private in another partition: not interesting */
181 return 0;
182 }
183
184 for (i = 0; obj->name[i]; i++) {
185 if (obj->name[i] == ' ')
186 obj->name[i] = '_';
187 }
188
189 slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
190 seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
191 obj->sn_hwp_this_part ? "local" : "shared", obj->name);
192
193 if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
194 seq_putc(s, '\n');
195 else {
196 seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
197 for (i=0; i < numionodes; i++) {
198 seq_printf(s, i ? ":%d" : ", dist %d",
199 node_distance(ordinal, i));
200 }
201 seq_putc(s, '\n');
202
203 /*
204 * CPUs on this node, if any
205 */
206 cpumask = node_to_cpumask(ordinal);
207 for_each_online_cpu(i) {
208 if (cpu_isset(i, cpumask)) {
209 slice = 'a' + cpuid_to_slice(i);
210 c = cpu_data(i);
211 seq_printf(s, "cpu %d %s%c local"
212 " freq %luMHz, arch ia64",
213 i, obj->location, slice,
214 c->proc_freq / 1000000);
215 for_each_online_cpu(j) {
216 seq_printf(s, j ? ":%d" : ", dist %d",
217 node_distance(
218 cpuid_to_cnodeid(i),
219 cpuid_to_cnodeid(j)));
220 }
221 seq_putc(s, '\n');
222 }
223 }
224 }
225
226 if (obj->ports) {
227 /*
228 * numalink ports
229 */
230 sz = obj->ports * sizeof(struct sn_hwperf_port_info);
231 if ((ptdata = vmalloc(sz)) == NULL)
232 return -ENOMEM;
233 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
234 SN_HWPERF_ENUM_PORTS, obj->id, sz,
235 (u64) ptdata, 0, 0, NULL);
236 if (e != SN_HWPERF_OP_OK) {
237 vfree(ptdata); return -EINVAL; }
238 for (ordinal=0, p=objs; p != obj; p++) {
239 if (!SN_HWPERF_FOREIGN(p))
240 ordinal += p->ports;
241 }
242 for (pt = 0; pt < obj->ports; pt++) {
243 for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
244 if (ptdata[pt].conn_id == p->id) {
245 break;
246 }
247 }
248 seq_printf(s, "numalink %d %s-%d",
249 ordinal+pt, obj->location, ptdata[pt].port);
250
251 if (i >= sn_hwperf_obj_cnt) {
252 /* no connection */
253 seq_puts(s, " local endpoint disconnected"
254 ", protocol unknown\n");
255 continue;
256 }
257
258 if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
259 /* both ends local to this partition */
260 seq_puts(s, " local");
261 else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
262 /* both ends of the link in a foreign partition */
263 seq_puts(s, " foreign");
264 else
265 /* link straddles a partition */
266 seq_puts(s, " shared");
267
268 /*
269 * Unlikely, but strictly should query the LLP config
270 * registers because an NL4R can be configured to run
271 * NL3 protocol, even when not talking to an NL3 router.
272 * Ditto for node-node.
273 */
274 seq_printf(s, " endpoint %s-%d, protocol %s\n",
275 p->location, ptdata[pt].conn_port,
276 (SN_HWPERF_IS_NL3ROUTER(obj) ||
277 SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
278 }
279 vfree(ptdata);
280 }
281
282 return 0;
283}
284
285static void *sn_topology_start(struct seq_file *s, loff_t * pos)
286{
287 struct sn_hwperf_object_info *objs = s->private;
288
289 if (*pos < sn_hwperf_obj_cnt)
290 return (void *)(objs + *pos);
291
292 return NULL;
293}
294
295static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
296{
297 ++*pos;
298 return sn_topology_start(s, pos);
299}
300
301static void sn_topology_stop(struct seq_file *m, void *v)
302{
303 return;
304}
305
306/*
307 * /proc/sgi_sn/sn_topology, read-only using seq_file
308 */
309static struct seq_operations sn_topology_seq_ops = {
310 .start = sn_topology_start,
311 .next = sn_topology_next,
312 .stop = sn_topology_stop,
313 .show = sn_topology_show
314};
315
316struct sn_hwperf_op_info {
317 u64 op;
318 struct sn_hwperf_ioctl_args *a;
319 void *p;
320 int *v0;
321 int ret;
322};
323
324static void sn_hwperf_call_sal(void *info)
325{
326 struct sn_hwperf_op_info *op_info = info;
327 int r;
328
329 r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
330 op_info->a->arg, op_info->a->sz,
331 (u64) op_info->p, 0, 0, op_info->v0);
332 op_info->ret = r;
333}
334
335static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
336{
337 u32 cpu;
338 u32 use_ipi;
339 int r = 0;
340 cpumask_t save_allowed;
341
342 cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
343 use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
344 op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
345
346 if (cpu != SN_HWPERF_ARG_ANY_CPU) {
347 if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
348 r = -EINVAL;
349 goto out;
350 }
351 }
352
353 if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
354 /* don't care, or already on correct cpu */
355 sn_hwperf_call_sal(op_info);
356 }
357 else {
358 if (use_ipi) {
359 /* use an interprocessor interrupt to call SAL */
360 smp_call_function_single(cpu, sn_hwperf_call_sal,
361 op_info, 1, 1);
362 }
363 else {
364 /* migrate the task before calling SAL */
365 save_allowed = current->cpus_allowed;
366 set_cpus_allowed(current, cpumask_of_cpu(cpu));
367 sn_hwperf_call_sal(op_info);
368 set_cpus_allowed(current, save_allowed);
369 }
370 }
371 r = op_info->ret;
372
373out:
374 return r;
375}
376
377/* map SAL hwperf error code to system error code */
378static int sn_hwperf_map_err(int hwperf_err)
379{
380 int e;
381
382 switch(hwperf_err) {
383 case SN_HWPERF_OP_OK:
384 e = 0;
385 break;
386
387 case SN_HWPERF_OP_NOMEM:
388 e = -ENOMEM;
389 break;
390
391 case SN_HWPERF_OP_NO_PERM:
392 e = -EPERM;
393 break;
394
395 case SN_HWPERF_OP_IO_ERROR:
396 e = -EIO;
397 break;
398
399 case SN_HWPERF_OP_BUSY:
400 case SN_HWPERF_OP_RECONFIGURE:
401 e = -EAGAIN;
402 break;
403
404 case SN_HWPERF_OP_INVAL:
405 default:
406 e = -EINVAL;
407 break;
408 }
409
410 return e;
411}
412
413/*
414 * ioctl for "sn_hwperf" misc device
415 */
416static int
417sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
418{
419 struct sn_hwperf_ioctl_args a;
420 struct cpuinfo_ia64 *cdata;
421 struct sn_hwperf_object_info *objs;
422 struct sn_hwperf_object_info *cpuobj;
423 struct sn_hwperf_op_info op_info;
424 void *p = NULL;
425 int nobj;
426 char slice;
427 int node;
428 int r;
429 int v0;
430 int i;
431 int j;
432
433 unlock_kernel();
434
435 /* only user requests are allowed here */
436 if ((op & SN_HWPERF_OP_MASK) < 10) {
437 r = -EINVAL;
438 goto error;
439 }
440 r = copy_from_user(&a, (const void __user *)arg,
441 sizeof(struct sn_hwperf_ioctl_args));
442 if (r != 0) {
443 r = -EFAULT;
444 goto error;
445 }
446
447 /*
448 * Allocate memory to hold a kernel copy of the user buffer. The
449 * buffer contents are either copied in or out (or both) of user
450 * space depending on the flags encoded in the requested operation.
451 */
452 if (a.ptr) {
453 p = vmalloc(a.sz);
454 if (!p) {
455 r = -ENOMEM;
456 goto error;
457 }
458 }
459
460 if (op & SN_HWPERF_OP_MEM_COPYIN) {
461 r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
462 if (r != 0) {
463 r = -EFAULT;
464 goto error;
465 }
466 }
467
468 switch (op) {
469 case SN_HWPERF_GET_CPU_INFO:
470 if (a.sz == sizeof(u64)) {
471 /* special case to get size needed */
472 *(u64 *) p = (u64) num_online_cpus() *
473 sizeof(struct sn_hwperf_object_info);
474 } else
475 if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
476 r = -ENOMEM;
477 goto error;
478 } else
479 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
480 memset(p, 0, a.sz);
481 for (i = 0; i < nobj; i++) {
482 node = sn_hwperf_obj_to_cnode(objs + i);
483 for_each_online_cpu(j) {
484 if (node != cpu_to_node(j))
485 continue;
486 cpuobj = (struct sn_hwperf_object_info *) p + j;
487 slice = 'a' + cpuid_to_slice(j);
488 cdata = cpu_data(j);
489 cpuobj->id = j;
490 snprintf(cpuobj->name,
491 sizeof(cpuobj->name),
492 "CPU %luMHz %s",
493 cdata->proc_freq / 1000000,
494 cdata->vendor);
495 snprintf(cpuobj->location,
496 sizeof(cpuobj->location),
497 "%s%c", objs[i].location,
498 slice);
499 }
500 }
501
502 vfree(objs);
503 }
504 break;
505
506 case SN_HWPERF_GET_NODE_NASID:
507 if (a.sz != sizeof(u64) ||
508 (node = a.arg) < 0 || node >= numionodes) {
509 r = -EINVAL;
510 goto error;
511 }
512 *(u64 *)p = (u64)cnodeid_to_nasid(node);
513 break;
514
515 case SN_HWPERF_GET_OBJ_NODE:
516 if (a.sz != sizeof(u64) || a.arg < 0) {
517 r = -EINVAL;
518 goto error;
519 }
520 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
521 if (a.arg >= nobj) {
522 r = -EINVAL;
523 vfree(objs);
524 goto error;
525 }
526 if (objs[(i = a.arg)].id != a.arg) {
527 for (i = 0; i < nobj; i++) {
528 if (objs[i].id == a.arg)
529 break;
530 }
531 }
532 if (i == nobj) {
533 r = -EINVAL;
534 vfree(objs);
535 goto error;
536 }
537 *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
538 vfree(objs);
539 }
540 break;
541
542 case SN_HWPERF_GET_MMRS:
543 case SN_HWPERF_SET_MMRS:
544 case SN_HWPERF_OBJECT_DISTANCE:
545 op_info.p = p;
546 op_info.a = &a;
547 op_info.v0 = &v0;
548 op_info.op = op;
549 r = sn_hwperf_op_cpu(&op_info);
550 if (r) {
551 r = sn_hwperf_map_err(r);
552 goto error;
553 }
554 break;
555
556 default:
557 /* all other ops are a direct SAL call */
558 r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
559 a.arg, a.sz, (u64) p, 0, 0, &v0);
560 if (r) {
561 r = sn_hwperf_map_err(r);
562 goto error;
563 }
564 a.v0 = v0;
565 break;
566 }
567
568 if (op & SN_HWPERF_OP_MEM_COPYOUT) {
569 r = copy_to_user((void __user *)a.ptr, p, a.sz);
570 if (r != 0) {
571 r = -EFAULT;
572 goto error;
573 }
574 }
575
576error:
577 vfree(p);
578
579 lock_kernel();
580 return r;
581}
582
583static struct file_operations sn_hwperf_fops = {
584 .ioctl = sn_hwperf_ioctl,
585};
586
587static struct miscdevice sn_hwperf_dev = {
588 MISC_DYNAMIC_MINOR,
589 "sn_hwperf",
590 &sn_hwperf_fops
591};
592
593static int sn_hwperf_init(void)
594{
595 u64 v;
596 int salr;
597 int e = 0;
598
599 /* single threaded, once-only initialization */
600 down(&sn_hwperf_init_mutex);
601 if (sn_hwperf_salheap) {
602 up(&sn_hwperf_init_mutex);
603 return e;
604 }
605
606 /*
607 * The PROM code needs a fixed reference node. For convenience the
608 * same node as the console I/O is used.
609 */
610 sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
611
612 /*
613 * Request the needed size and install the PROM scratch area.
614 * The PROM keeps various tracking bits in this memory area.
615 */
616 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
617 (u64) SN_HWPERF_GET_HEAPSIZE, 0,
618 (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
619 if (salr != SN_HWPERF_OP_OK) {
620 e = -EINVAL;
621 goto out;
622 }
623
624 if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
625 e = -ENOMEM;
626 goto out;
627 }
628 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
629 SN_HWPERF_INSTALL_HEAP, 0, v,
630 (u64) sn_hwperf_salheap, 0, 0, NULL);
631 if (salr != SN_HWPERF_OP_OK) {
632 e = -EINVAL;
633 goto out;
634 }
635
636 salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
637 SN_HWPERF_OBJECT_COUNT, 0,
638 sizeof(u64), (u64) &v, 0, 0, NULL);
639 if (salr != SN_HWPERF_OP_OK) {
640 e = -EINVAL;
641 goto out;
642 }
643 sn_hwperf_obj_cnt = (int)v;
644
645out:
646 if (e < 0 && sn_hwperf_salheap) {
647 vfree(sn_hwperf_salheap);
648 sn_hwperf_salheap = NULL;
649 sn_hwperf_obj_cnt = 0;
650 }
651
652 if (!e) {
653 /*
654 * Register a dynamic misc device for ioctl. Platforms
655 * supporting hotplug will create /dev/sn_hwperf, else
656 * users can look up the minor number in /proc/misc.
657 */
658 if ((e = misc_register(&sn_hwperf_dev)) != 0) {
659 printk(KERN_ERR "sn_hwperf_init: misc register "
660 "for \"sn_hwperf\" failed, err %d\n", e);
661 }
662 }
663
664 up(&sn_hwperf_init_mutex);
665 return e;
666}
667
668int sn_topology_open(struct inode *inode, struct file *file)
669{
670 int e;
671 struct seq_file *seq;
672 struct sn_hwperf_object_info *objbuf;
673 int nobj;
674
675 if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
676 e = seq_open(file, &sn_topology_seq_ops);
677 seq = file->private_data;
678 seq->private = objbuf;
679 }
680
681 return e;
682}
683
684int sn_topology_release(struct inode *inode, struct file *file)
685{
686 struct seq_file *seq = file->private_data;
687
688 vfree(seq->private);
689 return seq_release(inode, file);
690}
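The GET_CPU_INFO branch above doubles as a size query: called with an 8-byte buffer it returns the size needed, and called again with a buffer that large it returns the per-cpu records. A user-space sketch of that two-step protocol follows; the header path and the exact layout of struct sn_hwperf_ioctl_args are assumptions, while the device node name comes from the misc registration above.

#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/sn/sn2/sn_hwperf.h>	/* assumed header for the op codes */

int main(void)
{
	struct sn_hwperf_ioctl_args a = { 0 };
	unsigned long long sz;
	int fd = open("/dev/sn_hwperf", O_RDONLY);

	if (fd < 0)
		return 1;

	/* First call: an sz field of sizeof(u64) makes the driver store
	 * the buffer size needed rather than the data itself. */
	a.ptr = (unsigned long)&sz;
	a.sz = sizeof(sz);
	if (ioctl(fd, SN_HWPERF_GET_CPU_INFO, &a) < 0)
		return 1;

	/* Second call: a buffer of the advertised size receives one
	 * sn_hwperf_object_info record per online cpu. */
	a.ptr = (unsigned long)malloc(sz);
	a.sz = sz;
	if (!a.ptr || ioctl(fd, SN_HWPERF_GET_CPU_INFO, &a) < 0)
		return 1;

	close(fd);
	return 0;
}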
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
new file mode 100644
index 000000000000..6a80fca807b9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -0,0 +1,149 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#include <linux/config.h>
9#include <asm/uaccess.h>
10
11#ifdef CONFIG_PROC_FS
12#include <linux/proc_fs.h>
13#include <linux/seq_file.h>
14#include <asm/sn/sn_sal.h>
15
16static int partition_id_show(struct seq_file *s, void *p)
17{
18 seq_printf(s, "%d\n", sn_local_partid());
19 return 0;
20}
21
22static int partition_id_open(struct inode *inode, struct file *file)
23{
24 return single_open(file, partition_id_show, NULL);
25}
26
27static int system_serial_number_show(struct seq_file *s, void *p)
28{
29 seq_printf(s, "%s\n", sn_system_serial_number());
30 return 0;
31}
32
33static int system_serial_number_open(struct inode *inode, struct file *file)
34{
35 return single_open(file, system_serial_number_show, NULL);
36}
37
38static int licenseID_show(struct seq_file *s, void *p)
39{
40 seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
41 return 0;
42}
43
44static int licenseID_open(struct inode *inode, struct file *file)
45{
46 return single_open(file, licenseID_show, NULL);
47}
48
49/*
50 * Enable forced interrupt by default.
51 * When set, the sn interrupt handler writes the force interrupt register on
52 * the bridge chip. The hardware will then send an interrupt message if the
53 * interrupt line is active. This mimics a level sensitive interrupt.
54 */
55int sn_force_interrupt_flag = 1;
56
57static int sn_force_interrupt_show(struct seq_file *s, void *p)
58{
59 seq_printf(s, "Force interrupt is %s\n",
60 sn_force_interrupt_flag ? "enabled" : "disabled");
61 return 0;
62}
63
64static ssize_t sn_force_interrupt_write_proc(struct file *file,
65 const char __user *buffer, size_t count, loff_t *data)
66{
67 char val;
68
69 if (copy_from_user(&val, buffer, 1))
70 return -EFAULT;
71
72 sn_force_interrupt_flag = (val == '0') ? 0 : 1;
73 return count;
74}
75
76static int sn_force_interrupt_open(struct inode *inode, struct file *file)
77{
78 return single_open(file, sn_force_interrupt_show, NULL);
79}
80
81static int coherence_id_show(struct seq_file *s, void *p)
82{
83 seq_printf(s, "%d\n", partition_coherence_id());
84
85 return 0;
86}
87
88static int coherence_id_open(struct inode *inode, struct file *file)
89{
90 return single_open(file, coherence_id_show, NULL);
91}
92
93static struct proc_dir_entry *sn_procfs_create_entry(
94 const char *name, struct proc_dir_entry *parent,
95 int (*openfunc)(struct inode *, struct file *),
96 int (*releasefunc)(struct inode *, struct file *))
97{
98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
99
100 if (e) {
101 e->proc_fops = (struct file_operations *)kmalloc(
102 sizeof(struct file_operations), GFP_KERNEL);
103 if (e->proc_fops) {
104 memset(e->proc_fops, 0, sizeof(struct file_operations));
105 e->proc_fops->open = openfunc;
106 e->proc_fops->read = seq_read;
107 e->proc_fops->llseek = seq_lseek;
108 e->proc_fops->release = releasefunc;
109 }
110 }
111
112 return e;
113}
114
115/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
116extern int sn_topology_open(struct inode *, struct file *);
117extern int sn_topology_release(struct inode *, struct file *);
118
119void register_sn_procfs(void)
120{
121 static struct proc_dir_entry *sgi_proc_dir = NULL;
122 struct proc_dir_entry *e;
123
124 BUG_ON(sgi_proc_dir != NULL);
125 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
126 return;
127
128 sn_procfs_create_entry("partition_id", sgi_proc_dir,
129 partition_id_open, single_release);
130
131 sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
132 system_serial_number_open, single_release);
133
134 sn_procfs_create_entry("licenseID", sgi_proc_dir,
135 licenseID_open, single_release);
136
137 e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
138 sn_force_interrupt_open, single_release);
139 if (e)
140 e->proc_fops->write = sn_force_interrupt_write_proc;
141
142 sn_procfs_create_entry("coherence_id", sgi_proc_dir,
143 coherence_id_open, single_release);
144
145 sn_procfs_create_entry("sn_topology", sgi_proc_dir,
146 sn_topology_open, sn_topology_release);
147}
148
149#endif /* CONFIG_PROC_FS */
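Only the first byte of a write matters to sn_force_interrupt_write_proc(): '0' disables the flag, anything else enables it. A minimal user-space sketch against the proc file created by register_sn_procfs() above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sgi_sn/sn_force_interrupt", "w");

	if (!f)
		return 1;
	fputc('0', f);	/* '0' disables; any other first byte enables */
	return fclose(f) ? 1 : 0;
}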
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
new file mode 100644
index 000000000000..deb9baf4d473
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/ia64/sn/kernel/sn2/timer.c
3 *
4 * Copyright (C) 2003 Silicon Graphics, Inc.
5 * Copyright (C) 2003 Hewlett-Packard Co
6 * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/time.h>
13#include <linux/interrupt.h>
14
15#include <asm/hw_irq.h>
16#include <asm/system.h>
17
18#include <asm/sn/leds.h>
19#include <asm/sn/shub_mmr.h>
20#include <asm/sn/clksupport.h>
21
22extern unsigned long sn_rtc_cycles_per_second;
23
24static struct time_interpolator sn2_interpolator = {
25 .drift = -1,
26 .shift = 10,
27 .mask = (1LL << 55) - 1,
28 .source = TIME_SOURCE_MMIO64
29};
30
31void __init sn_timer_init(void)
32{
33 sn2_interpolator.frequency = sn_rtc_cycles_per_second;
34 sn2_interpolator.addr = RTC_COUNTER_ADDR;
35 register_time_interpolator(&sn2_interpolator);
36}
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
new file mode 100644
index 000000000000..cde7375390b0
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -0,0 +1,63 @@
1/*
2 *
3 *
4 * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 *
14 * Further, this software is distributed without any warranty that it is
15 * free of the rightful claim of any third person regarding infringement
16 * or the like. Any license provided herein, whether implied or
17 * otherwise, applies only to this software file. Patent licenses, if
18 * any, provided herein do not apply to combinations of this program with
19 * other software, or any other product whatsoever.
20 *
21 * You should have received a copy of the GNU General Public
22 * License along with this program; if not, write the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 *
25 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
26 * Mountain View, CA 94043, or:
27 *
28 * http://www.sgi.com
29 *
30 * For further information regarding this notice, see:
31 *
32 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
33 */
34
35#include <linux/interrupt.h>
36#include <asm/sn/pda.h>
37#include <asm/sn/leds.h>
38
39extern void sn_lb_int_war_check(void);
40extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
41
42#define SN_LB_INT_WAR_INTERVAL 100
43
44void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
45{
46 /* LED blinking */
47 if (!pda->hb_count--) {
48 pda->hb_count = HZ / 2;
49 set_led_bits(pda->hb_state ^=
50 LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
51 }
52
53 if (enable_shub_wars_1_1()) {
54 /* Bugfix code for SHUB 1.1 */
55 if (pda->pio_shub_war_cam_addr)
56 *pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
57 }
58 if (pda->sn_lb_int_war_ticks == 0)
59 sn_lb_int_war_check();
60 pda->sn_lb_int_war_ticks++;
61 if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
62 pda->sn_lb_int_war_ticks = 0;
63}
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
new file mode 100644
index 000000000000..b5dca0097a8e
--- /dev/null
+++ b/arch/ia64/sn/pci/Makefile
@@ -0,0 +1,10 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7#
8# Makefile for the sn pci general routines.
9
10obj-y := pci_dma.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
new file mode 100644
index 000000000000..f680824f819d
--- /dev/null
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -0,0 +1,363 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
7 *
8 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
9 * a description of how these routines should be used.
10 */
11
12#include <linux/module.h>
13#include <asm/dma.h>
14#include <asm/sn/sn_sal.h>
15#include "pci/pcibus_provider_defs.h"
16#include "pci/pcidev.h"
17#include "pci/pcibr_provider.h"
18
19#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
20#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
21
22/**
23 * sn_dma_supported - test a DMA mask
24 * @dev: device to test
25 * @mask: DMA mask to test
26 *
27 * Return whether the given PCI device DMA address mask can be supported
28 * properly. For example, if your device can only drive the low 24-bits
29 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
30 * this function. Of course, SN only supports devices that have 32 or more
31 * address bits when using the PMU.
32 */
33int sn_dma_supported(struct device *dev, u64 mask)
34{
35 BUG_ON(dev->bus != &pci_bus_type);
36
37 if (mask < 0x7fffffff)
38 return 0;
39 return 1;
40}
41EXPORT_SYMBOL(sn_dma_supported);
42
43/**
44 * sn_dma_set_mask - set the DMA mask
45 * @dev: device to set
46 * @dma_mask: new mask
47 *
48 * Set @dev's DMA mask if the hw supports it.
49 */
50int sn_dma_set_mask(struct device *dev, u64 dma_mask)
51{
52 BUG_ON(dev->bus != &pci_bus_type);
53
54 if (!sn_dma_supported(dev, dma_mask))
55 return 0;
56
57 *dev->dma_mask = dma_mask;
58 return 1;
59}
60EXPORT_SYMBOL(sn_dma_set_mask);
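These two entry points are exported so the platform's dma_set_mask()/pci_set_dma_mask() paths can reach them. A hedged driver-side sketch; the probe function is hypothetical and the routing through the generic API is an assumption here:

#include <linux/pci.h>

/* hypothetical probe: claim a 32-bit DMA mask for the device */
static int example_probe(struct pci_dev *pdev)
{
	/* sn_dma_supported() rejects anything below 32 address bits,
	 * so 0xffffffff is the smallest mask that can succeed here. */
	if (pci_set_dma_mask(pdev, 0xffffffffULL))
		return -EIO;
	return 0;
}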
61
62/**
63 * sn_dma_alloc_coherent - allocate memory for coherent DMA
64 * @dev: device to allocate for
65 * @size: size of the region
66 * @dma_handle: DMA (bus) address
67 * @flags: memory allocation flags
68 *
69 * dma_alloc_coherent() returns a pointer to a memory region suitable for
70 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
71 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
72 *
73 * This interface is usually used for "command" streams (e.g. the command
74 * queue for a SCSI controller). See Documentation/DMA-API.txt for
75 * more information.
76 */
77void *sn_dma_alloc_coherent(struct device *dev, size_t size,
78 dma_addr_t * dma_handle, int flags)
79{
80 void *cpuaddr;
81 unsigned long phys_addr;
82 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
83
84 BUG_ON(dev->bus != &pci_bus_type);
85
86 /*
87 * Allocate the memory.
88 * FIXME: We should be doing alloc_pages_node for the node closest
89 * to the PCI device.
90 */
91 if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
92 return NULL;
93
94 memset(cpuaddr, 0x0, size);
95
96 /* physical addr. of the memory we just got */
97 phys_addr = __pa(cpuaddr);
98
99 /*
100 * 64 bit address translations should never fail.
101 * 32 bit translations can fail if there are insufficient mapping
102 * resources.
103 */
104
105 *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
106 SN_PCIDMA_CONSISTENT);
107 if (!*dma_handle) {
108 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
109 free_pages((unsigned long)cpuaddr, get_order(size));
110 return NULL;
111 }
112
113 return cpuaddr;
114}
115EXPORT_SYMBOL(sn_dma_alloc_coherent);
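A hedged usage sketch: a driver asks the generic API for a coherent region, which on this platform is expected to resolve to sn_dma_alloc_coherent() above (the helper below is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* hypothetical: allocate a 4K command ring for a controller */
static void *example_alloc_ring(struct device *dev, dma_addr_t *bus_addr)
{
	/* NULL means memory or 32-bit map entries (ATEs) were
	 * exhausted; *bus_addr is what gets programmed into the
	 * device. */
	return dma_alloc_coherent(dev, 4096, bus_addr, GFP_KERNEL);
}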
116
117/**
118 * sn_pci_free_coherent - free memory associated with coherent DMAable region
119 * @dev: device to free for
120 * @size: size to free
121 * @cpu_addr: kernel virtual address to free
122 * @dma_handle: DMA address associated with this region
123 *
124 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
125 * any associated IOMMU mappings.
126 */
127void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
128 dma_addr_t dma_handle)
129{
130 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
131
132 BUG_ON(dev->bus != &pci_bus_type);
133
134 pcibr_dma_unmap(pcidev_info, dma_handle, 0);
135 free_pages((unsigned long)cpu_addr, get_order(size));
136}
137EXPORT_SYMBOL(sn_dma_free_coherent);
138
139/**
140 * sn_dma_map_single - map a single page for DMA
141 * @dev: device to map for
142 * @cpu_addr: kernel virtual address of the region to map
143 * @size: size of the region
144 * @direction: DMA direction
145 *
146 * Map the region pointed to by @cpu_addr for DMA and return the
147 * DMA address.
148 *
149 * We map this to the one step pcibr_dmamap_trans interface rather than
150 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
151 * no way of saving the dmamap handle from the alloc to later free
152 * (which is pretty much unacceptable).
153 *
154 * TODO: simplify our interface;
155 * figure out how to save the dmamap handle so we can use two-step.
156 */
157dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
158 int direction)
159{
160 dma_addr_t dma_addr;
161 unsigned long phys_addr;
162 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
163
164 BUG_ON(dev->bus != &pci_bus_type);
165
166 phys_addr = __pa(cpu_addr);
167 dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
168 if (!dma_addr) {
169 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
170 return 0;
171 }
172 return dma_addr;
173}
174EXPORT_SYMBOL(sn_dma_map_single);
175
176/**
177 * sn_dma_unmap_single - unmap a DMA mapped page
178 * @dev: device to unmap for
179 * @dma_addr: DMA address to unmap
180 * @size: size of region
181 * @direction: DMA direction
182 *
183 * This routine is supposed to sync the DMA region specified
184 * by @dma_addr into the coherence domain. On SN, we're always cache
185 * coherent, so we just need to free any ATEs associated with this mapping.
186 */
187void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
188 int direction)
189{
190 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
191
192 BUG_ON(dev->bus != &pci_bus_type);
193 pcibr_dma_unmap(pcidev_info, dma_addr, direction);
194}
195EXPORT_SYMBOL(sn_dma_unmap_single);
196
197/**
198 * sn_dma_unmap_sg - unmap a DMA scatterlist
199 * @dev: device to unmap
200 * @sg: scatterlist to unmap
201 * @nhwentries: number of scatterlist entries
202 * @direction: DMA direction
203 *
204 * Unmap a set of streaming mode DMA translations.
205 */
206void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
207 int nhwentries, int direction)
208{
209 int i;
210 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
211
212 BUG_ON(dev->bus != &pci_bus_type);
213
214 for (i = 0; i < nhwentries; i++, sg++) {
215 pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
216 sg->dma_address = (dma_addr_t) NULL;
217 sg->dma_length = 0;
218 }
219}
220EXPORT_SYMBOL(sn_dma_unmap_sg);
221
222/**
223 * sn_dma_map_sg - map a scatterlist for DMA
224 * @dev: device to map for
225 * @sg: scatterlist to map
226 * @nhwentries: number of entries
227 * @direction: direction of the DMA transaction
228 *
229 * Maps each entry of @sg for DMA.
230 */
231int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
232 int direction)
233{
234 unsigned long phys_addr;
235 struct scatterlist *saved_sg = sg;
236 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
237 int i;
238
239 BUG_ON(dev->bus != &pci_bus_type);
240
241 /*
242 * Setup a DMA address for each entry in the scatterlist.
243 */
244 for (i = 0; i < nhwentries; i++, sg++) {
245 phys_addr = SG_ENT_PHYS_ADDRESS(sg);
246 sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
247 sg->length, 0);
248
249 if (!sg->dma_address) {
250 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
251
252 /*
253 * Free any successfully allocated entries.
254 */
255 if (i > 0)
256 sn_dma_unmap_sg(dev, saved_sg, i, direction);
257 return 0;
258 }
259
260 sg->dma_length = sg->length;
261 }
262
263 return nhwentries;
264}
265EXPORT_SYMBOL(sn_dma_map_sg);
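A minimal mapping sketch for a single lowmem buffer, using the 2.6-era scatterlist fields; the helper name is hypothetical and the buffer is assumed to sit inside one page:

#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* hypothetical: map one lowmem buffer so the device can write into it */
static int example_map_buf(struct device *dev, void *buf, size_t len,
			   struct scatterlist *sg)
{
	sg->page = virt_to_page(buf);
	sg->offset = offset_in_page(buf);
	sg->length = len;

	/* dma_map_sg() returns 0 on failure, which here means the
	 * bridge ran out of ATEs (see sn_dma_map_sg() above). */
	if (!dma_map_sg(dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	/* device may now DMA into sg->dma_address .. +sg->dma_length;
	 * undo with dma_unmap_sg(dev, sg, 1, DMA_FROM_DEVICE). */
	return 0;
}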
266
267void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
268 size_t size, int direction)
269{
270 BUG_ON(dev->bus != &pci_bus_type);
271}
272EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
273
274void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
275 size_t size, int direction)
276{
277 BUG_ON(dev->bus != &pci_bus_type);
278}
279EXPORT_SYMBOL(sn_dma_sync_single_for_device);
280
281void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
282 int nelems, int direction)
283{
284 BUG_ON(dev->bus != &pci_bus_type);
285}
286EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
287
288void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
289 int nelems, int direction)
290{
291 BUG_ON(dev->bus != &pci_bus_type);
292}
293EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
294
295int sn_dma_mapping_error(dma_addr_t dma_addr)
296{
297 return 0;
298}
299EXPORT_SYMBOL(sn_dma_mapping_error);
300
301char *sn_pci_get_legacy_mem(struct pci_bus *bus)
302{
303 if (!SN_PCIBUS_BUSSOFT(bus))
304 return ERR_PTR(-ENODEV);
305
306 return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
307}
308
309int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
310{
311 unsigned long addr;
312 int ret;
313
314 if (!SN_PCIBUS_BUSSOFT(bus))
315 return -ENODEV;
316
317 addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
318 addr += port;
319
320 ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
321
322 if (ret == 2)
323 return -EINVAL;
324
325 if (ret == 1)
326 *val = -1;
327
328 return size;
329}
330
331int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
332{
333 int ret = size;
334 unsigned long paddr;
335 unsigned long *addr;
336
337 if (!SN_PCIBUS_BUSSOFT(bus)) {
338 ret = -ENODEV;
339 goto out;
340 }
341
342 /* Put the phys addr in uncached space */
343 paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
344 paddr += port;
345 addr = (unsigned long *)paddr;
346
347 switch (size) {
348 case 1:
349 *(volatile u8 *)(addr) = (u8)(val);
350 break;
351 case 2:
352 *(volatile u16 *)(addr) = (u16)(val);
353 break;
354 case 4:
355 *(volatile u32 *)(addr) = (u32)(val);
356 break;
357 default:
358 ret = -EINVAL;
359 break;
360 }
361 out:
362 return ret;
363}
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
new file mode 100644
index 000000000000..1850c4a94c41
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -0,0 +1,11 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
7#
8# Makefile for the sn2 io routines.
9
10obj-y += pcibr_dma.o pcibr_reg.o \
11 pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
new file mode 100644
index 000000000000..9d6854666f9b
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -0,0 +1,188 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <asm/sn/sn_sal.h>
11#include "pci/pcibus_provider_defs.h"
12#include "pci/pcidev.h"
13#include "pci/pcibr_provider.h"
14
15int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
16
17/*
18 * mark_ate: Mark the ate as either free or inuse.
19 */
20static void mark_ate(struct ate_resource *ate_resource, int start, int number,
21 uint64_t value)
22{
23
24 uint64_t *ate = ate_resource->ate;
25 int index;
26 int length = 0;
27
28 for (index = start; length < number; index++, length++)
29 ate[index] = value;
30
31}
32
33/*
34 * find_free_ate: Find the first free ate index starting from the given
35 * index for the desired consecutive count.
36 */
37static int find_free_ate(struct ate_resource *ate_resource, int start,
38 int count)
39{
40
41 uint64_t *ate = ate_resource->ate;
42 int index;
43 int start_free;
44
45 for (index = start; index < ate_resource->num_ate;) {
46 if (!ate[index]) {
47 int i, free = 0;
48 start_free = index; /* Found start of a free run */
49 for (i = start_free; i < ate_resource->num_ate; i++) {
50 if (!ate[i]) { /* This is free */
51 if (++free == count)
52 return start_free;
53 } else {
54 index = i + 1;
55 break;
56 }
57 }
58 if (i == ate_resource->num_ate)
59 return -1; /* tail run is free but too short */
60 } else
61 index++; /* Try next ate */
62 }
63
64 return -1;
65}
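As a standalone illustration (ordinary user-space C, not kernel code) of the first-fit scan above: with two allocated runs in an eight-entry table, a request for two consecutive slots skips the single free slot at index 2 and lands on index 6. Like the guarded version above, it returns -1 when the free tail is shorter than the request.

#include <stdio.h>
#include <stdint.h>

/* Same scan as find_free_ate(): 0 = free, nonzero = in use. */
static int toy_find(const uint64_t *ate, int num, int start, int count)
{
	int index, i, free;

	for (index = start; index < num;) {
		if (ate[index]) {
			index++;	/* in use: try the next slot */
			continue;
		}
		for (i = index, free = 0; i < num; i++) {
			if (ate[i]) {
				index = i + 1;
				break;
			}
			if (++free == count)
				return index;
		}
		if (i == num)	/* tail run free but too short */
			return -1;
	}
	return -1;
}

int main(void)
{
	uint64_t ate[8] = { 2, 2, 0, 3, 3, 3, 0, 0 };

	printf("%d\n", toy_find(ate, 8, 0, 2));	/* prints 6 */
	return 0;
}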
66
67/*
68 * free_ate_resource: Free the requested number of ATEs.
69 */
70static inline void free_ate_resource(struct ate_resource *ate_resource,
71 int start)
72{
73
74 mark_ate(ate_resource, start, ate_resource->ate[start], 0);
75 if ((ate_resource->lowest_free_index > start) ||
76 (ate_resource->lowest_free_index < 0))
77 ate_resource->lowest_free_index = start;
78
79}
80
81/*
82 * alloc_ate_resource: Allocate the requested number of ATEs.
83 */
84static inline int alloc_ate_resource(struct ate_resource *ate_resource,
85 int ate_needed)
86{
87
88 int start_index;
89
90 /*
91 * Check for ate exhaustion.
92 */
93 if (ate_resource->lowest_free_index < 0)
94 return -1;
95
96 /*
97 * Find the required number of free consecutive ates.
98 */
99 start_index =
100 find_free_ate(ate_resource, ate_resource->lowest_free_index,
101 ate_needed);
102 if (start_index >= 0)
103 mark_ate(ate_resource, start_index, ate_needed, ate_needed);
104
105 ate_resource->lowest_free_index =
106 find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
107
108 return start_index;
109}
110
111/*
112 * Allocate "count" contiguous Bridge Address Translation Entries
113 * on the specified bridge to be used for PCI to XTALK mappings.
114 * Indices in rm map range from 1..num_entries. Indices returned
115 * to caller range from 0..num_entries-1.
116 *
117 * Return the start index on success, -1 on failure.
118 */
119int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
120{
121 int status = 0;
122 uint64_t flag;
123
124 flag = pcibr_lock(pcibus_info);
125 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
126
127 if (status < 0) {
128 /* Failed to allocate */
129 pcibr_unlock(pcibus_info, flag);
130 return -1;
131 }
132
133 pcibr_unlock(pcibus_info, flag);
134
135 return status;
136}
137
138/*
139 * Setup an Address Translation Entry as specified. Use either the Bridge
140 * internal maps or the external map RAM, as appropriate.
141 */
142static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
143 int ate_index)
144{
145 if (ate_index < pcibus_info->pbi_int_ate_size) {
146 return pcireg_int_ate_addr(pcibus_info, ate_index);
147 }
148 panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
149}
150
151/*
152 * Update the ate.
153 */
154inline void
155ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
156 volatile uint64_t ate)
157{
158 while (count-- > 0) {
159 if (ate_index < pcibus_info->pbi_int_ate_size) {
160 pcireg_int_ate_set(pcibus_info, ate_index, ate);
161 } else {
162 panic("ate_write: invalid ate_index 0x%x", ate_index);
163 }
164 ate_index++;
165 ate += IOPGSIZE;
166 }
167
168 pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
169}
170
171void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
172{
173
174 volatile uint64_t ate;
175 int count;
176 uint64_t flags;
177
178 if (pcibr_invalidate_ate) {
179 /* For debugging purposes, clear the valid bit in the ATE */
180 ate = *pcibr_ate_addr(pcibus_info, index);
181 count = pcibus_info->pbi_int_ate_resource.ate[index];
182 ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
183 }
184
185 flags = pcibr_lock(pcibus_info);
186 free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
187 pcibr_unlock(pcibus_info, flags);
188}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
new file mode 100644
index 000000000000..b1d66ac065c8
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -0,0 +1,379 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <asm/sn/sn_sal.h>
12#include <asm/sn/geo.h>
13#include "xtalk/xwidgetdev.h"
14#include "xtalk/hubdev.h"
15#include "pci/pcibus_provider_defs.h"
16#include "pci/pcidev.h"
17#include "pci/tiocp.h"
18#include "pci/pic.h"
19#include "pci/pcibr_provider.h"
20#include "pci/tiocp.h"
21#include "tio.h"
22#include <asm/sn/addrs.h>
23
24extern int sn_ioif_inited;
25
26/* =====================================================================
27 * DMA MANAGEMENT
28 *
29 * The Bridge ASIC provides three methods of doing DMA: via a "direct map"
30 * register available in 32-bit PCI space (which selects a contiguous 2G
31 * address space on some other widget), via "direct" addressing via 64-bit
32 * PCI space (all destination information comes from the PCI address,
33 * including transfer attributes), and via a "mapped" region that allows
34 * a bunch of different small mappings to be established with the PMU.
35 *
36 * For efficiency, we prefer to use the 32bit direct mapping facility,
37 * since it requires no resource allocations. The advantage of using the
38 * PMU over the 64-bit direct is that single-cycle PCI addressing can be
39 * used; the advantage of using 64-bit direct over PMU addressing is that
40 * we do not have to allocate entries in the PMU.
41 */
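A standalone sketch of the preference order this implies; the authoritative selection is pcibr_dma_map() at the bottom of this file, and the predicate names below are made up for illustration.

#include <stdint.h>

enum dma_method { DIRECT64, DIRECT32, ATE32 };

/* Sketch: choose a method from the device's DMA mask and whether the
 * target range fits the 2G direct-mapped window. */
static enum dma_method pick_method(uint64_t dma_mask, int fits_direct32)
{
	if (dma_mask == ~(uint64_t)0)	/* 64-bit card: no resources needed */
		return DIRECT64;
	if (fits_direct32)		/* inside the direct-mapped window */
		return DIRECT32;
	return ATE32;			/* last resort: allocate PMU entries */
}

int main(void)
{
	return pick_method(~(uint64_t)0, 0) == DIRECT64 ? 0 : 1;
}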
42
43static uint64_t
44pcibr_dmamap_ate32(struct pcidev_info *info,
45 uint64_t paddr, size_t req_size, uint64_t flags)
46{
47
48 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
49 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
50 pdi_pcibus_info;
51 uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
52 pdi_linux_pcidev->devfn)) - 1;
53 int ate_count;
54 int ate_index;
55 uint64_t ate_flags = flags | PCI32_ATE_V;
56 uint64_t ate;
57 uint64_t pci_addr;
58 uint64_t xio_addr;
59 uint64_t offset;
60
61 /* PIC in PCI-X mode does not support 32bit PageMap mode */
62 if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
63 return 0;
64 }
65
66 /* Calculate the number of ATEs needed. */
67 if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
68 ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
69 +req_size /* max mapping bytes */
70 - 1) + 1; /* round UP */
71 } else { /* assume requested target is page aligned */
72 ate_count = IOPG(req_size /* max mapping bytes */
73 - 1) + 1; /* round UP */
74 }
75
76 /* Allocate the required number of ATEs. */
77 ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
78 if (ate_index < 0)
79 return 0;
80
81 /* In PCI-X mode, prefetch is not supported */
82 if (IS_PCIX(pcibus_info))
83 ate_flags &= ~(PCI32_ATE_PREF);
84
85 xio_addr =
86 IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
87 PHYS_TO_TIODMA(paddr);
88 offset = IOPGOFF(xio_addr);
89 ate = ate_flags | (xio_addr - offset);
90
91 /* If PIC, put the targetid in the ATE */
92 if (IS_PIC_SOFT(pcibus_info)) {
93 ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
94 }
95 ate_write(pcibus_info, ate_index, ate_count, ate);
96
97 /*
98 * Set up the DMA mapped Address.
99 */
100 pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
101
102 /*
103 * If swap was set in device in pcibr_endian_set()
104 * we need to turn swapping on.
105 */
106 if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
107 ATE_SWAP_ON(pci_addr);
108
109 return pci_addr;
110}
111
112static uint64_t
113pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
114 uint64_t dma_attributes)
115{
116 struct pcibus_info *pcibus_info = (struct pcibus_info *)
117 ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
118 uint64_t pci_addr;
119
120 /* Translate to Crosstalk View of Physical Address */
121 pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
122 PHYS_TO_TIODMA(paddr)) | dma_attributes;
123
124 /* Handle Bus mode */
125 if (IS_PCIX(pcibus_info))
126 pci_addr &= ~PCI64_ATTR_PREF;
127
128 /* Handle Bridge Chipset differences */
129 if (IS_PIC_SOFT(pcibus_info)) {
130 pci_addr |=
131 ((uint64_t) pcibus_info->
132 pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
133 } else
134 pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
135
136 /* In PCI mode, func zero uses VCHAN0; every other func uses VCHAN1 */
137 if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
138 pci_addr |= PCI64_ATTR_VIRTUAL;
139
140 return pci_addr;
141
142}
143
144static uint64_t
145pcibr_dmatrans_direct32(struct pcidev_info * info,
146 uint64_t paddr, size_t req_size, uint64_t flags)
147{
148
149 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
150 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
151 pdi_pcibus_info;
152 uint64_t xio_addr;
153
154 uint64_t xio_base;
155 uint64_t offset;
156 uint64_t endoff;
157
158 if (IS_PCIX(pcibus_info)) {
159 return 0;
160 }
161
162 xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
163 PHYS_TO_TIODMA(paddr);
164
165 xio_base = pcibus_info->pbi_dir_xbase;
166 offset = xio_addr - xio_base;
167 endoff = req_size + offset;
168 if ((req_size > (1ULL << 31)) || /* Too Big */
169 (xio_addr < xio_base) || /* Out of range for mappings */
170 (endoff > (1ULL << 31))) { /* Too Big */
171 return 0;
172 }
173
174 return PCI32_DIRECT_BASE | offset;
175
176}
177
178/*
179 * Wrapper routine for freeing DMA maps
180 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
181 */
182void
183pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
184 int direction)
185{
186 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
187 pdi_pcibus_info;
188
189 if (IS_PCI32_MAPPED(dma_handle)) {
190 int ate_index;
191
192 ate_index =
193 IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
194 pcibr_ate_free(pcibus_info, ate_index);
195 }
196}
197
198/*
199 * On SN systems there is a race condition between a PIO read response and
200 * DMAs. In rare cases, the read response may beat the DMA, causing the
201 * driver to think that data in memory is complete and meaningful. This code
202 * eliminates that race. This routine is called by the PIO read routines
203 * after doing the read. For PIC this routine then forces a fake interrupt
204 * on another line, which is logically associated with the slot that the PIO
205 * is addressed to. It then spins while watching the memory location that
206 * the interrupt is targeted to. When the interrupt response arrives, we
207 * are sure that the DMA has landed in memory and it is safe for the driver
208 * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
209 * Bridge register since it ensures the data has entered the coherence domain,
210 * unlike the PIC Device(x) Write Request Buffer Flush register.
211 */
212
213void sn_dma_flush(uint64_t addr)
214{
215 nasid_t nasid;
216 int is_tio;
217 int wid_num;
218 int i, j;
219 int bwin;
220 uint64_t flags;
221 struct hubdev_info *hubinfo;
222 volatile struct sn_flush_device_list *p;
223 struct sn_flush_nasid_entry *flush_nasid_list;
224
225 if (!sn_ioif_inited)
226 return;
227
228 nasid = NASID_GET(addr);
229 if (-1 == nasid_to_cnodeid(nasid))
230 return;
231
232 hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
233
234 if (!hubinfo) {
235 BUG();
236 }
237 is_tio = (nasid & 1);
238 if (is_tio) {
239 wid_num = TIO_SWIN_WIDGETNUM(addr);
240 bwin = TIO_BWIN_WINDOWNUM(addr);
241 } else {
242 wid_num = SWIN_WIDGETNUM(addr);
243 bwin = BWIN_WINDOWNUM(addr);
244 }
245
246 flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
247 if (flush_nasid_list->widget_p == NULL)
248 return;
249 if (bwin > 0) {
250 uint64_t itte = flush_nasid_list->iio_itte[bwin];
251
252 if (is_tio) {
253 wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
254 TIO_ITTE_WIDGET_MASK;
255 } else {
256 wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
257 IIO_ITTE_WIDGET_MASK;
258 }
259 }
260 if (flush_nasid_list->widget_p == NULL)
261 return;
262 if (flush_nasid_list->widget_p[wid_num] == NULL)
263 return;
264 p = &flush_nasid_list->widget_p[wid_num][0];
265
266 /* find a matching BAR */
267 for (i = 0; i < DEV_PER_WIDGET; i++) {
268 for (j = 0; j < PCI_ROM_RESOURCE; j++) {
269 if (p->sfdl_bar_list[j].start == 0)
270 break;
271 if (addr >= p->sfdl_bar_list[j].start
272 && addr <= p->sfdl_bar_list[j].end)
273 break;
274 }
275 if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
276 break;
277 p++;
278 }
279
280 /* if no matching BAR, return without doing anything. */
281 if (i == DEV_PER_WIDGET)
282 return;
283
284 /*
285 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
286 * register since it ensures the data has entered the coherence
287 * domain, unlike PIC
288 */
289 if (is_tio) {
290 uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
291 uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
292
293 /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
294 if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
295 return;
296 } else {
297 pcireg_wrb_flush_get(p->sfdl_pcibus_info,
298 (p->sfdl_slot - 1));
299 }
300 } else {
301 spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
302 sfdl_flush_lock, flags);
303
304 p->sfdl_flush_value = 0;
305
306 /* force an interrupt. */
307 *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
308
309 /* wait for the interrupt to come back. */
310 while (*(p->sfdl_flush_addr) != 0x10f) ;
311
312 /* okay, everything is synched up. */
313 spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
314 }
315 return;
316}
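The driver-visible pattern, sketched under assumptions: a PIO read that reports DMA completion is followed by sn_dma_flush() on the widget address before the DMA buffer is touched. On sn2 the readX() accessors are expected to do this internally; the explicit call and the register layout below are purely illustrative.

/* Hypothetical device: bit 0 of a status register means "DMA done".
 * sn_dma_flush() is the routine defined above (exported below). */
extern void sn_dma_flush(uint64_t addr);

static int example_dma_done(volatile uint32_t *status, uint32_t *dma_buf,
			    uint32_t *out)
{
	uint32_t s = *status;			/* PIO read response */

	sn_dma_flush((uint64_t)status);		/* close the PIO/DMA race */
	if (!(s & 1))
		return 0;			/* device not done yet */
	*out = dma_buf[0];			/* DMA data now in memory */
	return 1;
}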
317
318/*
319 * Wrapper DMA interface. Called from pci_dma.c routines.
320 */
321
322uint64_t
323pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
324 size_t size, unsigned int flags)
325{
326 dma_addr_t dma_handle;
327 struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
328
329 if (flags & SN_PCIDMA_CONSISTENT) {
330 /* sn_pci_alloc_consistent interfaces */
331 if (pcidev->dev.coherent_dma_mask == ~0UL) {
332 dma_handle =
333 pcibr_dmatrans_direct64(pcidev_info, phys_addr,
334 PCI64_ATTR_BAR);
335 } else {
336 dma_handle =
337 (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
338 phys_addr, size,
339 PCI32_ATE_BAR);
340 }
341 } else {
342 /* map_sg/map_single interfaces */
343
344 /* SN cannot support DMA addresses smaller than 32 bits. */
345 if (pcidev->dma_mask < 0x7fffffff) {
346 return 0;
347 }
348
349 if (pcidev->dma_mask == ~0UL) {
350 /*
351 * Handle the most common case: 64 bit cards. This
352 * call should always succeed.
353 */
354
355 dma_handle =
356 pcibr_dmatrans_direct64(pcidev_info, phys_addr,
357 PCI64_ATTR_PREF);
358 } else {
359 /* Handle 32-63 bit cards via direct mapping */
360 dma_handle =
361 pcibr_dmatrans_direct32(pcidev_info, phys_addr,
362 size, 0);
363 if (!dma_handle) {
364 /*
365 * It is a 32 bit card and we cannot do direct mapping,
366 * so we use an ATE.
367 */
368
369 dma_handle =
370 pcibr_dmamap_ate32(pcidev_info, phys_addr,
371 size, PCI32_ATE_PREF);
372 }
373 }
374 }
375
376 return dma_handle;
377}
378
379EXPORT_SYMBOL(sn_dma_flush);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
new file mode 100644
index 000000000000..92bd278cf7ff
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -0,0 +1,170 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <asm/sn/sn_sal.h>
13#include "xtalk/xwidgetdev.h"
14#include <asm/sn/geo.h>
15#include "xtalk/hubdev.h"
16#include "pci/pcibus_provider_defs.h"
17#include "pci/pcidev.h"
18#include "pci/pcibr_provider.h"
19#include <asm/sn/addrs.h>
20
21
22static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
23{
24 struct ia64_sal_retval ret_stuff;
25 uint64_t busnum;
26 int segment;
27 ret_stuff.status = 0;
28 ret_stuff.v0 = 0;
29
30 segment = 0;
31 busnum = soft->pbi_buscommon.bs_persist_busnum;
32 SAL_CALL_NOLOCK(ret_stuff,
33 (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
34 (u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
35
36 return (int)ret_stuff.v0;
37}
38
39/*
40 * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
41 * bridge sends an error interrupt.
42 */
43static irqreturn_t
44pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
45{
46 struct pcibus_info *soft = (struct pcibus_info *)arg;
47
48 if (sal_pcibr_error_interrupt(soft) < 0) {
49 panic("pcibr_error_intr_handler(): Fatal Bridge Error");
50 }
51 return IRQ_HANDLED;
52}
53
54void *
55pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft)
56{
57 int nasid, cnode, j;
58 struct hubdev_info *hubdev_info;
59 struct pcibus_info *soft;
60 struct sn_flush_device_list *sn_flush_device_list;
61
62 if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
63 return NULL;
64 }
65
66 /*
67 * Allocate kernel bus soft and copy from prom.
68 */
69
70 soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
71 if (!soft) {
72 return NULL;
73 }
74
75 memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
76 soft->pbi_buscommon.bs_base =
77 (((u64) soft->pbi_buscommon.
78 bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
79
80 spin_lock_init(&soft->pbi_lock);
81
82 /*
83 * register the bridge's error interrupt handler
84 */
85 if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
86 SA_SHIRQ, "PCIBR error", (void *)(soft))) {
87 printk(KERN_WARNING
88 "pcibr cannot allocate interrupt for error handler\n");
89 }
90
91 /*
92 * Update the Bridge with the "kernel" pagesize
93 */
94 if (PAGE_SIZE < 16384) {
95 pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
96 } else {
97 pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
98 }
99
100 nasid = NASID_GET(soft->pbi_buscommon.bs_base);
101 cnode = nasid_to_cnodeid(nasid);
102 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
103
104 if (hubdev_info->hdi_flush_nasid_list.widget_p) {
105 sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
106 widget_p[(int)soft->pbi_buscommon.bs_xid];
107 if (sn_flush_device_list) {
108 for (j = 0; j < DEV_PER_WIDGET;
109 j++, sn_flush_device_list++) {
110 if (sn_flush_device_list->sfdl_slot == -1)
111 continue;
112 if (sn_flush_device_list->
113 sfdl_persistent_busnum ==
114 soft->pbi_buscommon.bs_persist_busnum)
115 sn_flush_device_list->sfdl_pcibus_info =
116 soft;
117 }
118 }
119 }
120
121 /* Setup the PMU ATE map */
122 soft->pbi_int_ate_resource.lowest_free_index = 0;
123 soft->pbi_int_ate_resource.ate =
124 kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
125 memset(soft->pbi_int_ate_resource.ate, 0,
126 (soft->pbi_int_ate_size * sizeof(uint64_t)));
127
128 return soft;
129}
130
131void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
132{
133 struct pcidev_info *pcidev_info;
134 struct pcibus_info *pcibus_info;
135 int bit = sn_irq_info->irq_int_bit;
136
137 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
138 if (pcidev_info) {
139 pcibus_info =
140 (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
141 pdi_pcibus_info;
142 pcireg_force_intr_set(pcibus_info, bit);
143 }
144}
145
146void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
147{
148 struct pcidev_info *pcidev_info;
149 struct pcibus_info *pcibus_info;
150 int bit = sn_irq_info->irq_int_bit;
151 uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
152
153 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
154 if (pcidev_info) {
155 pcibus_info =
156 (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
157 pdi_pcibus_info;
158
159 /* Disable the device's IRQ */
160 pcireg_intr_enable_bit_clr(pcibus_info, bit);
161
162 /* Change the device's IRQ */
163 pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
164
165 /* Re-enable the device's IRQ */
166 pcireg_intr_enable_bit_set(pcibus_info, bit);
167
168 pcibr_force_interrupt(sn_irq_info);
169 }
170}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
new file mode 100644
index 000000000000..74a74a7d2a13
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -0,0 +1,282 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include "pci/pcibus_provider_defs.h"
12#include "pci/pcidev.h"
13#include "pci/tiocp.h"
14#include "pci/pic.h"
15#include "pci/pcibr_provider.h"
16
17union br_ptr {
18 struct tiocp tio;
19 struct pic pic;
20};
21
22/*
23 * Control Register Access -- Read/Write 0000_0020
24 */
25void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
26{
27 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
28
29 if (pcibus_info) {
30 switch (pcibus_info->pbi_bridge_type) {
31 case PCIBR_BRIDGETYPE_TIOCP:
32 ptr->tio.cp_control &= ~bits;
33 break;
34 case PCIBR_BRIDGETYPE_PIC:
35 ptr->pic.p_wid_control &= ~bits;
36 break;
37 default:
38 panic
39 ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
40 (void *)ptr);
41 }
42 }
43}
44
45void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
46{
47 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
48
49 if (pcibus_info) {
50 switch (pcibus_info->pbi_bridge_type) {
51 case PCIBR_BRIDGETYPE_TIOCP:
52 ptr->tio.cp_control |= bits;
53 break;
54 case PCIBR_BRIDGETYPE_PIC:
55 ptr->pic.p_wid_control |= bits;
56 break;
57 default:
58 panic
59 ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
60 (void *)ptr);
61 }
62 }
63}
64
65/*
66 * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
67 */
68uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
69{
70 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
71 uint64_t ret = 0;
72
73 if (pcibus_info) {
74 switch (pcibus_info->pbi_bridge_type) {
75 case PCIBR_BRIDGETYPE_TIOCP:
76 ret = ptr->tio.cp_tflush;
77 break;
78 case PCIBR_BRIDGETYPE_PIC:
79 ret = ptr->pic.p_wid_tflush;
80 break;
81 default:
82 panic
83 ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
84 (void *)ptr);
85 }
86 }
87
88 /* Read of the Target Flush should always return zero */
89 if (ret != 0)
90 panic("pcireg_tflush_get:Target Flush failed\n");
91
92 return ret;
93}
94
95/*
96 * Interrupt Status Register Access -- Read Only 0000_0100
97 */
98uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
99{
100 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
101 uint64_t ret = 0;
102
103 if (pcibus_info) {
104 switch (pcibus_info->pbi_bridge_type) {
105 case PCIBR_BRIDGETYPE_TIOCP:
106 ret = ptr->tio.cp_int_status;
107 break;
108 case PCIBR_BRIDGETYPE_PIC:
109 ret = ptr->pic.p_int_status;
110 break;
111 default:
112 panic
113 ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
114 (void *)ptr);
115 }
116 }
117 return ret;
118}
119
120/*
121 * Interrupt Enable Register Access -- Read/Write 0000_0108
122 */
123void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
124{
125 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
126
127 if (pcibus_info) {
128 switch (pcibus_info->pbi_bridge_type) {
129 case PCIBR_BRIDGETYPE_TIOCP:
130 ptr->tio.cp_int_enable &= ~bits;
131 break;
132 case PCIBR_BRIDGETYPE_PIC:
133 ptr->pic.p_int_enable &= ~bits;
134 break;
135 default:
136 panic
137 ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
138 (void *)ptr);
139 }
140 }
141}
142
143void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
144{
145 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
146
147 if (pcibus_info) {
148 switch (pcibus_info->pbi_bridge_type) {
149 case PCIBR_BRIDGETYPE_TIOCP:
150 ptr->tio.cp_int_enable |= bits;
151 break;
152 case PCIBR_BRIDGETYPE_PIC:
153 ptr->pic.p_int_enable |= bits;
154 break;
155 default:
156 panic
157 ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
158 (void *)ptr);
159 }
160 }
161}
162
163/*
164 * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
165 */
166void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
167 uint64_t addr)
168{
169 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
170
171 if (pcibus_info) {
172 switch (pcibus_info->pbi_bridge_type) {
173 case PCIBR_BRIDGETYPE_TIOCP:
174 ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
175 ptr->tio.cp_int_addr[int_n] |=
176 (addr & TIOCP_HOST_INTR_ADDR);
177 break;
178 case PCIBR_BRIDGETYPE_PIC:
179 ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
180 ptr->pic.p_int_addr[int_n] |=
181 (addr & PIC_HOST_INTR_ADDR);
182 break;
183 default:
184 panic
185 ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
186 (void *)ptr);
187 }
188 }
189}
190
191/*
192 * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
193 */
194void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
195{
196 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
197
198 if (pcibus_info) {
199 switch (pcibus_info->pbi_bridge_type) {
200 case PCIBR_BRIDGETYPE_TIOCP:
201 ptr->tio.cp_force_pin[int_n] = 1;
202 break;
203 case PCIBR_BRIDGETYPE_PIC:
204 ptr->pic.p_force_pin[int_n] = 1;
205 break;
206 default:
207 panic
208 ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
209 (void *)ptr);
210 }
211 }
212}
213
214/*
215 * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
216 */
217uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
218{
219 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
220 uint64_t ret = 0;
221
222 if (pcibus_info) {
223 switch (pcibus_info->pbi_bridge_type) {
224 case PCIBR_BRIDGETYPE_TIOCP:
225 ret = ptr->tio.cp_wr_req_buf[device];
226 break;
227 case PCIBR_BRIDGETYPE_PIC:
228 ret = ptr->pic.p_wr_req_buf[device];
229 break;
230 default:
231 panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
232 }
233
234 }
235 /* Read of the Write Buffer Flush should always return zero */
236 return ret;
237}
238
239void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
240 uint64_t val)
241{
242 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
243
244 if (pcibus_info) {
245 switch (pcibus_info->pbi_bridge_type) {
246 case PCIBR_BRIDGETYPE_TIOCP:
247 ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
248 break;
249 case PCIBR_BRIDGETYPE_PIC:
250 ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
251 break;
252 default:
253 panic
254 ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
255 (void *)ptr);
256 }
257 }
258}
259
260uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
261{
262 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
263 uint64_t *ret = (uint64_t *) 0;
264
265 if (pcibus_info) {
266 switch (pcibus_info->pbi_bridge_type) {
267 case PCIBR_BRIDGETYPE_TIOCP:
268 ret =
269 (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
270 break;
271 case PCIBR_BRIDGETYPE_PIC:
272 ret =
273 (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
274 break;
275 default:
276 panic
277 ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
278 (void *)ptr);
279 }
280 }
281 return ret;
282}