aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/sn/pci
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/sn/pci
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/sn/pci')
-rw-r--r--arch/ia64/sn/pci/Makefile10
-rw-r--r--arch/ia64/sn/pci/pci_dma.c363
-rw-r--r--arch/ia64/sn/pci/pcibr/Makefile11
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_ate.c188
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_dma.c379
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c170
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_reg.c282
7 files changed, 1403 insertions, 0 deletions
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
new file mode 100644
index 000000000000..b5dca0097a8e
--- /dev/null
+++ b/arch/ia64/sn/pci/Makefile
@@ -0,0 +1,10 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7#
8# Makefile for the sn pci general routines.
9
10obj-y := pci_dma.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
new file mode 100644
index 000000000000..f680824f819d
--- /dev/null
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -0,0 +1,363 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
7 *
8 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
9 * a description of how these routines should be used.
10 */
11
12#include <linux/module.h>
13#include <asm/dma.h>
14#include <asm/sn/sn_sal.h>
15#include "pci/pcibus_provider_defs.h"
16#include "pci/pcidev.h"
17#include "pci/pcibr_provider.h"
18
19#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
20#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
21
22/**
23 * sn_dma_supported - test a DMA mask
24 * @dev: device to test
25 * @mask: DMA mask to test
26 *
27 * Return whether the given PCI device DMA address mask can be supported
28 * properly. For example, if your device can only drive the low 24-bits
29 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
30 * this function. Of course, SN only supports devices that have 32 or more
31 * address bits when using the PMU.
32 */
33int sn_dma_supported(struct device *dev, u64 mask)
34{
35 BUG_ON(dev->bus != &pci_bus_type);
36
37 if (mask < 0x7fffffff)
38 return 0;
39 return 1;
40}
41EXPORT_SYMBOL(sn_dma_supported);
42
43/**
44 * sn_dma_set_mask - set the DMA mask
45 * @dev: device to set
46 * @dma_mask: new mask
47 *
48 * Set @dev's DMA mask if the hw supports it.
49 */
50int sn_dma_set_mask(struct device *dev, u64 dma_mask)
51{
52 BUG_ON(dev->bus != &pci_bus_type);
53
54 if (!sn_dma_supported(dev, dma_mask))
55 return 0;
56
57 *dev->dma_mask = dma_mask;
58 return 1;
59}
60EXPORT_SYMBOL(sn_dma_set_mask);
61
62/**
63 * sn_dma_alloc_coherent - allocate memory for coherent DMA
64 * @dev: device to allocate for
65 * @size: size of the region
66 * @dma_handle: DMA (bus) address
67 * @flags: memory allocation flags
68 *
69 * dma_alloc_coherent() returns a pointer to a memory region suitable for
70 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
71 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
72 *
73 * This interface is usually used for "command" streams (e.g. the command
74 * queue for a SCSI controller). See Documentation/DMA-API.txt for
75 * more information.
76 */
77void *sn_dma_alloc_coherent(struct device *dev, size_t size,
78 dma_addr_t * dma_handle, int flags)
79{
80 void *cpuaddr;
81 unsigned long phys_addr;
82 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
83
84 BUG_ON(dev->bus != &pci_bus_type);
85
86 /*
87 * Allocate the memory.
88 * FIXME: We should be doing alloc_pages_node for the node closest
89 * to the PCI device.
90 */
91 if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
92 return NULL;
93
94 memset(cpuaddr, 0x0, size);
95
96 /* physical addr. of the memory we just got */
97 phys_addr = __pa(cpuaddr);
98
99 /*
100 * 64 bit address translations should never fail.
101 * 32 bit translations can fail if there are insufficient mapping
102 * resources.
103 */
104
105 *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
106 SN_PCIDMA_CONSISTENT);
107 if (!*dma_handle) {
108 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
109 free_pages((unsigned long)cpuaddr, get_order(size));
110 return NULL;
111 }
112
113 return cpuaddr;
114}
115EXPORT_SYMBOL(sn_dma_alloc_coherent);
116
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by sn_dma_alloc_coherent(), unmapping
 * any ATE-based IOMMU mapping first, then returning the pages.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t dma_handle)
{
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);

	pcibr_dma_unmap(pcidev_info, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);
138
139/**
140 * sn_dma_map_single - map a single page for DMA
141 * @dev: device to map for
142 * @cpu_addr: kernel virtual address of the region to map
143 * @size: size of the region
144 * @direction: DMA direction
145 *
146 * Map the region pointed to by @cpu_addr for DMA and return the
147 * DMA address.
148 *
149 * We map this to the one step pcibr_dmamap_trans interface rather than
150 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
151 * no way of saving the dmamap handle from the alloc to later free
152 * (which is pretty much unacceptable).
153 *
154 * TODO: simplify our interface;
155 * figure out how to save dmamap handle so can use two step.
156 */
157dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
158 int direction)
159{
160 dma_addr_t dma_addr;
161 unsigned long phys_addr;
162 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
163
164 BUG_ON(dev->bus != &pci_bus_type);
165
166 phys_addr = __pa(cpu_addr);
167 dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
168 if (!dma_addr) {
169 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
170 return 0;
171 }
172 return dma_addr;
173}
174EXPORT_SYMBOL(sn_dma_map_single);
175
/**
 * sn_dma_unmap_single - unmap a DMA mapped region
 * @dev: device the mapping belongs to
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @direction: DMA direction
 *
 * SN hardware is cache coherent, so there is nothing to sync; this
 * only frees any ATEs associated with the mapping.
 */
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			 int direction)
{
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));

	BUG_ON(dev->bus != &pci_bus_type);
	pcibr_dma_unmap(pcidev_info, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);
196
197/**
198 * sn_dma_unmap_sg - unmap a DMA scatterlist
199 * @dev: device to unmap
200 * @sg: scatterlist to unmap
201 * @nhwentries: number of scatterlist entries
202 * @direction: DMA direction
203 *
204 * Unmap a set of streaming mode DMA translations.
205 */
206void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
207 int nhwentries, int direction)
208{
209 int i;
210 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
211
212 BUG_ON(dev->bus != &pci_bus_type);
213
214 for (i = 0; i < nhwentries; i++, sg++) {
215 pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
216 sg->dma_address = (dma_addr_t) NULL;
217 sg->dma_length = 0;
218 }
219}
220EXPORT_SYMBOL(sn_dma_unmap_sg);
221
222/**
223 * sn_dma_map_sg - map a scatterlist for DMA
224 * @dev: device to map for
225 * @sg: scatterlist to map
226 * @nhwentries: number of entries
227 * @direction: direction of the DMA transaction
228 *
229 * Maps each entry of @sg for DMA.
230 */
231int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
232 int direction)
233{
234 unsigned long phys_addr;
235 struct scatterlist *saved_sg = sg;
236 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
237 int i;
238
239 BUG_ON(dev->bus != &pci_bus_type);
240
241 /*
242 * Setup a DMA address for each entry in the scatterlist.
243 */
244 for (i = 0; i < nhwentries; i++, sg++) {
245 phys_addr = SG_ENT_PHYS_ADDRESS(sg);
246 sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
247 sg->length, 0);
248
249 if (!sg->dma_address) {
250 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
251
252 /*
253 * Free any successfully allocated entries.
254 */
255 if (i > 0)
256 sn_dma_unmap_sg(dev, saved_sg, i, direction);
257 return 0;
258 }
259
260 sg->dma_length = sg->length;
261 }
262
263 return nhwentries;
264}
265EXPORT_SYMBOL(sn_dma_map_sg);
266
/* SN hardware is cache coherent; syncing a single mapping is a no-op. */
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
273
/* SN hardware is cache coherent; syncing a single mapping is a no-op. */
void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);
280
/* SN hardware is cache coherent; syncing a scatterlist is a no-op. */
void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			    int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
287
/* SN hardware is cache coherent; syncing a scatterlist is a no-op. */
void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
294
/*
 * Mapping failures on SN are reported at map time via a zero
 * dma_addr_t, so this check always reports success.
 */
int sn_dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL(sn_dma_mapping_error);
300
301char *sn_pci_get_legacy_mem(struct pci_bus *bus)
302{
303 if (!SN_PCIBUS_BUSSOFT(bus))
304 return ERR_PTR(-ENODEV);
305
306 return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
307}
308
309int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
310{
311 unsigned long addr;
312 int ret;
313
314 if (!SN_PCIBUS_BUSSOFT(bus))
315 return -ENODEV;
316
317 addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
318 addr += port;
319
320 ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
321
322 if (ret == 2)
323 return -EINVAL;
324
325 if (ret == 1)
326 *val = -1;
327
328 return size;
329}
330
331int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
332{
333 int ret = size;
334 unsigned long paddr;
335 unsigned long *addr;
336
337 if (!SN_PCIBUS_BUSSOFT(bus)) {
338 ret = -ENODEV;
339 goto out;
340 }
341
342 /* Put the phys addr in uncached space */
343 paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
344 paddr += port;
345 addr = (unsigned long *)paddr;
346
347 switch (size) {
348 case 1:
349 *(volatile u8 *)(addr) = (u8)(val);
350 break;
351 case 2:
352 *(volatile u16 *)(addr) = (u16)(val);
353 break;
354 case 4:
355 *(volatile u32 *)(addr) = (u32)(val);
356 break;
357 default:
358 ret = -EINVAL;
359 break;
360 }
361 out:
362 return ret;
363}
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
new file mode 100644
index 000000000000..1850c4a94c41
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -0,0 +1,11 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
7#
8# Makefile for the sn2 io routines.
9
10obj-y += pcibr_dma.o pcibr_reg.o \
11 pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
new file mode 100644
index 000000000000..9d6854666f9b
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -0,0 +1,188 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <asm/sn/sn_sal.h>
11#include "pci/pcibus_provider_defs.h"
12#include "pci/pcidev.h"
13#include "pci/pcibr_provider.h"
14
15int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
16
17/*
18 * mark_ate: Mark the ate as either free or inuse.
19 */
20static void mark_ate(struct ate_resource *ate_resource, int start, int number,
21 uint64_t value)
22{
23
24 uint64_t *ate = ate_resource->ate;
25 int index;
26 int length = 0;
27
28 for (index = start; length < number; index++, length++)
29 ate[index] = value;
30
31}
32
33/*
34 * find_free_ate: Find the first free ate index starting from the given
35 * index for the desired consequtive count.
36 */
37static int find_free_ate(struct ate_resource *ate_resource, int start,
38 int count)
39{
40
41 uint64_t *ate = ate_resource->ate;
42 int index;
43 int start_free;
44
45 for (index = start; index < ate_resource->num_ate;) {
46 if (!ate[index]) {
47 int i;
48 int free;
49 free = 0;
50 start_free = index; /* Found start free ate */
51 for (i = start_free; i < ate_resource->num_ate; i++) {
52 if (!ate[i]) { /* This is free */
53 if (++free == count)
54 return start_free;
55 } else {
56 index = i + 1;
57 break;
58 }
59 }
60 } else
61 index++; /* Try next ate */
62 }
63
64 return -1;
65}
66
67/*
68 * free_ate_resource: Free the requested number of ATEs.
69 */
70static inline void free_ate_resource(struct ate_resource *ate_resource,
71 int start)
72{
73
74 mark_ate(ate_resource, start, ate_resource->ate[start], 0);
75 if ((ate_resource->lowest_free_index > start) ||
76 (ate_resource->lowest_free_index < 0))
77 ate_resource->lowest_free_index = start;
78
79}
80
/*
 * alloc_ate_resource: Allocate the requested number of ATEs.
 *
 * Returns the start index of the allocated run, or -1 when the pool
 * is exhausted or no run of @ate_needed consecutive free slots exists.
 * Caller is expected to hold the bus lock (see pcibr_ate_alloc()).
 */
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
				     int ate_needed)
{

	int start_index;

	/*
	 * Check for ate exhaustion.
	 */
	if (ate_resource->lowest_free_index < 0)
		return -1;

	/*
	 * Find the required number of free consequtive ates.
	 */
	start_index =
	    find_free_ate(ate_resource, ate_resource->lowest_free_index,
			  ate_needed);
	if (start_index >= 0)
		/* Stamp the run with its length for free_ate_resource(). */
		mark_ate(ate_resource, start_index, ate_needed, ate_needed);

	/* Re-derive the hint for the next allocation. */
	ate_resource->lowest_free_index =
	    find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);

	return start_index;
}
110
111/*
112 * Allocate "count" contiguous Bridge Address Translation Entries
113 * on the specified bridge to be used for PCI to XTALK mappings.
114 * Indices in rm map range from 1..num_entries. Indicies returned
115 * to caller range from 0..num_entries-1.
116 *
117 * Return the start index on success, -1 on failure.
118 */
119int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
120{
121 int status = 0;
122 uint64_t flag;
123
124 flag = pcibr_lock(pcibus_info);
125 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
126
127 if (status < 0) {
128 /* Failed to allocate */
129 pcibr_unlock(pcibus_info, flag);
130 return -1;
131 }
132
133 pcibr_unlock(pcibus_info, flag);
134
135 return status;
136}
137
138/*
139 * Setup an Address Translation Entry as specified. Use either the Bridge
140 * internal maps or the external map RAM, as appropriate.
141 */
142static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
143 int ate_index)
144{
145 if (ate_index < pcibus_info->pbi_int_ate_size) {
146 return pcireg_int_ate_addr(pcibus_info, ate_index);
147 }
148 panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
149}
150
/*
 * Update @count consecutive hardware ATEs beginning at @ate_index,
 * advancing the target address by one I/O page per entry.
 */
void inline
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
	  volatile uint64_t ate)
{
	while (count-- > 0) {
		if (ate_index < pcibus_info->pbi_int_ate_size) {
			pcireg_int_ate_set(pcibus_info, ate_index, ate);
		} else {
			panic("ate_write: invalid ate_index 0x%x", ate_index);
		}
		ate_index++;
		/* Each successive ATE maps the next I/O page. */
		ate += IOPGSIZE;
	}

	pcireg_tflush_get(pcibus_info);	/* wait until Bridge PIO complete */
}
170
/*
 * Return the ATE run starting at @index to the pool, optionally
 * clearing the hardware valid bits first for debugging.
 */
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{

	volatile uint64_t ate;
	int count;
	uint64_t flags;

	if (pcibr_invalidate_ate) {
		/* For debugging purposes, clear the valid bit in the ATE */
		ate = *pcibr_ate_addr(pcibus_info, index);
		/* Bookkeeping slot @index holds the run length. */
		count = pcibus_info->pbi_int_ate_resource.ate[index];
		ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
	}

	flags = pcibr_lock(pcibus_info);
	free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
	pcibr_unlock(pcibus_info, flags);
}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
new file mode 100644
index 000000000000..b1d66ac065c8
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -0,0 +1,379 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <asm/sn/sn_sal.h>
12#include <asm/sn/geo.h>
13#include "xtalk/xwidgetdev.h"
14#include "xtalk/hubdev.h"
15#include "pci/pcibus_provider_defs.h"
16#include "pci/pcidev.h"
17#include "pci/tiocp.h"
18#include "pci/pic.h"
19#include "pci/pcibr_provider.h"
20#include "pci/tiocp.h"
21#include "tio.h"
22#include <asm/sn/addrs.h>
23
24extern int sn_ioif_inited;
25
26/* =====================================================================
27 * DMA MANAGEMENT
28 *
29 * The Bridge ASIC provides three methods of doing DMA: via a "direct map"
30 * register available in 32-bit PCI space (which selects a contiguous 2G
31 * address space on some other widget), via "direct" addressing via 64-bit
32 * PCI space (all destination information comes from the PCI address,
33 * including transfer attributes), and via a "mapped" region that allows
34 * a bunch of different small mappings to be established with the PMU.
35 *
36 * For efficiency, we most prefer to use the 32bit direct mapping facility,
37 * since it requires no resource allocations. The advantage of using the
38 * PMU over the 64-bit direct is that single-cycle PCI addressing can be
39 * used; the advantage of using 64-bit direct over PMU addressing is that
40 * we do not have to allocate entries in the PMU.
41 */
42
/*
 * pcibr_dmamap_ate32: Map @paddr/@req_size through the Bridge PMU
 * using 32-bit page-mapped (ATE) mode.  Returns the PCI bus address,
 * or 0 when the mode is unavailable (PIC in PCI-X) or the ATE pool is
 * exhausted.
 */
static uint64_t
pcibr_dmamap_ate32(struct pcidev_info *info,
		   uint64_t paddr, size_t req_size, uint64_t flags)
{

	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
	    pdi_pcibus_info;
	uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
					    pdi_linux_pcidev->devfn)) - 1;
	int ate_count;
	int ate_index;
	uint64_t ate_flags = flags | PCI32_ATE_V;
	uint64_t ate;
	uint64_t pci_addr;
	uint64_t xio_addr;
	uint64_t offset;

	/* PIC in PCI-X mode does not supports 32bit PageMap mode */
	if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
		return 0;
	}

	/* Calculate the number of ATEs needed. */
	if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
		ate_count = IOPG((IOPGSIZE - 1)	/* worst case start offset */
				 +req_size	/* max mapping bytes */
				 - 1) + 1;	/* round UP */
	} else {		/* assume requested target is page aligned */
		ate_count = IOPG(req_size	/* max mapping bytes */
				 - 1) + 1;	/* round UP */
	}

	/* Get the number of ATEs required. */
	ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
	if (ate_index < 0)
		return 0;

	/* In PCI-X mode, Prefetch not supported */
	if (IS_PCIX(pcibus_info))
		ate_flags &= ~(PCI32_ATE_PREF);

	/* Crosstalk view of the physical address; form depends on ASIC. */
	xio_addr =
	    IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
	    PHYS_TO_TIODMA(paddr);
	offset = IOPGOFF(xio_addr);
	ate = ate_flags | (xio_addr - offset);

	/* If PIC, put the targetid in the ATE */
	if (IS_PIC_SOFT(pcibus_info)) {
		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
	}
	ate_write(pcibus_info, ate_index, ate_count, ate);

	/*
	 * Set up the DMA mapped Address.
	 */
	pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;

	/*
	 * If swap was set in device in pcibr_endian_set()
	 * we need to turn swapping on.
	 */
	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
		ATE_SWAP_ON(pci_addr);

	return pci_addr;
}
111
/*
 * pcibr_dmatrans_direct64: Build a 64-bit direct-mapped DMA address
 * for @paddr.  All routing comes from the address bits themselves, so
 * no mapping resources are consumed and this cannot fail.
 */
static uint64_t
pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
			uint64_t dma_attributes)
{
	struct pcibus_info *pcibus_info = (struct pcibus_info *)
	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
	uint64_t pci_addr;

	/* Translate to Crosstalk View of Physical Address */
	pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
		    PHYS_TO_TIODMA(paddr)) | dma_attributes;

	/* Handle Bus mode: prefetch is not available under PCI-X. */
	if (IS_PCIX(pcibus_info))
		pci_addr &= ~PCI64_ATTR_PREF;

	/* Handle Bridge Chipset differences */
	if (IS_PIC_SOFT(pcibus_info)) {
		pci_addr |=
		    ((uint64_t) pcibus_info->
		     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
	} else
		pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;

	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
		pci_addr |= PCI64_ATTR_VIRTUAL;

	return pci_addr;

}
143
144static uint64_t
145pcibr_dmatrans_direct32(struct pcidev_info * info,
146 uint64_t paddr, size_t req_size, uint64_t flags)
147{
148
149 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
150 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
151 pdi_pcibus_info;
152 uint64_t xio_addr;
153
154 uint64_t xio_base;
155 uint64_t offset;
156 uint64_t endoff;
157
158 if (IS_PCIX(pcibus_info)) {
159 return 0;
160 }
161
162 xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
163 PHYS_TO_TIODMA(paddr);
164
165 xio_base = pcibus_info->pbi_dir_xbase;
166 offset = xio_addr - xio_base;
167 endoff = req_size + offset;
168 if ((req_size > (1ULL << 31)) || /* Too Big */
169 (xio_addr < xio_base) || /* Out of range for mappings */
170 (endoff > (1ULL << 31))) { /* Too Big */
171 return 0;
172 }
173
174 return PCI32_DIRECT_BASE | offset;
175
176}
177
178/*
179 * Wrapper routine for free'ing DMA maps
180 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
181 */
182void
183pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
184 int direction)
185{
186 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
187 pdi_pcibus_info;
188
189 if (IS_PCI32_MAPPED(dma_handle)) {
190 int ate_index;
191
192 ate_index =
193 IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
194 pcibr_ate_free(pcibus_info, ate_index);
195 }
196}
197
/*
 * On SN systems there is a race condition between a PIO read response and
 * DMA's. In rare cases, the read response may beat the DMA, causing the
 * driver to think that data in memory is complete and meaningful. This code
 * eliminates that race. This routine is called by the PIO read routines
 * after doing the read. For PIC this routine then forces a fake interrupt
 * on another line, which is logically associated with the slot that the PIO
 * is addressed to. It then spins while watching the memory location that
 * the interrupt is targetted to. When the interrupt response arrives, we
 * are sure that the DMA has landed in memory and it is safe for the driver
 * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
 * Bridge register since it ensures the data has entered the coherence domain,
 * unlike the PIC Device(x) Write Request Buffer Flush register.
 */

void sn_dma_flush(uint64_t addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	int bwin;
	uint64_t flags;
	struct hubdev_info *hubinfo;
	volatile struct sn_flush_device_list *p;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	/* Ignore addresses that do not map to a known node. */
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	if (!hubinfo) {
		BUG();
	}
	/* NOTE(review): odd nasids are taken to mean TIO — confirm. */
	is_tio = (nasid & 1);
	if (is_tio) {
		wid_num = TIO_SWIN_WIDGETNUM(addr);
		bwin = TIO_BWIN_WINDOWNUM(addr);
	} else {
		wid_num = SWIN_WIDGETNUM(addr);
		bwin = BWIN_WINDOWNUM(addr);
	}

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;
	if (bwin > 0) {
		/* Big-window access: recover the real widget from the ITTE. */
		uint64_t itte = flush_nasid_list->iio_itte[bwin];

		if (is_tio) {
			wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
			    TIO_ITTE_WIDGET_MASK;
		} else {
			wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
			    IIO_ITTE_WIDGET_MASK;
		}
	}
	/* NOTE(review): widget_p was already checked above; redundant. */
	if (flush_nasid_list->widget_p == NULL)
		return;
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (p->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= p->sfdl_bar_list[j].start
			    && addr <= p->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
			break;
		p++;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC
	 */
	if (is_tio) {
		uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
					     (p->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
				  sfdl_flush_lock, flags);

		p->sfdl_flush_value = 0;

		/* force an interrupt. */
		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(p->sfdl_flush_addr) != 0x10f) ;

		/* okay, everything is synched up. */
		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
	}
	return;
}
317
/*
 * Wrapper DMA interface. Called from pci_dma.c routines.
 *
 * Selects a translation strategy based on the device's mask and the
 * SN_PCIDMA_CONSISTENT flag: 64-bit direct for full-mask devices,
 * 32-bit direct for intermediate masks, and 32-bit ATE mapping as the
 * fallback.  Returns the bus address, or 0 on failure.
 */

uint64_t
pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
	      size_t size, unsigned int flags)
{
	dma_addr_t dma_handle;
	struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;

	if (flags & SN_PCIDMA_CONSISTENT) {
		/* sn_pci_alloc_consistent interfaces */
		if (pcidev->dev.coherent_dma_mask == ~0UL) {
			dma_handle =
			    pcibr_dmatrans_direct64(pcidev_info, phys_addr,
						    PCI64_ATTR_BAR);
		} else {
			dma_handle =
			    (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
							    phys_addr, size,
							    PCI32_ATE_BAR);
		}
	} else {
		/* map_sg/map_single interfaces */

		/* SN cannot support DMA addresses smaller than 32 bits. */
		if (pcidev->dma_mask < 0x7fffffff) {
			return 0;
		}

		if (pcidev->dma_mask == ~0UL) {
			/*
			 * Handle the most common case: 64 bit cards. This
			 * call should always succeed.
			 */

			dma_handle =
			    pcibr_dmatrans_direct64(pcidev_info, phys_addr,
						    PCI64_ATTR_PREF);
		} else {
			/* Handle 32-63 bit cards via direct mapping */
			dma_handle =
			    pcibr_dmatrans_direct32(pcidev_info, phys_addr,
						    size, 0);
			if (!dma_handle) {
				/*
				 * It is a 32 bit card and we cannot do direct mapping,
				 * so we use an ATE.
				 */

				dma_handle =
				    pcibr_dmamap_ate32(pcidev_info, phys_addr,
						       size, PCI32_ATE_PREF);
			}
		}
	}

	return dma_handle;
}

EXPORT_SYMBOL(sn_dma_flush);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
new file mode 100644
index 000000000000..92bd278cf7ff
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -0,0 +1,170 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <asm/sn/sn_sal.h>
13#include "xtalk/xwidgetdev.h"
14#include <asm/sn/geo.h>
15#include "xtalk/hubdev.h"
16#include "pci/pcibus_provider_defs.h"
17#include "pci/pcidev.h"
18#include "pci/pcibr_provider.h"
19#include <asm/sn/addrs.h>
20
21
22static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
23{
24 struct ia64_sal_retval ret_stuff;
25 uint64_t busnum;
26 int segment;
27 ret_stuff.status = 0;
28 ret_stuff.v0 = 0;
29
30 segment = 0;
31 busnum = soft->pbi_buscommon.bs_persist_busnum;
32 SAL_CALL_NOLOCK(ret_stuff,
33 (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
34 (u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
35
36 return (int)ret_stuff.v0;
37}
38
39/*
40 * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
41 * bridge sends an error interrupt.
42 */
43static irqreturn_t
44pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
45{
46 struct pcibus_info *soft = (struct pcibus_info *)arg;
47
48 if (sal_pcibr_error_interrupt(soft) < 0) {
49 panic("pcibr_error_intr_handler(): Fatal Bridge Error");
50 }
51 return IRQ_HANDLED;
52}
53
54void *
55pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft)
56{
57 int nasid, cnode, j;
58 struct hubdev_info *hubdev_info;
59 struct pcibus_info *soft;
60 struct sn_flush_device_list *sn_flush_device_list;
61
62 if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
63 return NULL;
64 }
65
66 /*
67 * Allocate kernel bus soft and copy from prom.
68 */
69
70 soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
71 if (!soft) {
72 return NULL;
73 }
74
75 memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
76 soft->pbi_buscommon.bs_base =
77 (((u64) soft->pbi_buscommon.
78 bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
79
80 spin_lock_init(&soft->pbi_lock);
81
82 /*
83 * register the bridge's error interrupt handler
84 */
85 if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
86 SA_SHIRQ, "PCIBR error", (void *)(soft))) {
87 printk(KERN_WARNING
88 "pcibr cannot allocate interrupt for error handler\n");
89 }
90
91 /*
92 * Update the Bridge with the "kernel" pagesize
93 */
94 if (PAGE_SIZE < 16384) {
95 pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
96 } else {
97 pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
98 }
99
100 nasid = NASID_GET(soft->pbi_buscommon.bs_base);
101 cnode = nasid_to_cnodeid(nasid);
102 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
103
104 if (hubdev_info->hdi_flush_nasid_list.widget_p) {
105 sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
106 widget_p[(int)soft->pbi_buscommon.bs_xid];
107 if (sn_flush_device_list) {
108 for (j = 0; j < DEV_PER_WIDGET;
109 j++, sn_flush_device_list++) {
110 if (sn_flush_device_list->sfdl_slot == -1)
111 continue;
112 if (sn_flush_device_list->
113 sfdl_persistent_busnum ==
114 soft->pbi_buscommon.bs_persist_busnum)
115 sn_flush_device_list->sfdl_pcibus_info =
116 soft;
117 }
118 }
119 }
120
121 /* Setup the PMU ATE map */
122 soft->pbi_int_ate_resource.lowest_free_index = 0;
123 soft->pbi_int_ate_resource.ate =
124 kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
125 memset(soft->pbi_int_ate_resource.ate, 0,
126 (soft->pbi_int_ate_size * sizeof(uint64_t)));
127
128 return soft;
129}
130
131void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
132{
133 struct pcidev_info *pcidev_info;
134 struct pcibus_info *pcibus_info;
135 int bit = sn_irq_info->irq_int_bit;
136
137 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
138 if (pcidev_info) {
139 pcibus_info =
140 (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
141 pdi_pcibus_info;
142 pcireg_force_intr_set(pcibus_info, bit);
143 }
144}
145
146void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
147{
148 struct pcidev_info *pcidev_info;
149 struct pcibus_info *pcibus_info;
150 int bit = sn_irq_info->irq_int_bit;
151 uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
152
153 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
154 if (pcidev_info) {
155 pcibus_info =
156 (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
157 pdi_pcibus_info;
158
159 /* Disable the device's IRQ */
160 pcireg_intr_enable_bit_clr(pcibus_info, bit);
161
162 /* Change the device's IRQ */
163 pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
164
165 /* Re-enable the device's IRQ */
166 pcireg_intr_enable_bit_set(pcibus_info, bit);
167
168 pcibr_force_interrupt(sn_irq_info);
169 }
170}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
new file mode 100644
index 000000000000..74a74a7d2a13
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -0,0 +1,282 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include "pci/pcibus_provider_defs.h"
12#include "pci/pcidev.h"
13#include "pci/tiocp.h"
14#include "pci/pic.h"
15#include "pci/pcibr_provider.h"
16
/*
 * Overlay of the two supported bridge ASIC register layouts.  The bus
 * base address is cast to this union so each register accessor can
 * reach either register set through a single pointer, selected by
 * pbi_bridge_type (PCIBR_BRIDGETYPE_TIOCP or PCIBR_BRIDGETYPE_PIC).
 */
union br_ptr {
	struct tiocp tio;	/* TIOCP bridge register layout */
	struct pic pic;		/* PIC bridge register layout */
};
21
22/*
23 * Control Register Access -- Read/Write 0000_0020
24 */
25void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
26{
27 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
28
29 if (pcibus_info) {
30 switch (pcibus_info->pbi_bridge_type) {
31 case PCIBR_BRIDGETYPE_TIOCP:
32 ptr->tio.cp_control &= ~bits;
33 break;
34 case PCIBR_BRIDGETYPE_PIC:
35 ptr->pic.p_wid_control &= ~bits;
36 break;
37 default:
38 panic
39 ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
40 (void *)ptr);
41 }
42 }
43}
44
45void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
46{
47 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
48
49 if (pcibus_info) {
50 switch (pcibus_info->pbi_bridge_type) {
51 case PCIBR_BRIDGETYPE_TIOCP:
52 ptr->tio.cp_control |= bits;
53 break;
54 case PCIBR_BRIDGETYPE_PIC:
55 ptr->pic.p_wid_control |= bits;
56 break;
57 default:
58 panic
59 ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
60 (void *)ptr);
61 }
62 }
63}
64
65/*
66 * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
67 */
68uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
69{
70 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
71 uint64_t ret = 0;
72
73 if (pcibus_info) {
74 switch (pcibus_info->pbi_bridge_type) {
75 case PCIBR_BRIDGETYPE_TIOCP:
76 ret = ptr->tio.cp_tflush;
77 break;
78 case PCIBR_BRIDGETYPE_PIC:
79 ret = ptr->pic.p_wid_tflush;
80 break;
81 default:
82 panic
83 ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
84 (void *)ptr);
85 }
86 }
87
88 /* Read of the Target Flush should always return zero */
89 if (ret != 0)
90 panic("pcireg_tflush_get:Target Flush failed\n");
91
92 return ret;
93}
94
95/*
96 * Interrupt Status Register Access -- Read Only 0000_0100
97 */
98uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
99{
100 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
101 uint64_t ret = 0;
102
103 if (pcibus_info) {
104 switch (pcibus_info->pbi_bridge_type) {
105 case PCIBR_BRIDGETYPE_TIOCP:
106 ret = ptr->tio.cp_int_status;
107 break;
108 case PCIBR_BRIDGETYPE_PIC:
109 ret = ptr->pic.p_int_status;
110 break;
111 default:
112 panic
113 ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
114 (void *)ptr);
115 }
116 }
117 return ret;
118}
119
120/*
121 * Interrupt Enable Register Access -- Read/Write 0000_0108
122 */
123void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
124{
125 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
126
127 if (pcibus_info) {
128 switch (pcibus_info->pbi_bridge_type) {
129 case PCIBR_BRIDGETYPE_TIOCP:
130 ptr->tio.cp_int_enable &= ~bits;
131 break;
132 case PCIBR_BRIDGETYPE_PIC:
133 ptr->pic.p_int_enable &= ~bits;
134 break;
135 default:
136 panic
137 ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
138 (void *)ptr);
139 }
140 }
141}
142
143void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
144{
145 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
146
147 if (pcibus_info) {
148 switch (pcibus_info->pbi_bridge_type) {
149 case PCIBR_BRIDGETYPE_TIOCP:
150 ptr->tio.cp_int_enable |= bits;
151 break;
152 case PCIBR_BRIDGETYPE_PIC:
153 ptr->pic.p_int_enable |= bits;
154 break;
155 default:
156 panic
157 ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
158 (void *)ptr);
159 }
160 }
161}
162
163/*
164 * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
165 */
166void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
167 uint64_t addr)
168{
169 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
170
171 if (pcibus_info) {
172 switch (pcibus_info->pbi_bridge_type) {
173 case PCIBR_BRIDGETYPE_TIOCP:
174 ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
175 ptr->tio.cp_int_addr[int_n] |=
176 (addr & TIOCP_HOST_INTR_ADDR);
177 break;
178 case PCIBR_BRIDGETYPE_PIC:
179 ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
180 ptr->pic.p_int_addr[int_n] |=
181 (addr & PIC_HOST_INTR_ADDR);
182 break;
183 default:
184 panic
185 ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
186 (void *)ptr);
187 }
188 }
189}
190
191/*
192 * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
193 */
194void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
195{
196 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
197
198 if (pcibus_info) {
199 switch (pcibus_info->pbi_bridge_type) {
200 case PCIBR_BRIDGETYPE_TIOCP:
201 ptr->tio.cp_force_pin[int_n] = 1;
202 break;
203 case PCIBR_BRIDGETYPE_PIC:
204 ptr->pic.p_force_pin[int_n] = 1;
205 break;
206 default:
207 panic
208 ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
209 (void *)ptr);
210 }
211 }
212}
213
214/*
215 * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
216 */
217uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
218{
219 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
220 uint64_t ret = 0;
221
222 if (pcibus_info) {
223 switch (pcibus_info->pbi_bridge_type) {
224 case PCIBR_BRIDGETYPE_TIOCP:
225 ret = ptr->tio.cp_wr_req_buf[device];
226 break;
227 case PCIBR_BRIDGETYPE_PIC:
228 ret = ptr->pic.p_wr_req_buf[device];
229 break;
230 default:
231 panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
232 }
233
234 }
235 /* Read of the Write Buffer Flush should always return zero */
236 return ret;
237}
238
239void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
240 uint64_t val)
241{
242 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
243
244 if (pcibus_info) {
245 switch (pcibus_info->pbi_bridge_type) {
246 case PCIBR_BRIDGETYPE_TIOCP:
247 ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
248 break;
249 case PCIBR_BRIDGETYPE_PIC:
250 ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
251 break;
252 default:
253 panic
254 ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
255 (void *)ptr);
256 }
257 }
258}
259
260uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
261{
262 union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
263 uint64_t *ret = (uint64_t *) 0;
264
265 if (pcibus_info) {
266 switch (pcibus_info->pbi_bridge_type) {
267 case PCIBR_BRIDGETYPE_TIOCP:
268 ret =
269 (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
270 break;
271 case PCIBR_BRIDGETYPE_PIC:
272 ret =
273 (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
274 break;
275 default:
276 panic
277 ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
278 (void *)ptr);
279 }
280 }
281 return ret;
282}