Diffstat (limited to 'arch/alpha/include/asm/pci.h')
-rw-r--r--  arch/alpha/include/asm/pci.h  276
1 file changed, 276 insertions(+), 0 deletions(-)
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
new file mode 100644
index 000000000000..2a14302c17a3
--- /dev/null
+++ b/arch/alpha/include/asm/pci.h
@@ -0,0 +1,276 @@
#ifndef __ALPHA_PCI_H
#define __ALPHA_PCI_H

#ifdef __KERNEL__

#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/machvec.h>

/*
 * The following structure is used to manage multiple PCI busses.
 */

struct pci_dev;
struct pci_bus;
struct resource;
struct pci_iommu_arena;
struct page;
/* A controller.  Used to manage multiple PCI busses.  */

struct pci_controller {
	struct pci_controller *next;
	struct pci_bus *bus;
	struct resource *io_space;
	struct resource *mem_space;

	/* The following are for reporting to userland.  The invariant is
	   that if we report a BWX-capable dense memory, we do not report
	   a sparse memory at all, even if it exists.  */
	unsigned long sparse_mem_base;
	unsigned long dense_mem_base;
	unsigned long sparse_io_base;
	unsigned long dense_io_base;

	/* This one's for the kernel only.  It's in KSEG somewhere.  */
	unsigned long config_space_base;

	unsigned int index;
	/* For compatibility with current (as of July 2003) pciutils
	   and XFree86.  Eventually will be removed.  */
	unsigned int need_domain_info;

	struct pci_iommu_arena *sg_pci;
	struct pci_iommu_arena *sg_isa;

	void *sysdata;
};
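
/* Illustrative sketch, not part of the original header: given a
   device, its controller is reached through the bus sysdata pointer,
   exactly as pci_proc_domain() below does.  "example_show_hose" is a
   name made up for the example.  */
#if 0
static void example_show_hose(struct pci_dev *pdev)
{
	struct pci_controller *hose = pdev->bus->sysdata;

	printk(KERN_DEBUG "hose %u: dense mem base %lx\n",
	       hose->index, hose->dense_mem_base);
}
#endif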

/* Override the logic in pci_scan_bus for skipping already-configured
   bus numbers.  */

#define pcibios_assign_all_busses()	1
#define pcibios_scan_all_fns(a, b)	0

#define PCIBIOS_MIN_IO		alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM		alpha_mv.min_mem_address

extern void pcibios_set_master(struct pci_dev *dev);

extern inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

/* IOMMU controls.  */

/* The PCI address space does not equal the physical memory address space.
   The networking and block device layers use this boolean for bounce buffer
   decisions.  */
#define PCI_DMA_BUS_IS_PHYS  0

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else *DMA_ADDRP is undefined.  */

extern void *__pci_alloc_consistent(struct pci_dev *, size_t,
				    dma_addr_t *, gfp_t);
static inline void *
pci_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma)
{
	return __pci_alloc_consistent(dev, size, dma, GFP_ATOMIC);
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
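
/* Illustrative sketch, not part of the original header: a typical
   alloc/free pairing for the consistent DMA interface above.  The
   function name, device pointer and 256-byte size are assumptions
   made up for the example.  */
#if 0
static int example_consistent(struct pci_dev *pdev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = pci_alloc_consistent(pdev, 256, &dma_handle);
	if (!cpu_addr)
		return -ENOMEM;

	/* Hand dma_handle to the device; the CPU uses cpu_addr.  */

	pci_free_consistent(pdev, 256, cpu_addr, dma_handle);
	return 0;
}
#endif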

/* Map a single buffer of the indicated size for PCI DMA in streaming mode.
   The 32-bit PCI bus mastering address to use is returned.  Once the device
   is given the dma address, the device owns this memory until either
   pci_unmap_single or pci_dma_sync_single_for_cpu is performed.  */

extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int);

/* Likewise, but for a page instead of an address.  */
extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
			       unsigned long, size_t, int);

/* Test for pci_map_single or pci_map_page having generated an error.  */

static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int);
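
/* Illustrative sketch, not part of the original header: one streaming
   mapping round trip with error checking.  "example_tx", "buf" and
   "len" are assumptions made up for the example.  */
#if 0
static int example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EIO;

	/* Program the device with dma, run the transfer, wait.  */

	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
	return 0;
}
#endif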

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
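
/* Illustrative sketch, not part of the original header: a driver that
   must unmap in a different function than the one that mapped stores
   the cookie with the macros above.  "struct example_desc" and its
   field names are assumptions made up for the example.  */
#if 0
struct example_desc {
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};

static void example_save(struct example_desc *desc, dma_addr_t dma,
			 __u32 len)
{
	pci_unmap_addr_set(desc, mapping, dma);
	pci_unmap_len_set(desc, maplen, len);
}

static void example_teardown(struct pci_dev *pdev,
			     struct example_desc *desc)
{
	pci_unmap_single(pdev, pci_unmap_addr(desc, mapping),
			 pci_unmap_len(desc, maplen), PCI_DMA_TODEVICE);
}
#endif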

/* Map a set of buffers described by scatterlist in streaming mode for
   PCI DMA.  This is the scatter-gather version of the above
   pci_map_single interface.  Here the scatter gather list elements
   are each tagged with the appropriate PCI dma address and length.
   They are obtained via sg_dma_{address,length}(SG).

   NOTE: An implementation may be able to use a smaller number of DMA
   address/length pairs than there are SG table elements (for example,
   via virtual mapping capabilities).  The routine returns the number
   of addr/length pairs actually used, at most nents.

   Device ownership issues as mentioned above for pci_map_single are
   the same here.  */

extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int);

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
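
/* Illustrative sketch, not part of the original header: mapping a
   scatterlist and programming only the pairs actually used; note the
   unmap still takes the original nents.  "example_rx" and
   "program_hw_entry" are names made up for the example.  */
#if 0
static int example_rx(struct pci_dev *pdev, struct scatterlist *sg,
		      int nents)
{
	int i, used;

	used = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
	if (used == 0)
		return -EIO;

	for (i = 0; i < used; i++)
		program_hw_entry(sg_dma_address(&sg[i]),
				 sg_dma_length(&sg[i]));

	/* ... run the transfer ... */

	pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
	return 0;
}
#endif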

/* Make physical memory consistent for a single streaming mode DMA
   translation after a transfer, while the device still has ownership
   of the buffer.

   If you perform a pci_map_single() but wish to interrogate the
   buffer using the cpu, yet do not wish to tear down the PCI dma
   mapping, you must call this function before doing so.  At the next
   point you give the PCI dma address back to the card, you must first
   perform a pci_dma_sync_single_for_device, and then the device again
   owns the buffer.  */

static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr,
			    size_t size, int direction)
{
	/* Nothing to do.  */
}

static inline void
pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr,
			       size_t size, int direction)
{
	/* Nothing to do.  */
}
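
/* Illustrative sketch, not part of the original header: peeking at a
   live streaming buffer between device transfers.  On Alpha these
   syncs compile away, but portable drivers must still make the calls.
   "example_peek" and its arguments are assumptions made up for the
   example.  */
#if 0
static void example_peek(struct pci_dev *pdev, dma_addr_t dma,
			 size_t len)
{
	pci_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* The CPU may now read what the device wrote to the buffer.  */

	pci_dma_sync_single_for_device(pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* The device may now be given dma again.  */
}
#endif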

/* Make physical memory consistent for a set of streaming mode DMA
   translations after a transfer.  The same as pci_dma_sync_single_*
   but for a scatter-gather list, same rules and usage.  */

static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg,
			int nents, int direction)
{
	/* Nothing to do.  */
}

static inline void
pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg,
			   int nents, int direction)
{
	/* Nothing to do.  */
}

/* Return whether the given PCI device DMA address mask can
   be supported properly.  For example, if your device can
   only drive the low 24 bits during PCI bus mastering, then
   you would pass 0x00ffffff as the mask to this function.  */

extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
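
/* Illustrative sketch, not part of the original header: drivers do
   not normally call pci_dma_supported() directly; they set a mask
   through pci_set_dma_mask(), which uses this hook.  The 24-bit mask
   mirrors the example in the comment above.  */
#if 0
	if (pci_set_dma_mask(pdev, 0x00ffffff))
		return -ENODEV;	/* device cannot address our memory */
#endif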

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_BOUNDARY;
	*strategy_parameter = cacheline_size;
}
#endif

/* TODO: integrate with include/asm-generic/pci.h ? */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return channel ? 15 : 14;
}

extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *,
				    struct resource *);

extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
				    struct pci_bus_region *region);

static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index

static inline int pci_proc_domain(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	return hose->need_domain_info;
}
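
/* Illustrative sketch, not part of the original header: composing the
   user-visible domain:bus:slot.func name with the helper above;
   "pdev" is an assumption made up for the example.  */
#if 0
	printk(KERN_INFO "%04x:%02x:%02x.%x\n",
	       pci_domain_nr(pdev->bus), pdev->bus->number,
	       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#endif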

struct pci_dev *alpha_gendev_to_pci(struct device *dev);

#endif /* __KERNEL__ */

/* Values for the `which' argument to sys_pciconfig_iobase.  */
#define IOBASE_HOSE		0
#define IOBASE_SPARSE_MEM	1
#define IOBASE_DENSE_MEM	2
#define IOBASE_SPARSE_IO	3
#define IOBASE_DENSE_IO		4
#define IOBASE_ROOT_BUS		5
#define IOBASE_FROM_HOSE	0x10000
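
/* Illustrative sketch, not part of the original header: user space
   reaches these bases through the Alpha-specific pciconfig_iobase
   syscall, assuming <sys/syscall.h> provides __NR_pciconfig_iobase
   and that "bus" and "dfn" identify a device on the hose of interest
   (both names made up for the example).  */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

	/* Returns the dense memory base for that hose, or -1 with
	   errno set on failure.  */
	long dense_mem = syscall(__NR_pciconfig_iobase, IOBASE_DENSE_MEM,
				 bus, dfn);
#endif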

extern struct pci_dev *isa_bridge;

#endif /* __ALPHA_PCI_H */