Diffstat (limited to 'arch/ia64/pci')
 -rw-r--r--  arch/ia64/pci/Makefile |   4
 -rw-r--r--  arch/ia64/pci/pci.c    | 735
 2 files changed, 739 insertions, 0 deletions
diff --git a/arch/ia64/pci/Makefile b/arch/ia64/pci/Makefile
new file mode 100644
index 000000000000..e66889e6922a
--- /dev/null
+++ b/arch/ia64/pci/Makefile
@@ -0,0 +1,4 @@
#
# Makefile for the ia64-specific parts of the pci bus
#
obj-y := pci.o
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
new file mode 100644
index 000000000000..88641e5095b5
--- /dev/null
+++ b/arch/ia64/pci/pci.c
@@ -0,0 +1,735 @@
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/config.h>

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>


static int pci_routeirq;

/*
 * Low-level SAL-based PCI configuration access functions.  Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
static int
pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

static int
pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;

static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_NUMA
extern acpi_status acpi_map_iosapic(acpi_handle, u32, void *, void **);
static void acpi_map_iosapics(void)
{
	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
}
#else
static void acpi_map_iosapics(void)
{
	return;
}
#endif /* CONFIG_NUMA */

static int __init
pci_acpi_init (void)
{
	struct pci_dev *dev = NULL;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");

	acpi_map_iosapics();

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	} else
		printk(KERN_INFO "PCI: If a device doesn't work, try \"pci=routeirq\".  If it helps, post a report\n");

	return 0;
}

subsys_initcall(pci_acpi_init);

/* Called by ACPI when it finds a new root bus. */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	memset(controller, 0, sizeof(*controller));
	controller->segment = seg;
	return controller;
}

static u64 __devinit
add_io_space (struct acpi_resource_address64 *addr)
{
	u64 offset;
	int sparse = 0;
	int i;

	if (addr->address_translation_offset == 0)
		return IO_SPACE_BASE(0);	/* part of legacy IO space */

	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	offset = (u64) ioremap(addr->address_translation_offset, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == offset &&
		    io_space[i].sparse == sparse)
			return IO_SPACE_BASE(i);

	if (num_io_spaces == MAX_IO_SPACES) {
		printk("Too many IO port spaces\n");
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = offset;
	io_space[i].sparse = sparse;

	return IO_SPACE_BASE(i);
}
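
/*
 * Illustrative only, assuming the IO_SPACE_* encoding from asm/io.h:
 * the value returned above is an I/O "cookie" base.  A port in space i
 * is later referred to as
 *
 *	port_cookie = IO_SPACE_BASE(i) | bridge_local_port;
 *
 * and inb()/outb() split the cookie back into a space number and a
 * bridge-local port to find the right MMIO window for the access.
 */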
211 | |||
212 | static acpi_status __devinit | ||
213 | count_window (struct acpi_resource *resource, void *data) | ||
214 | { | ||
215 | unsigned int *windows = (unsigned int *) data; | ||
216 | struct acpi_resource_address64 addr; | ||
217 | acpi_status status; | ||
218 | |||
219 | status = acpi_resource_to_address64(resource, &addr); | ||
220 | if (ACPI_SUCCESS(status)) | ||
221 | if (addr.resource_type == ACPI_MEMORY_RANGE || | ||
222 | addr.resource_type == ACPI_IO_RANGE) | ||
223 | (*windows)++; | ||
224 | |||
225 | return AE_OK; | ||
226 | } | ||
227 | |||
228 | struct pci_root_info { | ||
229 | struct pci_controller *controller; | ||
230 | char *name; | ||
231 | }; | ||
232 | |||
233 | static __devinit acpi_status add_window(struct acpi_resource *res, void *data) | ||
234 | { | ||
235 | struct pci_root_info *info = data; | ||
236 | struct pci_window *window; | ||
237 | struct acpi_resource_address64 addr; | ||
238 | acpi_status status; | ||
239 | unsigned long flags, offset = 0; | ||
240 | struct resource *root; | ||
241 | |||
242 | status = acpi_resource_to_address64(res, &addr); | ||
243 | if (!ACPI_SUCCESS(status)) | ||
244 | return AE_OK; | ||
245 | |||
246 | if (!addr.address_length) | ||
247 | return AE_OK; | ||
248 | |||
249 | if (addr.resource_type == ACPI_MEMORY_RANGE) { | ||
250 | flags = IORESOURCE_MEM; | ||
251 | root = &iomem_resource; | ||
252 | offset = addr.address_translation_offset; | ||
253 | } else if (addr.resource_type == ACPI_IO_RANGE) { | ||
254 | flags = IORESOURCE_IO; | ||
255 | root = &ioport_resource; | ||
256 | offset = add_io_space(&addr); | ||
257 | if (offset == ~0) | ||
258 | return AE_OK; | ||
259 | } else | ||
260 | return AE_OK; | ||
261 | |||
262 | window = &info->controller->window[info->controller->windows++]; | ||
263 | window->resource.name = info->name; | ||
264 | window->resource.flags = flags; | ||
265 | window->resource.start = addr.min_address_range + offset; | ||
266 | window->resource.end = addr.max_address_range + offset; | ||
267 | window->resource.child = NULL; | ||
268 | window->offset = offset; | ||
269 | |||
270 | if (insert_resource(root, &window->resource)) { | ||
271 | printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n", | ||
272 | window->resource.start, window->resource.end, | ||
273 | root->name, info->name); | ||
274 | } | ||
275 | |||
276 | return AE_OK; | ||
277 | } | ||
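
/*
 * Illustrative only: window->offset records how bus addresses map to
 * CPU addresses for this window.  If _CRS reports a memory window at
 * bus address 0x80000000 with a translation offset of 0x100000000, the
 * window is claimed at CPU address 0x180000000, and that offset is what
 * the pcibios_*_to_* helpers below add or subtract when converting
 * between the two views.
 */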
278 | |||
279 | static void __devinit | ||
280 | pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) | ||
281 | { | ||
282 | int i, j; | ||
283 | |||
284 | j = 0; | ||
285 | for (i = 0; i < ctrl->windows; i++) { | ||
286 | struct resource *res = &ctrl->window[i].resource; | ||
287 | /* HP's firmware has a hack to work around a Windows bug. | ||
288 | * Ignore these tiny memory ranges */ | ||
289 | if ((res->flags & IORESOURCE_MEM) && | ||
290 | (res->end - res->start < 16)) | ||
291 | continue; | ||
292 | if (j >= PCI_BUS_NUM_RESOURCES) { | ||
293 | printk("Ignoring range [%lx-%lx] (%lx)\n", res->start, | ||
294 | res->end, res->flags); | ||
295 | continue; | ||
296 | } | ||
297 | bus->resource[j++] = res; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | struct pci_bus * __devinit | ||
302 | pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) | ||
303 | { | ||
304 | struct pci_root_info info; | ||
305 | struct pci_controller *controller; | ||
306 | unsigned int windows = 0; | ||
307 | struct pci_bus *pbus; | ||
308 | char *name; | ||
309 | |||
310 | controller = alloc_pci_controller(domain); | ||
311 | if (!controller) | ||
312 | goto out1; | ||
313 | |||
314 | controller->acpi_handle = device->handle; | ||
315 | |||
316 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window, | ||
317 | &windows); | ||
318 | controller->window = kmalloc(sizeof(*controller->window) * windows, | ||
319 | GFP_KERNEL); | ||
320 | if (!controller->window) | ||
321 | goto out2; | ||
322 | |||
323 | name = kmalloc(16, GFP_KERNEL); | ||
324 | if (!name) | ||
325 | goto out3; | ||
326 | |||
327 | sprintf(name, "PCI Bus %04x:%02x", domain, bus); | ||
328 | info.controller = controller; | ||
329 | info.name = name; | ||
330 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window, | ||
331 | &info); | ||
332 | |||
333 | pbus = pci_scan_bus(bus, &pci_root_ops, controller); | ||
334 | if (pbus) | ||
335 | pcibios_setup_root_windows(pbus, controller); | ||
336 | |||
337 | return pbus; | ||
338 | |||
339 | out3: | ||
340 | kfree(controller->window); | ||
341 | out2: | ||
342 | kfree(controller); | ||
343 | out1: | ||
344 | return NULL; | ||
345 | } | ||
346 | |||
347 | void pcibios_resource_to_bus(struct pci_dev *dev, | ||
348 | struct pci_bus_region *region, struct resource *res) | ||
349 | { | ||
350 | struct pci_controller *controller = PCI_CONTROLLER(dev); | ||
351 | unsigned long offset = 0; | ||
352 | int i; | ||
353 | |||
354 | for (i = 0; i < controller->windows; i++) { | ||
355 | struct pci_window *window = &controller->window[i]; | ||
356 | if (!(window->resource.flags & res->flags)) | ||
357 | continue; | ||
358 | if (window->resource.start > res->start) | ||
359 | continue; | ||
360 | if (window->resource.end < res->end) | ||
361 | continue; | ||
362 | offset = window->offset; | ||
363 | break; | ||
364 | } | ||
365 | |||
366 | region->start = res->start - offset; | ||
367 | region->end = res->end - offset; | ||
368 | } | ||
369 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
370 | |||
371 | void pcibios_bus_to_resource(struct pci_dev *dev, | ||
372 | struct resource *res, struct pci_bus_region *region) | ||
373 | { | ||
374 | struct pci_controller *controller = PCI_CONTROLLER(dev); | ||
375 | unsigned long offset = 0; | ||
376 | int i; | ||
377 | |||
378 | for (i = 0; i < controller->windows; i++) { | ||
379 | struct pci_window *window = &controller->window[i]; | ||
380 | if (!(window->resource.flags & res->flags)) | ||
381 | continue; | ||
382 | if (window->resource.start - window->offset > region->start) | ||
383 | continue; | ||
384 | if (window->resource.end - window->offset < region->end) | ||
385 | continue; | ||
386 | offset = window->offset; | ||
387 | break; | ||
388 | } | ||
389 | |||
390 | res->start = region->start + offset; | ||
391 | res->end = region->end + offset; | ||
392 | } | ||
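
/*
 * Illustrative only, continuing the example above: with a window whose
 * offset is 0x100000000, a BAR at bus address 0x80000000 converts as
 *
 *	pcibios_bus_to_resource():  res->start = 0x80000000 + 0x100000000
 *	                                       = 0x180000000  (CPU view)
 *	pcibios_resource_to_bus():  region->start = 0x180000000 - 0x100000000
 *	                                          = 0x80000000  (bus view)
 */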
393 | |||
394 | static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) | ||
395 | { | ||
396 | struct pci_bus_region region; | ||
397 | int i; | ||
398 | int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? \ | ||
399 | PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES; | ||
400 | |||
401 | for (i = 0; i < limit; i++) { | ||
402 | if (!dev->resource[i].flags) | ||
403 | continue; | ||
404 | region.start = dev->resource[i].start; | ||
405 | region.end = dev->resource[i].end; | ||
406 | pcibios_bus_to_resource(dev, &dev->resource[i], ®ion); | ||
407 | pci_claim_resource(dev, i); | ||
408 | } | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * Called after each bus is probed, but before its children are examined. | ||
413 | */ | ||
414 | void __devinit | ||
415 | pcibios_fixup_bus (struct pci_bus *b) | ||
416 | { | ||
417 | struct pci_dev *dev; | ||
418 | |||
419 | list_for_each_entry(dev, &b->devices, bus_list) | ||
420 | pcibios_fixup_device_resources(dev); | ||
421 | |||
422 | return; | ||
423 | } | ||
424 | |||
425 | void __devinit | ||
426 | pcibios_update_irq (struct pci_dev *dev, int irq) | ||
427 | { | ||
428 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | ||
429 | |||
430 | /* ??? FIXME -- record old value for shutdown. */ | ||
431 | } | ||
432 | |||
433 | static inline int | ||
434 | pcibios_enable_resources (struct pci_dev *dev, int mask) | ||
435 | { | ||
436 | u16 cmd, old_cmd; | ||
437 | int idx; | ||
438 | struct resource *r; | ||
439 | |||
440 | if (!dev) | ||
441 | return -EINVAL; | ||
442 | |||
443 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
444 | old_cmd = cmd; | ||
445 | for (idx=0; idx<6; idx++) { | ||
446 | /* Only set up the desired resources. */ | ||
447 | if (!(mask & (1 << idx))) | ||
448 | continue; | ||
449 | |||
450 | r = &dev->resource[idx]; | ||
451 | if (!r->start && r->end) { | ||
452 | printk(KERN_ERR | ||
453 | "PCI: Device %s not available because of resource collisions\n", | ||
454 | pci_name(dev)); | ||
455 | return -EINVAL; | ||
456 | } | ||
457 | if (r->flags & IORESOURCE_IO) | ||
458 | cmd |= PCI_COMMAND_IO; | ||
459 | if (r->flags & IORESOURCE_MEM) | ||
460 | cmd |= PCI_COMMAND_MEMORY; | ||
461 | } | ||
462 | if (dev->resource[PCI_ROM_RESOURCE].start) | ||
463 | cmd |= PCI_COMMAND_MEMORY; | ||
464 | if (cmd != old_cmd) { | ||
465 | printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); | ||
466 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
467 | } | ||
468 | return 0; | ||
469 | } | ||
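
/*
 * Illustrative only: the mask selects which BARs to enable, one bit per
 * resource index.  For example,
 *
 *	pcibios_enable_resources(dev, (1 << 0) | (1 << 2));
 *
 * would examine only BARs 0 and 2 and set PCI_COMMAND_IO or
 * PCI_COMMAND_MEMORY as those two resources require.
 */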
470 | |||
471 | int | ||
472 | pcibios_enable_device (struct pci_dev *dev, int mask) | ||
473 | { | ||
474 | int ret; | ||
475 | |||
476 | ret = pcibios_enable_resources(dev, mask); | ||
477 | if (ret < 0) | ||
478 | return ret; | ||
479 | |||
480 | return acpi_pci_irq_enable(dev); | ||
481 | } | ||
482 | |||
483 | #ifdef CONFIG_ACPI_DEALLOCATE_IRQ | ||
484 | void | ||
485 | pcibios_disable_device (struct pci_dev *dev) | ||
486 | { | ||
487 | acpi_pci_irq_disable(dev); | ||
488 | } | ||
489 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | ||
490 | |||
491 | void | ||
492 | pcibios_align_resource (void *data, struct resource *res, | ||
493 | unsigned long size, unsigned long align) | ||
494 | { | ||
495 | } | ||
496 | |||
497 | /* | ||
498 | * PCI BIOS setup, always defaults to SAL interface | ||
499 | */ | ||
500 | char * __init | ||
501 | pcibios_setup (char *str) | ||
502 | { | ||
503 | if (!strcmp(str, "routeirq")) | ||
504 | pci_routeirq = 1; | ||
505 | return NULL; | ||
506 | } | ||
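
/*
 * Illustrative only: pcibios_setup() receives the unhandled suffixes of
 * the "pci=" boot option, so booting with
 *
 *	pci=routeirq
 *
 * sets pci_routeirq, which makes pci_acpi_init() above route interrupts
 * for every device rather than waiting for pci_enable_device().
 */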
507 | |||
508 | int | ||
509 | pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, | ||
510 | enum pci_mmap_state mmap_state, int write_combine) | ||
511 | { | ||
512 | /* | ||
513 | * I/O space cannot be accessed via normal processor loads and | ||
514 | * stores on this platform. | ||
515 | */ | ||
516 | if (mmap_state == pci_mmap_io) | ||
517 | /* | ||
518 | * XXX we could relax this for I/O spaces for which ACPI | ||
519 | * indicates that the space is 1-to-1 mapped. But at the | ||
520 | * moment, we don't support multiple PCI address spaces and | ||
521 | * the legacy I/O space is not 1-to-1 mapped, so this is moot. | ||
522 | */ | ||
523 | return -EINVAL; | ||
524 | |||
525 | /* | ||
526 | * Leave vm_pgoff as-is, the PCI space address is the physical | ||
527 | * address on this platform. | ||
528 | */ | ||
529 | vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO); | ||
530 | |||
531 | if (write_combine && efi_range_is_wc(vma->vm_start, | ||
532 | vma->vm_end - vma->vm_start)) | ||
533 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
534 | else | ||
535 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
536 | |||
537 | if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
538 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | ||
539 | return -EAGAIN; | ||
540 | |||
541 | return 0; | ||
542 | } | ||
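
/*
 * Illustrative user-space sketch (assuming the sysfs resource-file
 * interface that ends up in this hook; device path is an example only):
 *
 *	int fd = open("/sys/bus/pci/devices/0000:00:01.0/resource0", O_RDWR);
 *	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * A mapping like that arrives here with mmap_state == pci_mmap_mem and
 * gets a write-combining or uncached pgprot depending on the EFI memory
 * map attributes for the range.
 */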
543 | |||
544 | /** | ||
545 | * ia64_pci_get_legacy_mem - generic legacy mem routine | ||
546 | * @bus: bus to get legacy memory base address for | ||
547 | * | ||
548 | * Find the base of legacy memory for @bus. This is typically the first | ||
549 | * megabyte of bus address space for @bus or is simply 0 on platforms whose | ||
550 | * chipsets support legacy I/O and memory routing. Returns the base address | ||
551 | * or an error pointer if an error occurred. | ||
552 | * | ||
553 | * This is the ia64 generic version of this routine. Other platforms | ||
554 | * are free to override it with a machine vector. | ||
555 | */ | ||
556 | char *ia64_pci_get_legacy_mem(struct pci_bus *bus) | ||
557 | { | ||
558 | return (char *)__IA64_UNCACHED_OFFSET; | ||
559 | } | ||
560 | |||
561 | /** | ||
562 | * pci_mmap_legacy_page_range - map legacy memory space to userland | ||
563 | * @bus: bus whose legacy space we're mapping | ||
564 | * @vma: vma passed in by mmap | ||
565 | * | ||
566 | * Map legacy memory space for this device back to userspace using a machine | ||
567 | * vector to get the base address. | ||
568 | */ | ||
569 | int | ||
570 | pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma) | ||
571 | { | ||
572 | char *addr; | ||
573 | |||
574 | addr = pci_get_legacy_mem(bus); | ||
575 | if (IS_ERR(addr)) | ||
576 | return PTR_ERR(addr); | ||
577 | |||
578 | vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; | ||
579 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
580 | vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO); | ||
581 | |||
582 | if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
583 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | ||
584 | return -EAGAIN; | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | /** | ||
590 | * ia64_pci_legacy_read - read from legacy I/O space | ||
591 | * @bus: bus to read | ||
592 | * @port: legacy port value | ||
593 | * @val: caller allocated storage for returned value | ||
594 | * @size: number of bytes to read | ||
595 | * | ||
596 | * Simply reads @size bytes from @port and puts the result in @val. | ||
597 | * | ||
598 | * Again, this (and the write routine) are generic versions that can be | ||
599 | * overridden by the platform. This is necessary on platforms that don't | ||
600 | * support legacy I/O routing or that hard fail on legacy I/O timeouts. | ||
601 | */ | ||
602 | int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) | ||
603 | { | ||
604 | int ret = size; | ||
605 | |||
606 | switch (size) { | ||
607 | case 1: | ||
608 | *val = inb(port); | ||
609 | break; | ||
610 | case 2: | ||
611 | *val = inw(port); | ||
612 | break; | ||
613 | case 4: | ||
614 | *val = inl(port); | ||
615 | break; | ||
616 | default: | ||
617 | ret = -EINVAL; | ||
618 | break; | ||
619 | } | ||
620 | |||
621 | return ret; | ||
622 | } | ||
623 | |||
624 | /** | ||
625 | * ia64_pci_legacy_write - perform a legacy I/O write | ||
626 | * @bus: bus pointer | ||
627 | * @port: port to write | ||
628 | * @val: value to write | ||
629 | * @size: number of bytes to write from @val | ||
630 | * | ||
631 | * Simply writes @size bytes of @val to @port. | ||
632 | */ | ||
633 | int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size) | ||
634 | { | ||
635 | int ret = 0; | ||
636 | |||
637 | switch (size) { | ||
638 | case 1: | ||
639 | outb(val, port); | ||
640 | break; | ||
641 | case 2: | ||
642 | outw(val, port); | ||
643 | break; | ||
644 | case 4: | ||
645 | outl(val, port); | ||
646 | break; | ||
647 | default: | ||
648 | ret = -EINVAL; | ||
649 | break; | ||
650 | } | ||
651 | |||
652 | return ret; | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * pci_cacheline_size - determine cacheline size for PCI devices | ||
657 | * @dev: void | ||
658 | * | ||
659 | * We want to use the line-size of the outer-most cache. We assume | ||
660 | * that this line-size is the same for all CPUs. | ||
661 | * | ||
662 | * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). | ||
663 | * | ||
664 | * RETURNS: An appropriate -ERRNO error value on eror, or zero for success. | ||
665 | */ | ||
666 | static unsigned long | ||
667 | pci_cacheline_size (void) | ||
668 | { | ||
669 | u64 levels, unique_caches; | ||
670 | s64 status; | ||
671 | pal_cache_config_info_t cci; | ||
672 | static u8 cacheline_size; | ||
673 | |||
674 | if (cacheline_size) | ||
675 | return cacheline_size; | ||
676 | |||
677 | status = ia64_pal_cache_summary(&levels, &unique_caches); | ||
678 | if (status != 0) { | ||
679 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", | ||
680 | __FUNCTION__, status); | ||
681 | return SMP_CACHE_BYTES; | ||
682 | } | ||
683 | |||
684 | status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2, | ||
685 | &cci); | ||
686 | if (status != 0) { | ||
687 | printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n", | ||
688 | __FUNCTION__, status); | ||
689 | return SMP_CACHE_BYTES; | ||
690 | } | ||
691 | cacheline_size = 1 << cci.pcci_line_size; | ||
692 | return cacheline_size; | ||
693 | } | ||
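
/*
 * Illustrative only: pcci_line_size is the log2 of the line size in
 * bytes, so e.g. pcci_line_size == 7 yields 1 << 7 == 128 bytes for
 * the outermost cache level.
 */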
694 | |||
695 | /** | ||
696 | * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi() | ||
697 | * @dev: the PCI device for which MWI is enabled | ||
698 | * | ||
699 | * For ia64, we can get the cacheline sizes from PAL. | ||
700 | * | ||
701 | * RETURNS: An appropriate -ERRNO error value on eror, or zero for success. | ||
702 | */ | ||
703 | int | ||
704 | pcibios_prep_mwi (struct pci_dev *dev) | ||
705 | { | ||
706 | unsigned long desired_linesize, current_linesize; | ||
707 | int rc = 0; | ||
708 | u8 pci_linesize; | ||
709 | |||
710 | desired_linesize = pci_cacheline_size(); | ||
711 | |||
712 | pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize); | ||
713 | current_linesize = 4 * pci_linesize; | ||
714 | if (desired_linesize != current_linesize) { | ||
715 | printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,", | ||
716 | pci_name(dev), current_linesize); | ||
717 | if (current_linesize > desired_linesize) { | ||
718 | printk(" expected %lu bytes instead\n", desired_linesize); | ||
719 | rc = -EINVAL; | ||
720 | } else { | ||
721 | printk(" correcting to %lu\n", desired_linesize); | ||
722 | pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4); | ||
723 | } | ||
724 | } | ||
725 | return rc; | ||
726 | } | ||
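
/*
 * Illustrative only: PCI_CACHE_LINE_SIZE is programmed in 32-bit units,
 * hence the factor of 4 above.  With a 128-byte outer cache line, a
 * register value of 32 (128 / 4) is correct; a device still set to 16
 * (64 bytes) would be corrected here, while one set to 64 (256 bytes)
 * would make pci_set_mwi() fail with -EINVAL.
 */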
727 | |||
728 | int pci_vector_resources(int last, int nr_released) | ||
729 | { | ||
730 | int count = nr_released; | ||
731 | |||
732 | count += (IA64_LAST_DEVICE_VECTOR - last); | ||
733 | |||
734 | return count; | ||
735 | } | ||