Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--	arch/alpha/kernel/pci_iommu.c	201
1 file changed, 92 insertions(+), 109 deletions(-)
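This commit moves Alpha's PCI IOMMU code off the old arch-private pci_map_*/pci_*_consistent entry points and onto the generic struct dma_map_ops interface: the exported functions become static alpha_pci_* methods gathered into an ops table at the bottom of the file. For orientation, a minimal consumer-side sketch of what drivers call after this change; the device, buffer, and function name are hypothetical, while the DMA API calls are the standard generic ones:

#include <linux/dma-mapping.h>

/* Hypothetical driver code: these generic calls now reach
   alpha_pci_map_page()/alpha_pci_unmap_page() through the
   alpha_pci_ops table added by this commit. */
static int my_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))	/* ->mapping_error() */
		return -EIO;

	/* ... hand "handle" to the hardware and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE); /* ->unmap_page() */
	return 0;
}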
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 8449504f5e0b..ce9e54c887fa 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -216,10 +216,30 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
 	for (i = 0; i < n; ++i)
 		p[i] = 0;
 }
 
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK. */
-static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
+/*
+ * True if the machine supports DAC addressing, and DEV can
+ * make use of it given MASK.
+ */
+static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+{
+	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+	int ok = 1;
+
+	/* If this is not set, the machine doesn't support DAC at all. */
+	if (dac_offset == 0)
+		ok = 0;
+
+	/* The device has to be able to address our DAC bit. */
+	if ((dac_offset & dev->dma_mask) != dac_offset)
+		ok = 0;
+
+	/* If both conditions above are met, we are fine. */
+	DBGA("pci_dac_dma_supported %s from %p\n",
+	     ok ? "yes" : "no", __builtin_return_address(0));
+
+	return ok;
+}
 
 /* Map a single buffer of the indicated size for PCI DMA in streaming
    mode. The 32-bit PCI bus mastering address to use is returned.
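The hunk above also drops the forward declaration by moving the function body up; the logic is unchanged. The test reduces to one mask check: DAC (dual address cycle) addressing is usable only when the machine vector defines a DAC offset and the device's DMA mask covers every bit of that offset. A worked sketch of the same check; the offset value is an assumption for illustration, since alpha_mv.pci_dac_offset is machine specific:

/* Standalone restatement of the DAC test; dac_offset here is an
   assumed example value, not taken from any real machine vector. */
static int dac_usable(u64 dac_offset, u64 dma_mask)
{
	if (dac_offset == 0)
		return 0;		/* machine has no DAC window */
	return (dac_offset & dma_mask) == dac_offset;
}

/* With an assumed dac_offset of 1ULL << 40:
     dac_usable(1ULL << 40, 0xffffffffULL) == 0  (32-bit mask misses the DAC bit)
     dac_usable(1ULL << 40, ~0ULL)         == 1  (64-bit mask covers it)        */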
@@ -301,23 +321,36 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	return ret;
 }
 
-dma_addr_t
-pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
+/* Helper for generic DMA-mapping functions. */
+static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
 {
-	int dac_allowed;
+	if (dev && dev->bus == &pci_bus_type)
+		return to_pci_dev(dev);
 
-	if (dir == PCI_DMA_NONE)
-		BUG();
+	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
+	   BUG() otherwise. */
+	BUG_ON(!isa_bridge);
 
-	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
+	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+	   bridge is bus master then). */
+	if (!dev || !dev->dma_mask || !*dev->dma_mask)
+		return isa_bridge;
+
+	/* For EISA bus masters, return isa_bridge (it might have smaller
+	   dma_mask due to wiring limitations). */
+	if (*dev->dma_mask >= isa_bridge->dma_mask)
+		return isa_bridge;
+
+	/* This assumes ISA bus master with dma_mask 0xffffff. */
+	return NULL;
 }
-EXPORT_SYMBOL(pci_map_single);
 
-dma_addr_t
-pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
-	     size_t size, int dir)
+static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir,
+				     struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	int dac_allowed;
 
 	if (dir == PCI_DMA_NONE)
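alpha_gendev_to_pci() is what lets a single ops table serve PCI, EISA, and ISA devices: a device on pci_bus_type yields its own pci_dev, everything else is routed through isa_bridge, and NULL stands for a 24-bit ISA bus master that the mapping code pins to the ISA hose. For drivers the conversion is only a spelling change; the old and new calls below are equivalent (pdev, buf, and len are hypothetical, and the old spelling keeps working through the pci-dma-compat wrapper):

/* Before and after this commit -- both paths end up in
   pci_map_single_1() in this file. */
dma_addr_t a = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);    /* old */
dma_addr_t b = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE); /* new */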
@@ -327,7 +360,6 @@ pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
 	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
 				size, dac_allowed);
 }
-EXPORT_SYMBOL(pci_map_page);
 
 /* Unmap a single streaming mode DMA translation. The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
@@ -335,16 +367,17 @@ EXPORT_SYMBOL(pci_map_page);
    the cpu to the buffer are guaranteed to see whatever the device
    wrote there. */
 
-void
-pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
-		 int direction)
+static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
+				 size_t size, enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
 	unsigned long flags;
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
 	struct pci_iommu_arena *arena;
 	long dma_ofs, npages;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
 	if (dma_addr >= __direct_map_base
@@ -393,25 +426,16 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
 	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
 	      dma_addr, size, npages, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_unmap_single);
-
-void
-pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
-	       size_t size, int direction)
-{
-	pci_unmap_single(pdev, dma_addr, size, direction);
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Allocate and map kernel buffer using consistent mode DMA for PCI
    device. Returns non-NULL cpu-view pointer to the buffer if
    successful and sets *DMA_ADDRP to the pci side dma address as well,
    else DMA_ADDRP is undefined. */
 
-void *
-__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-		       dma_addr_t *dma_addrp, gfp_t gfp)
+static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_addrp, gfp_t gfp)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	void *cpu_addr;
 	long order = get_order(size);
 
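The coherent-allocation pair follows the same pattern: recover the pci_dev from the generic device, leave the body alone. A consumer-side sketch, with a hypothetical dev and a one-page ring buffer:

/* dma_alloc_coherent() now lands in alpha_pci_alloc_coherent(). */
dma_addr_t bus;
void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);

if (!ring)
	return -ENOMEM;
/* CPU accesses go through "ring"; the device uses "bus". */
dma_free_coherent(dev, PAGE_SIZE, ring, bus);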
@@ -439,13 +463,12 @@ try_again:
 		gfp |= GFP_DMA;
 		goto try_again;
 	}
 
 	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
 	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
 
 	return cpu_addr;
 }
-EXPORT_SYMBOL(__pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
    be values that were returned from pci_alloc_consistent. SIZE must
@@ -453,17 +476,16 @@ EXPORT_SYMBOL(__pci_alloc_consistent);
    References to the memory and mappings associated with CPU_ADDR or
    DMA_ADDR past this call are illegal. */
 
-void
-pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
-		    dma_addr_t dma_addr)
+static void alpha_pci_free_coherent(struct device *dev, size_t size,
+				    void *cpu_addr, dma_addr_t dma_addr)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 
 	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
 	      dma_addr, size, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(pci_free_consistent);
 
 /* Classify the elements of the scatterlist. Write dma_address
    of each element with:
@@ -626,23 +648,21 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 	return 1;
 }
 
-int
-pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	   int direction)
+static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct scatterlist *start, *end, *out;
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
 	dma_addr_t max_dma;
 	int dac_allowed;
-	struct device *dev;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
-	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-
-	dev = pdev ? &pdev->dev : NULL;
+	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -699,19 +719,19 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	/* Some allocation failed while mapping the scatterlist
 	   entries. Unmap them now. */
 	if (out > start)
-		pci_unmap_sg(pdev, start, out - start, direction);
+		pci_unmap_sg(pdev, start, out - start, dir);
 	return 0;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations. Again, cpu read
    rules concerning calls here are the same as for pci_unmap_single()
    above. */
 
-void
-pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
-	     int direction)
+static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	unsigned long flags;
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
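One usage subtlety worth noting for the two scatterlist hunks: dma_map_sg() may coalesce entries, so descriptors are programmed from the returned count while the eventual unmap still takes the original nents. A sketch with hypothetical names (nic_dev, sgl, write_desc):

int i, n;
struct scatterlist *s;

n = dma_map_sg(nic_dev, sgl, nents, DMA_TO_DEVICE); /* alpha_pci_map_sg() */
if (n == 0)
	return -ENOMEM;	/* an arena allocation failed; already unwound */

for_each_sg(sgl, s, n, i)
	write_desc(sg_dma_address(s), sg_dma_len(s));	/* hypothetical */

dma_unmap_sg(nic_dev, sgl, nents, DMA_TO_DEVICE);	/* original nents */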
@@ -719,7 +739,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	dma_addr_t max_dma;
 	dma_addr_t fbeg, fend;
 
-	if (direction == PCI_DMA_NONE)
+	if (dir == PCI_DMA_NONE)
 		BUG();
 
 	if (! alpha_mv.mv_pci_tbi)
@@ -783,15 +803,13 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 
 	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
 }
-EXPORT_SYMBOL(pci_unmap_sg);
-
 
 /* Return whether the given PCI device DMA address mask can be
    supported properly. */
 
-int
-pci_dma_supported(struct pci_dev *pdev, u64 mask)
+static int alpha_pci_supported(struct device *dev, u64 mask)
 {
+	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	struct pci_controller *hose;
 	struct pci_iommu_arena *arena;
 
@@ -818,7 +836,6 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 
 	return 0;
 }
-EXPORT_SYMBOL(pci_dma_supported);
 
 
 /*
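Mask negotiation becomes a method as well; a driver probing on Alpha now reaches alpha_pci_supported() through the usual generic call (pdev hypothetical):

/* dma_set_mask() -> alpha_pci_set_mask() -> pci_dma_supported(). */
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
	return -EIO;	/* no direct window or arena covers the mask */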
@@ -918,66 +935,32 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
 	return 0;
 }
 
-/* True if the machine supports DAC addressing, and DEV can
-   make use of it given MASK. */
-
-static int
-pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
-{
-	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
-	int ok = 1;
-
-	/* If this is not set, the machine doesn't support DAC at all. */
-	if (dac_offset == 0)
-		ok = 0;
-
-	/* The device has to be able to address our DAC bit. */
-	if ((dac_offset & dev->dma_mask) != dac_offset)
-		ok = 0;
-
-	/* If both conditions above are met, we are fine. */
-	DBGA("pci_dac_dma_supported %s from %p\n",
-	     ok ? "yes" : "no", __builtin_return_address(0));
-
-	return ok;
-}
-
-/* Helper for generic DMA-mapping functions. */
-
-struct pci_dev *
-alpha_gendev_to_pci(struct device *dev)
+static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	if (dev && dev->bus == &pci_bus_type)
-		return to_pci_dev(dev);
-
-	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
-	   BUG() otherwise. */
-	BUG_ON(!isa_bridge);
-
-	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
-	   bridge is bus master then). */
-	if (!dev || !dev->dma_mask || !*dev->dma_mask)
-		return isa_bridge;
-
-	/* For EISA bus masters, return isa_bridge (it might have smaller
-	   dma_mask due to wiring limitations). */
-	if (*dev->dma_mask >= isa_bridge->dma_mask)
-		return isa_bridge;
-
-	/* This assumes ISA bus master with dma_mask 0xffffff. */
-	return NULL;
+	return dma_addr == 0;
 }
-EXPORT_SYMBOL(alpha_gendev_to_pci);
 
-int
-dma_set_mask(struct device *dev, u64 mask)
+static int alpha_pci_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask ||
 	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
 		return -EIO;
 
 	*dev->dma_mask = mask;
-
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
+
+struct dma_map_ops alpha_pci_ops = {
+	.alloc_coherent		= alpha_pci_alloc_coherent,
+	.free_coherent		= alpha_pci_free_coherent,
+	.map_page		= alpha_pci_map_page,
+	.unmap_page		= alpha_pci_unmap_page,
+	.map_sg			= alpha_pci_map_sg,
+	.unmap_sg		= alpha_pci_unmap_sg,
+	.mapping_error		= alpha_pci_mapping_error,
+	.dma_supported		= alpha_pci_supported,
+	.set_dma_mask		= alpha_pci_set_mask,
+};
+
+struct dma_map_ops *dma_ops = &alpha_pci_ops;
+EXPORT_SYMBOL(dma_ops);
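The exported dma_ops pointer is what the architecture's get_dma_ops() returns, closing the loop: every generic DMA call on Alpha is now a method call on alpha_pci_ops. A simplified sketch of that dispatch, paraphrasing the generic dma-mapping headers of this kernel generation rather than quoting them:

/* How the generic layer consumes the table published above. */
extern struct dma_map_ops *dma_ops;	/* == &alpha_pci_ops */

static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = dma_ops;	/* get_dma_ops(dev) */

	return ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size, dir, NULL);
}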