author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 13:17:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 13:17:59 -0400
commit    a217656cb26c5b7ebe9900354b2e808c1f74b470 (patch)
tree      37679bb7f1cebf927bac353b42e6bda8b4e7c63e /Documentation
parent    8f45c1a58a25c3a1a2f42521445e1e786c4c0b92 (diff)
parent    a53edac131cadee317e7e36a5908bb4c71d874cd (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (21 commits)
pciehp: fix error message about getting hotplug control
pci/irq: let pci_device_shutdown call pci_msi_shutdown v2
pci/irq: restore mask_bits in msi shutdown -v3
doc: replace yet another dev with pdev for consistency in DMA-mapping.txt
PCI: don't expose struct pci_vpd to userspace
doc: fix an incorrect suggestion to pass NULL for PCI like buses
Consistently use pdev as the variable of type struct pci_dev *.
pciehp: Fix command write
shpchp: fix slot name
make pciehp_acpi_get_hp_hw_control_from_firmware()
pciehp: Clean up pcie_init()
pciehp: Mask hotplug interrupt at controller release
pciehp: Remove useless hotplug interrupt enabling
pciehp: Fix wrong slot capability check
pciehp: Fix wrong slot control register access
pciehp: Add missing memory barrier
pciehp: Fix interrupt event handling
pciehp: fix slot name
Update MAINTAINERS with location of PCI tree
PCI: Add Intel SCH PCI IDs
...
Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/DMA-mapping.txt  38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index d84f89dbf921..b463ecd0c7ce 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -315,11 +315,11 @@ you should do:
 
 	dma_addr_t dma_handle;
 
-	cpu_addr = pci_alloc_consistent(dev, size, &dma_handle);
+	cpu_addr = pci_alloc_consistent(pdev, size, &dma_handle);
 
-where dev is a struct pci_dev *. You should pass NULL for PCI like buses
-where devices don't have struct pci_dev (like ISA, EISA). This may be
-called in interrupt context.
+where pdev is a struct pci_dev *. This may be called in interrupt context.
+You should use dma_alloc_coherent (see DMA-API.txt) for buses
+where devices don't have struct pci_dev (like ISA, EISA).
 
 This argument is needed because the DMA translations may be bus
 specific (and often is private to the bus which the device is attached
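For orientation, a minimal sketch (not part of the patch) of this allocation call in a driver, using the pdev naming the patch standardizes on; struct foo_dev and the foo_* names are hypothetical:

    #include <linux/pci.h>

    /* Hypothetical per-device state; the foo_* names are illustrative. */
    struct foo_dev {
            struct pci_dev *pdev;
            void *desc_ring;          /* CPU virtual address */
            dma_addr_t desc_ring_dma; /* bus address the device uses */
    };

    static int foo_alloc_ring(struct foo_dev *foo, size_t size)
    {
            /* Consistent (coherent) memory: CPU and device see each
             * other's updates without explicit sync calls, and the
             * call is usable in interrupt context. */
            foo->desc_ring = pci_alloc_consistent(foo->pdev, size,
                                                  &foo->desc_ring_dma);
            if (!foo->desc_ring)
                    return -ENOMEM;
            return 0;
    }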
@@ -332,7 +332,7 @@ __get_free_pages (but takes size instead of a page order). If your
 driver needs regions sized smaller than a page, you may prefer using
 the pci_pool interface, described below.
 
-The consistent DMA mapping interfaces, for non-NULL dev, will by
+The consistent DMA mapping interfaces, for non-NULL pdev, will by
 default return a DMA address which is SAC (Single Address Cycle)
 addressable. Even if the device indicates (via PCI dma mask) that it
 may address the upper 32-bits and thus perform DAC cycles, consistent
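A driver that genuinely wants DAC addresses for consistent memory must raise the consistent mask explicitly; a hedged sketch against the PCI API of this kernel era (pci_set_consistent_dma_mask and the DMA_*BIT_MASK constants), with fallback to the 32-bit SAC default:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Sketch: request 64-bit (DAC-capable) consistent allocations,
     * falling back to the SAC default if the platform refuses. */
    static int foo_set_consistent_mask(struct pci_dev *pdev)
    {
            if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0)
                    return 0;
            return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
    }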
@@ -354,9 +354,9 @@ buffer you receive will not cross a 64K boundary.
 
 To unmap and free such a DMA region, you call:
 
-	pci_free_consistent(dev, size, cpu_addr, dma_handle);
+	pci_free_consistent(pdev, size, cpu_addr, dma_handle);
 
-where dev, size are the same as in the above call and cpu_addr and
+where pdev, size are the same as in the above call and cpu_addr and
 dma_handle are the values pci_alloc_consistent returned to you.
 This function may not be called in interrupt context.
 
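The matching teardown sketch for the hypothetical foo driver above; per the text in this hunk, it must run in process context:

    static void foo_free_ring(struct foo_dev *foo, size_t size)
    {
            /* May not be called from interrupt context. */
            pci_free_consistent(foo->pdev, size, foo->desc_ring,
                                foo->desc_ring_dma);
            foo->desc_ring = NULL;
    }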
@@ -371,9 +371,9 @@ Create a pci_pool like this:
 
 	struct pci_pool *pool;
 
-	pool = pci_pool_create(name, dev, size, align, alloc);
+	pool = pci_pool_create(name, pdev, size, align, alloc);
 
-The "name" is for diagnostics (like a kmem_cache name); dev and size
+The "name" is for diagnostics (like a kmem_cache name); pdev and size
 are as above. The device's hardware alignment requirement for this
 type of data is "align" (which is expressed in bytes, and must be a
 power of two). If your device has no boundary crossing restrictions,
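As a sketch of the full pci_pool lifecycle this implies (create, alloc, free, destroy); the pool name and the 32-byte size/align are illustrative:

    static int foo_pool_demo(struct pci_dev *pdev)
    {
            struct pci_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* size == align == 32 bytes; a 0 "alloc" argument means
             * no boundary crossing restriction. */
            pool = pci_pool_create("foo-desc", pdev, 32, 32, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
            if (!vaddr) {
                    pci_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... use vaddr (CPU side) and dma (device side) ... */

            pci_pool_free(pool, vaddr, dma);
            pci_pool_destroy(pool);
            return 0;
    }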
@@ -472,11 +472,11 @@ To map a single region, you do:
 	void *addr = buffer->ptr;
 	size_t size = buffer->len;
 
-	dma_handle = pci_map_single(dev, addr, size, direction);
+	dma_handle = pci_map_single(pdev, addr, size, direction);
 
 and to unmap it:
 
-	pci_unmap_single(dev, dma_handle, size, direction);
+	pci_unmap_single(pdev, dma_handle, size, direction);
 
 You should call pci_unmap_single when the DMA activity is finished, e.g.
 from the interrupt which told you that the DMA transfer is done.
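A hedged sketch of that map/unmap pairing for a CPU-to-device transfer, reusing the hypothetical foo driver; the error check uses pci_dma_mapping_error(), which the document covers near its end:

    static int foo_start_tx(struct foo_dev *foo, void *buf, size_t len)
    {
            dma_addr_t dma;

            /* Streaming mapping; ownership passes to the device. */
            dma = pci_map_single(foo->pdev, buf, len, PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(dma))
                    return -ENOMEM;

            /* ... hand dma to the hardware and start the transfer ... */
            return 0;
    }

    /* Typically called from the interrupt announcing TX completion. */
    static void foo_tx_done(struct foo_dev *foo, dma_addr_t dma, size_t len)
    {
            pci_unmap_single(foo->pdev, dma, len, PCI_DMA_TODEVICE);
    }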
@@ -493,17 +493,17 @@ Specifically:
 	unsigned long offset = buffer->offset;
 	size_t size = buffer->len;
 
-	dma_handle = pci_map_page(dev, page, offset, size, direction);
+	dma_handle = pci_map_page(pdev, page, offset, size, direction);
 
 	...
 
-	pci_unmap_page(dev, dma_handle, size, direction);
+	pci_unmap_page(pdev, dma_handle, size, direction);
 
 Here, "offset" means byte offset within the given page.
 
 With scatterlists, you map a region gathered from several regions by:
 
-	int i, count = pci_map_sg(dev, sglist, nents, direction);
+	int i, count = pci_map_sg(pdev, sglist, nents, direction);
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, count, i) {
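A sketch of driving hardware from the mapped list; foo_hw_set_sg_entry() is a hypothetical helper standing in for however a particular device programs one scatter-gather slot:

    static int foo_map_rx_sg(struct foo_dev *foo,
                             struct scatterlist *sglist, int nents)
    {
            struct scatterlist *sg;
            int i, count;

            count = pci_map_sg(foo->pdev, sglist, nents,
                               PCI_DMA_FROMDEVICE);
            if (count == 0)
                    return -ENOMEM;

            /* count may be smaller than nents if entries were merged;
             * always iterate over the returned count. */
            for_each_sg(sglist, sg, count, i)
                    foo_hw_set_sg_entry(foo, i, sg_dma_address(sg),
                                        sg_dma_len(sg));
            return count;
    }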
@@ -527,7 +527,7 @@ accessed sg->address and sg->length as shown above.
 
 To unmap a scatterlist, just call:
 
-	pci_unmap_sg(dev, sglist, nents, direction);
+	pci_unmap_sg(pdev, sglist, nents, direction);
 
 Again, make sure DMA activity has already finished.
 
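And the matching completion path for the sketch above; note that the unmap takes the original nents, not the count pci_map_sg returned:

    static void foo_rx_sg_done(struct foo_dev *foo,
                               struct scatterlist *sglist, int nents)
    {
            /* DMA must already be finished at this point. */
            pci_unmap_sg(foo->pdev, sglist, nents, PCI_DMA_FROMDEVICE);
    }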
@@ -550,11 +550,11 @@ correct copy of the DMA buffer.
 So, firstly, just map it with pci_map_{single,sg}, and after each DMA
 transfer call either:
 
-	pci_dma_sync_single_for_cpu(dev, dma_handle, size, direction);
+	pci_dma_sync_single_for_cpu(pdev, dma_handle, size, direction);
 
 or:
 
-	pci_dma_sync_sg_for_cpu(dev, sglist, nents, direction);
+	pci_dma_sync_sg_for_cpu(pdev, sglist, nents, direction);
 
 as appropriate.
 
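A sketch of the receive side of that rule, with the hypothetical foo driver taking a buffer back from the device in its interrupt handler:

    static void foo_rx_irq(struct foo_dev *foo, dma_addr_t dma,
                           void *buf, size_t len)
    {
            /* Transfer ownership back to the CPU before reading. */
            pci_dma_sync_single_for_cpu(foo->pdev, dma, len,
                                        PCI_DMA_FROMDEVICE);
            /* ... the CPU may now safely read buf ... */
    }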
@@ -562,7 +562,7 @@ Then, if you wish to let the device get at the DMA area again,
 finish accessing the data with the cpu, and then before actually
 giving the buffer to the hardware call either:
 
-	pci_dma_sync_single_for_device(dev, dma_handle, size, direction);
+	pci_dma_sync_single_for_device(pdev, dma_handle, size, direction);
 
 or:
 
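And the counterpart sketch for handing the same buffer back to the hardware once the CPU is done with it:

    static void foo_rx_refill(struct foo_dev *foo, dma_addr_t dma,
                              size_t len)
    {
            pci_dma_sync_single_for_device(foo->pdev, dma, len,
                                           PCI_DMA_FROMDEVICE);
            /* ... now tell the device it may DMA into the buffer
             * again ... */
    }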
@@ -739,7 +739,7 @@ failure can be determined by:
 
 	dma_addr_t dma_handle;
 
-	dma_handle = pci_map_single(dev, addr, size, direction);
+	dma_handle = pci_map_single(pdev, addr, size, direction);
 	if (pci_dma_mapping_error(dma_handle)) {
 		/*
 		 * reduce current DMA mapping usage,
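The hunk shows only the check itself; a hedged sketch of the unwind it calls for, mapping two buffers for one request and releasing the first mapping when the second fails (struct foo_req and its fields are illustrative):

    /* Hypothetical request carrying two streaming mappings. */
    struct foo_req {
            void *hdr, *data;
            size_t hdr_len, data_len;
            dma_addr_t hdr_dma, data_dma;
    };

    static int foo_map_request(struct foo_dev *foo, struct foo_req *req)
    {
            req->hdr_dma = pci_map_single(foo->pdev, req->hdr,
                                          req->hdr_len, PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(req->hdr_dma))
                    return -ENOMEM;

            req->data_dma = pci_map_single(foo->pdev, req->data,
                                           req->data_len,
                                           PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(req->data_dma)) {
                    /* Reduce current DMA mapping usage: release what
                     * we already mapped, then fail or retry later. */
                    pci_unmap_single(foo->pdev, req->hdr_dma,
                                     req->hdr_len, PCI_DMA_TODEVICE);
                    return -ENOMEM;
            }
            return 0;
    }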