Diffstat (limited to 'Documentation')
-rw-r--r--	Documentation/DMA-mapping.txt	|  38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index d84f89dbf921..b463ecd0c7ce 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -315,11 +315,11 @@ you should do:
 
 	dma_addr_t dma_handle;
 
-	cpu_addr = pci_alloc_consistent(dev, size, &dma_handle);
+	cpu_addr = pci_alloc_consistent(pdev, size, &dma_handle);
 
-where dev is a struct pci_dev *. You should pass NULL for PCI like buses
-where devices don't have struct pci_dev (like ISA, EISA). This may be
-called in interrupt context.
+where pdev is a struct pci_dev *. This may be called in interrupt context.
+You should use dma_alloc_coherent (see DMA-API.txt) for buses
+where devices don't have struct pci_dev (like ISA, EISA).
 
 This argument is needed because the DMA translations may be bus
 specific (and often is private to the bus which the device is attached
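
For context, a minimal sketch of how the renamed call can sit in a
driver's setup path. The driver type and field names here (struct
mydev, desc_ring) are hypothetical, not taken from the patched text:

	#include <linux/pci.h>

	struct mydev {
		void *desc_ring;		/* CPU virtual address */
		dma_addr_t desc_ring_dma;	/* bus address for the device */
	};

	static int mydev_alloc_ring(struct pci_dev *pdev, struct mydev *md)
	{
		/* One page of consistent memory, visible to both the CPU
		 * and the device without explicit sync operations. */
		md->desc_ring = pci_alloc_consistent(pdev, PAGE_SIZE,
						     &md->desc_ring_dma);
		if (!md->desc_ring)
			return -ENOMEM;
		return 0;
	}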
@@ -332,7 +332,7 @@ __get_free_pages (but takes size instead of a page order). If your
 driver needs regions sized smaller than a page, you may prefer using
 the pci_pool interface, described below.
 
-The consistent DMA mapping interfaces, for non-NULL dev, will by
+The consistent DMA mapping interfaces, for non-NULL pdev, will by
 default return a DMA address which is SAC (Single Address Cycle)
 addressable. Even if the device indicates (via PCI dma mask) that it
 may address the upper 32-bits and thus perform DAC cycles, consistent
@@ -354,9 +354,9 @@ buffer you receive will not cross a 64K boundary.
 
 To unmap and free such a DMA region, you call:
 
-	pci_free_consistent(dev, size, cpu_addr, dma_handle);
+	pci_free_consistent(pdev, size, cpu_addr, dma_handle);
 
-where dev, size are the same as in the above call and cpu_addr and
+where pdev, size are the same as in the above call and cpu_addr and
 dma_handle are the values pci_alloc_consistent returned to you.
 This function may not be called in interrupt context.
 
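
The matching teardown for the sketch above passes back exactly the
size, CPU address and bus address from the allocation, and (per the
text) must run outside interrupt context:

	static void mydev_free_ring(struct pci_dev *pdev, struct mydev *md)
	{
		pci_free_consistent(pdev, PAGE_SIZE, md->desc_ring,
				    md->desc_ring_dma);
		md->desc_ring = NULL;
	}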
@@ -371,9 +371,9 @@ Create a pci_pool like this:
 
 	struct pci_pool *pool;
 
-	pool = pci_pool_create(name, dev, size, align, alloc);
+	pool = pci_pool_create(name, pdev, size, align, alloc);
 
-The "name" is for diagnostics (like a kmem_cache name); dev and size
+The "name" is for diagnostics (like a kmem_cache name); pdev and size
 are as above. The device's hardware alignment requirement for this
 type of data is "align" (which is expressed in bytes, and must be a
 power of two). If your device has no boundary crossing restrictions,
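
As a hedged illustration of the pool calls named here, assuming
64-byte descriptors with 64-byte alignment and no boundary
restriction (the pool name and sizes are made up for the example):

	struct pci_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = pci_pool_create("mydev_desc", pdev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
	/* ... use vaddr / dma ... */
	pci_pool_free(pool, vaddr, dma);
	pci_pool_destroy(pool);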
@@ -472,11 +472,11 @@ To map a single region, you do:
 	void *addr = buffer->ptr;
 	size_t size = buffer->len;
 
-	dma_handle = pci_map_single(dev, addr, size, direction);
+	dma_handle = pci_map_single(pdev, addr, size, direction);
 
 and to unmap it:
 
-	pci_unmap_single(dev, dma_handle, size, direction);
+	pci_unmap_single(pdev, dma_handle, size, direction);
 
 You should call pci_unmap_single when the DMA activity is finished, e.g.
 from the interrupt which told you that the DMA transfer is done.
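
A short sketch of these two calls in a streaming transmit path; the
buffer layout and the completion-interrupt placement are illustrative
assumptions:

	dma_addr_t dma_handle;

	/* CPU has filled the buffer; map it for device reads. */
	dma_handle = pci_map_single(pdev, buffer->ptr, buffer->len,
				    PCI_DMA_TODEVICE);
	/* ... hand dma_handle to the hardware and start the DMA ... */

	/* Later, typically from the completion interrupt: */
	pci_unmap_single(pdev, dma_handle, buffer->len, PCI_DMA_TODEVICE);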
@@ -493,17 +493,17 @@ Specifically:
 	unsigned long offset = buffer->offset;
 	size_t size = buffer->len;
 
-	dma_handle = pci_map_page(dev, page, offset, size, direction);
+	dma_handle = pci_map_page(pdev, page, offset, size, direction);
 
 	...
 
-	pci_unmap_page(dev, dma_handle, size, direction);
+	pci_unmap_page(pdev, dma_handle, size, direction);
 
 Here, "offset" means byte offset within the given page.
 
 With scatterlists, you map a region gathered from several regions by:
 
-	int i, count = pci_map_sg(dev, sglist, nents, direction);
+	int i, count = pci_map_sg(pdev, sglist, nents, direction);
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, count, i) {
@@ -527,7 +527,7 @@ accessed sg->address and sg->length as shown above.
 
 To unmap a scatterlist, just call:
 
-	pci_unmap_sg(dev, sglist, nents, direction);
+	pci_unmap_sg(pdev, sglist, nents, direction);
 
 Again, make sure DMA activity has already finished.
 
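
Putting the scatterlist hunks together, a sketch of the whole cycle
(hw_set_descriptor() is a hypothetical hardware-programming helper,
and sglist/nents are assumed to be set up already):

	int i, count;
	struct scatterlist *sg;

	count = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
	for_each_sg(sglist, sg, count, i) {
		/* count may be less than nents if entries were merged;
		 * program one hardware descriptor per mapped entry. */
		hw_set_descriptor(i, sg_dma_address(sg), sg_dma_len(sg));
	}
	/* ... DMA completes ... */

	/* Note: unmap with the original nents, not the returned count. */
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);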
@@ -550,11 +550,11 @@ correct copy of the DMA buffer.
 So, firstly, just map it with pci_map_{single,sg}, and after each DMA
 transfer call either:
 
-	pci_dma_sync_single_for_cpu(dev, dma_handle, size, direction);
+	pci_dma_sync_single_for_cpu(pdev, dma_handle, size, direction);
 
 or:
 
-	pci_dma_sync_sg_for_cpu(dev, sglist, nents, direction);
+	pci_dma_sync_sg_for_cpu(pdev, sglist, nents, direction);
 
 as appropriate.
 
@@ -562,7 +562,7 @@ Then, if you wish to let the device get at the DMA area again,
 finish accessing the data with the cpu, and then before actually
 giving the buffer to the hardware call either:
 
-	pci_dma_sync_single_for_device(dev, dma_handle, size, direction);
+	pci_dma_sync_single_for_device(pdev, dma_handle, size, direction);
 
 or:
 
@@ -739,7 +739,7 @@ failure can be determined by:
 
 	dma_addr_t dma_handle;
 
-	dma_handle = pci_map_single(dev, addr, size, direction);
+	dma_handle = pci_map_single(pdev, addr, size, direction);
 	if (pci_dma_mapping_error(dma_handle)) {
 		/*
 		 * reduce current DMA mapping usage,
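
As a hedged sketch of acting on that check (the recovery helpers here
are placeholders, not from the document):

	dma_handle = pci_map_single(pdev, addr, size, direction);
	if (pci_dma_mapping_error(dma_handle)) {
		/* Mapping failed: back off instead of handing the
		 * device a bogus bus address. */
		mydev_stop_queue(md);		/* hypothetical helper */
		mydev_schedule_restart(md);	/* hypothetical helper */
		return -ENOMEM;
	}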