Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--  arch/alpha/kernel/pci_iommu.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index cd634795aa9c..3f844d26d2c7 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -236,7 +236,7 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
 		ok = 0;
 
 	/* If both conditions above are met, we are fine. */
-	DBGA("pci_dac_dma_supported %s from %p\n",
+	DBGA("pci_dac_dma_supported %s from %pf\n",
 	     ok ? "yes" : "no", __builtin_return_address(0));
 
 	return ok;
@@ -268,7 +268,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	    && paddr + size <= __direct_map_size) {
 		ret = paddr + __direct_map_base;
 
-		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
+		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
 		      cpu_addr, size, ret, __builtin_return_address(0));
 
 		return ret;
@@ -279,7 +279,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	if (dac_allowed) {
 		ret = paddr + alpha_mv.pci_dac_offset;
 
-		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
+		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
 		      cpu_addr, size, ret, __builtin_return_address(0));
 
 		return ret;
@@ -316,7 +316,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
 	ret += (unsigned long)cpu_addr & ~PAGE_MASK;
 
-	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
+	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
 	      cpu_addr, size, npages, ret, __builtin_return_address(0));
 
 	return ret;
@@ -385,14 +385,14 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	    && dma_addr < __direct_map_base + __direct_map_size) {
 		/* Nothing to do. */
 
-		DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
+		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
 		      dma_addr, size, __builtin_return_address(0));
 
 		return;
 	}
 
 	if (dma_addr > 0xffffffff) {
-		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
+		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
 		      dma_addr, size, __builtin_return_address(0));
 		return;
 	}
@@ -424,7 +424,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 
 	spin_unlock_irqrestore(&arena->lock, flags);
 
-	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
+	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
 	      dma_addr, size, npages, __builtin_return_address(0));
 }
 
@@ -447,7 +447,7 @@ try_again:
 	cpu_addr = (void *)__get_free_pages(gfp, order);
 	if (! cpu_addr) {
 		printk(KERN_INFO "pci_alloc_consistent: "
-		       "get_free_pages failed from %p\n",
+		       "get_free_pages failed from %pf\n",
 			__builtin_return_address(0));
 		/* ??? Really atomic allocation? Otherwise we could play
 		   with vmalloc and sg if we can't find contiguous memory. */
@@ -466,7 +466,7 @@ try_again:
 		goto try_again;
 	}
 
-	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
+	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
 	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
 
 	return cpu_addr;
@@ -486,7 +486,7 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 
-	DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
+	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
 	      dma_addr, size, __builtin_return_address(0));
 }
 
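For reference, a minimal, hypothetical demo module (not part of this patch) sketching what the %p -> %pf change buys: %pf resolves the code address returned by __builtin_return_address(0) to the caller's symbol name instead of printing a raw kernel pointer. This sketch assumes a kernel of the same era as the patch; later kernels dropped %pf in favour of %ps.

/* pf_demo.c - hypothetical example, assumes a kernel that still supports %pf */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static noinline void report_caller(void)
{
	/* With %pf the return address is printed as a symbol name,
	   e.g. "pf_demo_init", rather than as an opaque pointer. */
	printk(KERN_INFO "report_caller: called from %pf\n",
	       __builtin_return_address(0));
}

static int __init pf_demo_init(void)
{
	report_caller();
	return 0;
}

static void __exit pf_demo_exit(void)
{
}

module_init(pf_demo_init);
module_exit(pf_demo_exit);
MODULE_LICENSE("GPL");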