diff options
36 files changed, 512 insertions, 568 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ea5eb5f79adb..257033c691f2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -17,6 +17,7 @@ config ARM | |||
| 17 | select HAVE_KRETPROBES if (HAVE_KPROBES) | 17 | select HAVE_KRETPROBES if (HAVE_KPROBES) |
| 18 | select HAVE_FTRACE if (!XIP_KERNEL) | 18 | select HAVE_FTRACE if (!XIP_KERNEL) |
| 19 | select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) | 19 | select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) |
| 20 | select HAVE_GENERIC_DMA_COHERENT | ||
| 20 | help | 21 | help |
| 21 | The ARM series is a line of low-power-consumption RISC chip designs | 22 | The ARM series is a line of low-power-consumption RISC chip designs |
| 22 | licensed by ARM Ltd and targeted at embedded applications and | 23 | licensed by ARM Ltd and targeted at embedded applications and |
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c index 333a82a3717e..db7b3e38ef1d 100644 --- a/arch/arm/mm/consistent.c +++ b/arch/arm/mm/consistent.c | |||
| @@ -274,6 +274,11 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
| 274 | void * | 274 | void * |
| 275 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) | 275 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) |
| 276 | { | 276 | { |
| 277 | void *memory; | ||
| 278 | |||
| 279 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) | ||
| 280 | return memory; | ||
| 281 | |||
| 277 | if (arch_is_coherent()) { | 282 | if (arch_is_coherent()) { |
| 278 | void *virt; | 283 | void *virt; |
| 279 | 284 | ||
| @@ -362,6 +367,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr | |||
| 362 | 367 | ||
| 363 | WARN_ON(irqs_disabled()); | 368 | WARN_ON(irqs_disabled()); |
| 364 | 369 | ||
| 370 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | ||
| 371 | return; | ||
| 372 | |||
| 365 | if (arch_is_coherent()) { | 373 | if (arch_is_coherent()) { |
| 366 | kfree(cpu_addr); | 374 | kfree(cpu_addr); |
| 367 | return; | 375 | return; |
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index 2a92cb1886ca..7a64fcef9d07 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig | |||
| @@ -641,6 +641,7 @@ config PCI | |||
| 641 | bool | 641 | bool |
| 642 | depends on ETRAX_CARDBUS | 642 | depends on ETRAX_CARDBUS |
| 643 | default y | 643 | default y |
| 644 | select HAVE_GENERIC_DMA_COHERENT | ||
| 644 | 645 | ||
| 645 | config ETRAX_IOP_FW_LOAD | 646 | config ETRAX_IOP_FW_LOAD |
| 646 | tristate "IO-processor hotplug firmware loading support" | 647 | tristate "IO-processor hotplug firmware loading support" |
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index e0364654fc44..fbe65954ee6c 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c | |||
| @@ -15,35 +15,16 @@ | |||
| 15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
| 16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
| 17 | 17 | ||
| 18 | struct dma_coherent_mem { | ||
| 19 | void *virt_base; | ||
| 20 | u32 device_base; | ||
| 21 | int size; | ||
| 22 | int flags; | ||
| 23 | unsigned long *bitmap; | ||
| 24 | }; | ||
| 25 | |||
| 26 | void *dma_alloc_coherent(struct device *dev, size_t size, | 18 | void *dma_alloc_coherent(struct device *dev, size_t size, |
| 27 | dma_addr_t *dma_handle, gfp_t gfp) | 19 | dma_addr_t *dma_handle, gfp_t gfp) |
| 28 | { | 20 | { |
| 29 | void *ret; | 21 | void *ret; |
| 30 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 31 | int order = get_order(size); | 22 | int order = get_order(size); |
| 32 | /* ignore region specifiers */ | 23 | /* ignore region specifiers */ |
| 33 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | 24 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); |
| 34 | 25 | ||
| 35 | if (mem) { | 26 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) |
| 36 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | 27 | return ret; |
| 37 | order); | ||
| 38 | if (page >= 0) { | ||
| 39 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | ||
| 40 | ret = mem->virt_base + (page << PAGE_SHIFT); | ||
| 41 | memset(ret, 0, size); | ||
| 42 | return ret; | ||
| 43 | } | ||
| 44 | if (mem->flags & DMA_MEMORY_EXCLUSIVE) | ||
| 45 | return NULL; | ||
| 46 | } | ||
| 47 | 28 | ||
| 48 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | 29 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) |
| 49 | gfp |= GFP_DMA; | 30 | gfp |= GFP_DMA; |
| @@ -60,90 +41,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
| 60 | void dma_free_coherent(struct device *dev, size_t size, | 41 | void dma_free_coherent(struct device *dev, size_t size, |
| 61 | void *vaddr, dma_addr_t dma_handle) | 42 | void *vaddr, dma_addr_t dma_handle) |
| 62 | { | 43 | { |
| 63 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 64 | int order = get_order(size); | 44 | int order = get_order(size); |
| 65 | 45 | ||
| 66 | if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { | 46 | if (!dma_release_from_coherent(dev, order, vaddr)) |
| 67 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
| 68 | |||
| 69 | bitmap_release_region(mem->bitmap, page, order); | ||
| 70 | } else | ||
| 71 | free_pages((unsigned long)vaddr, order); | 47 | free_pages((unsigned long)vaddr, order); |
| 72 | } | 48 | } |
| 73 | 49 | ||
| 74 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 75 | dma_addr_t device_addr, size_t size, int flags) | ||
| 76 | { | ||
| 77 | void __iomem *mem_base; | ||
| 78 | int pages = size >> PAGE_SHIFT; | ||
| 79 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
| 80 | |||
| 81 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | ||
| 82 | goto out; | ||
| 83 | if (!size) | ||
| 84 | goto out; | ||
| 85 | if (dev->dma_mem) | ||
| 86 | goto out; | ||
| 87 | |||
| 88 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
| 89 | |||
| 90 | mem_base = ioremap(bus_addr, size); | ||
| 91 | if (!mem_base) | ||
| 92 | goto out; | ||
| 93 | |||
| 94 | dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
| 95 | if (!dev->dma_mem) | ||
| 96 | goto iounmap_out; | ||
| 97 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
| 98 | if (!dev->dma_mem->bitmap) | ||
| 99 | goto free1_out; | ||
| 100 | |||
| 101 | dev->dma_mem->virt_base = mem_base; | ||
| 102 | dev->dma_mem->device_base = device_addr; | ||
| 103 | dev->dma_mem->size = pages; | ||
| 104 | dev->dma_mem->flags = flags; | ||
| 105 | |||
| 106 | if (flags & DMA_MEMORY_MAP) | ||
| 107 | return DMA_MEMORY_MAP; | ||
| 108 | |||
| 109 | return DMA_MEMORY_IO; | ||
| 110 | |||
| 111 | free1_out: | ||
| 112 | kfree(dev->dma_mem); | ||
| 113 | iounmap_out: | ||
| 114 | iounmap(mem_base); | ||
| 115 | out: | ||
| 116 | return 0; | ||
| 117 | } | ||
| 118 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
| 119 | |||
| 120 | void dma_release_declared_memory(struct device *dev) | ||
| 121 | { | ||
| 122 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 123 | |||
| 124 | if(!mem) | ||
| 125 | return; | ||
| 126 | dev->dma_mem = NULL; | ||
| 127 | iounmap(mem->virt_base); | ||
| 128 | kfree(mem->bitmap); | ||
| 129 | kfree(mem); | ||
| 130 | } | ||
| 131 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
| 132 | |||
| 133 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
| 134 | dma_addr_t device_addr, size_t size) | ||
| 135 | { | ||
| 136 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 137 | int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
| 138 | int pos, err; | ||
| 139 | |||
| 140 | if (!mem) | ||
| 141 | return ERR_PTR(-EINVAL); | ||
| 142 | |||
| 143 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | ||
| 144 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); | ||
| 145 | if (err != 0) | ||
| 146 | return ERR_PTR(err); | ||
| 147 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
| 148 | } | ||
| 149 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 7bfb0d219d67..0b88dc462d73 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
| @@ -11,6 +11,7 @@ config SUPERH | |||
| 11 | select HAVE_CLK | 11 | select HAVE_CLK |
| 12 | select HAVE_IDE | 12 | select HAVE_IDE |
| 13 | select HAVE_OPROFILE | 13 | select HAVE_OPROFILE |
| 14 | select HAVE_GENERIC_DMA_COHERENT | ||
| 14 | help | 15 | help |
| 15 | The SuperH is a RISC processor targeted for use in embedded systems | 16 | The SuperH is a RISC processor targeted for use in embedded systems |
| 16 | and consumer electronics; it was also used in the Sega Dreamcast | 17 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 8277982d0938..b2ce014401b5 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
| @@ -28,21 +28,10 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
| 28 | dma_addr_t *dma_handle, gfp_t gfp) | 28 | dma_addr_t *dma_handle, gfp_t gfp) |
| 29 | { | 29 | { |
| 30 | void *ret, *ret_nocache; | 30 | void *ret, *ret_nocache; |
| 31 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 32 | int order = get_order(size); | 31 | int order = get_order(size); |
| 33 | 32 | ||
| 34 | if (mem) { | 33 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) |
| 35 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | 34 | return ret; |
| 36 | order); | ||
| 37 | if (page >= 0) { | ||
| 38 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | ||
| 39 | ret = mem->virt_base + (page << PAGE_SHIFT); | ||
| 40 | memset(ret, 0, size); | ||
| 41 | return ret; | ||
| 42 | } | ||
| 43 | if (mem->flags & DMA_MEMORY_EXCLUSIVE) | ||
| 44 | return NULL; | ||
| 45 | } | ||
| 46 | 35 | ||
| 47 | ret = (void *)__get_free_pages(gfp, order); | 36 | ret = (void *)__get_free_pages(gfp, order); |
| 48 | if (!ret) | 37 | if (!ret) |
| @@ -72,11 +61,7 @@ void dma_free_coherent(struct device *dev, size_t size, | |||
| 72 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | 61 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; |
| 73 | int order = get_order(size); | 62 | int order = get_order(size); |
| 74 | 63 | ||
| 75 | if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { | 64 | if (!dma_release_from_coherent(dev, order, vaddr)) { |
| 76 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
| 77 | |||
| 78 | bitmap_release_region(mem->bitmap, page, order); | ||
| 79 | } else { | ||
| 80 | WARN_ON(irqs_disabled()); /* for portability */ | 65 | WARN_ON(irqs_disabled()); /* for portability */ |
| 81 | BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE); | 66 | BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE); |
| 82 | free_pages((unsigned long)phys_to_virt(dma_handle), order); | 67 | free_pages((unsigned long)phys_to_virt(dma_handle), order); |
| @@ -85,83 +70,6 @@ void dma_free_coherent(struct device *dev, size_t size, | |||
| 85 | } | 70 | } |
| 86 | EXPORT_SYMBOL(dma_free_coherent); | 71 | EXPORT_SYMBOL(dma_free_coherent); |
| 87 | 72 | ||
| 88 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 89 | dma_addr_t device_addr, size_t size, int flags) | ||
| 90 | { | ||
| 91 | void __iomem *mem_base = NULL; | ||
| 92 | int pages = size >> PAGE_SHIFT; | ||
| 93 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
| 94 | |||
| 95 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | ||
| 96 | goto out; | ||
| 97 | if (!size) | ||
| 98 | goto out; | ||
| 99 | if (dev->dma_mem) | ||
| 100 | goto out; | ||
| 101 | |||
| 102 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
| 103 | |||
| 104 | mem_base = ioremap_nocache(bus_addr, size); | ||
| 105 | if (!mem_base) | ||
| 106 | goto out; | ||
| 107 | |||
| 108 | dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
| 109 | if (!dev->dma_mem) | ||
| 110 | goto out; | ||
| 111 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
| 112 | if (!dev->dma_mem->bitmap) | ||
| 113 | goto free1_out; | ||
| 114 | |||
| 115 | dev->dma_mem->virt_base = mem_base; | ||
| 116 | dev->dma_mem->device_base = device_addr; | ||
| 117 | dev->dma_mem->size = pages; | ||
| 118 | dev->dma_mem->flags = flags; | ||
| 119 | |||
| 120 | if (flags & DMA_MEMORY_MAP) | ||
| 121 | return DMA_MEMORY_MAP; | ||
| 122 | |||
| 123 | return DMA_MEMORY_IO; | ||
| 124 | |||
| 125 | free1_out: | ||
| 126 | kfree(dev->dma_mem); | ||
| 127 | out: | ||
| 128 | if (mem_base) | ||
| 129 | iounmap(mem_base); | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
| 133 | |||
| 134 | void dma_release_declared_memory(struct device *dev) | ||
| 135 | { | ||
| 136 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 137 | |||
| 138 | if (!mem) | ||
| 139 | return; | ||
| 140 | dev->dma_mem = NULL; | ||
| 141 | iounmap(mem->virt_base); | ||
| 142 | kfree(mem->bitmap); | ||
| 143 | kfree(mem); | ||
| 144 | } | ||
| 145 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
| 146 | |||
| 147 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
| 148 | dma_addr_t device_addr, size_t size) | ||
| 149 | { | ||
| 150 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 151 | int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
| 152 | int pos, err; | ||
| 153 | |||
| 154 | if (!mem) | ||
| 155 | return ERR_PTR(-EINVAL); | ||
| 156 | |||
| 157 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | ||
| 158 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); | ||
| 159 | if (err != 0) | ||
| 160 | return ERR_PTR(err); | ||
| 161 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
| 162 | } | ||
| 163 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
| 164 | |||
| 165 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 73 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
| 166 | enum dma_data_direction direction) | 74 | enum dma_data_direction direction) |
| 167 | { | 75 | { |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b6fa2877b173..3d0f2b6a5a16 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -30,6 +30,7 @@ config X86 | |||
| 30 | select HAVE_FTRACE | 30 | select HAVE_FTRACE |
| 31 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 31 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
| 32 | select HAVE_ARCH_KGDB if !X86_VOYAGER | 32 | select HAVE_ARCH_KGDB if !X86_VOYAGER |
| 33 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | ||
| 33 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 34 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
| 34 | 35 | ||
| 35 | config ARCH_DEFCONFIG | 36 | config ARCH_DEFCONFIG |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 74697408576f..22d7d050905d 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
| @@ -29,9 +29,6 @@ | |||
| 29 | 29 | ||
| 30 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | 30 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) |
| 31 | 31 | ||
| 32 | #define to_pages(addr, size) \ | ||
| 33 | (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) | ||
| 34 | |||
| 35 | #define EXIT_LOOP_COUNT 10000000 | 32 | #define EXIT_LOOP_COUNT 10000000 |
| 36 | 33 | ||
| 37 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); | 34 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); |
| @@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, | |||
| 185 | u64 address, size_t size) | 182 | u64 address, size_t size) |
| 186 | { | 183 | { |
| 187 | int s = 0; | 184 | int s = 0; |
| 188 | unsigned pages = to_pages(address, size); | 185 | unsigned pages = iommu_num_pages(address, size); |
| 189 | 186 | ||
| 190 | address &= PAGE_MASK; | 187 | address &= PAGE_MASK; |
| 191 | 188 | ||
| @@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, | |||
| 557 | if (iommu->exclusion_start && | 554 | if (iommu->exclusion_start && |
| 558 | iommu->exclusion_start < dma_dom->aperture_size) { | 555 | iommu->exclusion_start < dma_dom->aperture_size) { |
| 559 | unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; | 556 | unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; |
| 560 | int pages = to_pages(iommu->exclusion_start, | 557 | int pages = iommu_num_pages(iommu->exclusion_start, |
| 561 | iommu->exclusion_length); | 558 | iommu->exclusion_length); |
| 562 | dma_ops_reserve_addresses(dma_dom, startpage, pages); | 559 | dma_ops_reserve_addresses(dma_dom, startpage, pages); |
| 563 | } | 560 | } |
| 564 | 561 | ||
| @@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev, | |||
| 767 | unsigned int pages; | 764 | unsigned int pages; |
| 768 | int i; | 765 | int i; |
| 769 | 766 | ||
| 770 | pages = to_pages(paddr, size); | 767 | pages = iommu_num_pages(paddr, size); |
| 771 | paddr &= PAGE_MASK; | 768 | paddr &= PAGE_MASK; |
| 772 | 769 | ||
| 773 | address = dma_ops_alloc_addresses(dev, dma_dom, pages); | 770 | address = dma_ops_alloc_addresses(dev, dma_dom, pages); |
| @@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
| 802 | if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) | 799 | if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) |
| 803 | return; | 800 | return; |
| 804 | 801 | ||
| 805 | pages = to_pages(dma_addr, size); | 802 | pages = iommu_num_pages(dma_addr, size); |
| 806 | dma_addr &= PAGE_MASK; | 803 | dma_addr &= PAGE_MASK; |
| 807 | start = dma_addr; | 804 | start = dma_addr; |
| 808 | 805 | ||
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 37544123896d..8dbffb846de9 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
| @@ -192,124 +192,6 @@ static __init int iommu_setup(char *p) | |||
| 192 | } | 192 | } |
| 193 | early_param("iommu", iommu_setup); | 193 | early_param("iommu", iommu_setup); |
| 194 | 194 | ||
| 195 | #ifdef CONFIG_X86_32 | ||
| 196 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 197 | dma_addr_t device_addr, size_t size, int flags) | ||
| 198 | { | ||
| 199 | void __iomem *mem_base = NULL; | ||
| 200 | int pages = size >> PAGE_SHIFT; | ||
| 201 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
| 202 | |||
| 203 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | ||
| 204 | goto out; | ||
| 205 | if (!size) | ||
| 206 | goto out; | ||
| 207 | if (dev->dma_mem) | ||
| 208 | goto out; | ||
| 209 | |||
| 210 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
| 211 | |||
| 212 | mem_base = ioremap(bus_addr, size); | ||
| 213 | if (!mem_base) | ||
| 214 | goto out; | ||
| 215 | |||
| 216 | dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
| 217 | if (!dev->dma_mem) | ||
| 218 | goto out; | ||
| 219 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
| 220 | if (!dev->dma_mem->bitmap) | ||
| 221 | goto free1_out; | ||
| 222 | |||
| 223 | dev->dma_mem->virt_base = mem_base; | ||
| 224 | dev->dma_mem->device_base = device_addr; | ||
| 225 | dev->dma_mem->size = pages; | ||
| 226 | dev->dma_mem->flags = flags; | ||
| 227 | |||
| 228 | if (flags & DMA_MEMORY_MAP) | ||
| 229 | return DMA_MEMORY_MAP; | ||
| 230 | |||
| 231 | return DMA_MEMORY_IO; | ||
| 232 | |||
| 233 | free1_out: | ||
| 234 | kfree(dev->dma_mem); | ||
| 235 | out: | ||
| 236 | if (mem_base) | ||
| 237 | iounmap(mem_base); | ||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
| 241 | |||
| 242 | void dma_release_declared_memory(struct device *dev) | ||
| 243 | { | ||
| 244 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 245 | |||
| 246 | if (!mem) | ||
| 247 | return; | ||
| 248 | dev->dma_mem = NULL; | ||
| 249 | iounmap(mem->virt_base); | ||
| 250 | kfree(mem->bitmap); | ||
| 251 | kfree(mem); | ||
| 252 | } | ||
| 253 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
| 254 | |||
| 255 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
| 256 | dma_addr_t device_addr, size_t size) | ||
| 257 | { | ||
| 258 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 259 | int pos, err; | ||
| 260 | int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); | ||
| 261 | |||
| 262 | pages >>= PAGE_SHIFT; | ||
| 263 | |||
| 264 | if (!mem) | ||
| 265 | return ERR_PTR(-EINVAL); | ||
| 266 | |||
| 267 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | ||
| 268 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); | ||
| 269 | if (err != 0) | ||
| 270 | return ERR_PTR(err); | ||
| 271 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
| 272 | } | ||
| 273 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
| 274 | |||
| 275 | static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size, | ||
| 276 | dma_addr_t *dma_handle, void **ret) | ||
| 277 | { | ||
| 278 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 279 | int order = get_order(size); | ||
| 280 | |||
| 281 | if (mem) { | ||
| 282 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | ||
| 283 | order); | ||
| 284 | if (page >= 0) { | ||
| 285 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | ||
| 286 | *ret = mem->virt_base + (page << PAGE_SHIFT); | ||
| 287 | memset(*ret, 0, size); | ||
| 288 | } | ||
| 289 | if (mem->flags & DMA_MEMORY_EXCLUSIVE) | ||
| 290 | *ret = NULL; | ||
| 291 | } | ||
| 292 | return (mem != NULL); | ||
| 293 | } | ||
| 294 | |||
| 295 | static int dma_release_coherent(struct device *dev, int order, void *vaddr) | ||
| 296 | { | ||
| 297 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 298 | |||
| 299 | if (mem && vaddr >= mem->virt_base && vaddr < | ||
| 300 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
| 301 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
| 302 | |||
| 303 | bitmap_release_region(mem->bitmap, page, order); | ||
| 304 | return 1; | ||
| 305 | } | ||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | #else | ||
| 309 | #define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0) | ||
| 310 | #define dma_release_coherent(dev, order, vaddr) (0) | ||
| 311 | #endif /* CONFIG_X86_32 */ | ||
| 312 | |||
| 313 | int dma_supported(struct device *dev, u64 mask) | 195 | int dma_supported(struct device *dev, u64 mask) |
| 314 | { | 196 | { |
| 315 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 197 | struct dma_mapping_ops *ops = get_dma_ops(dev); |
| @@ -379,7 +261,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
| 379 | /* ignore region specifiers */ | 261 | /* ignore region specifiers */ |
| 380 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 262 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
| 381 | 263 | ||
| 382 | if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) | 264 | if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) |
| 383 | return memory; | 265 | return memory; |
| 384 | 266 | ||
| 385 | if (!dev) { | 267 | if (!dev) { |
| @@ -484,7 +366,7 @@ void dma_free_coherent(struct device *dev, size_t size, | |||
| 484 | 366 | ||
| 485 | int order = get_order(size); | 367 | int order = get_order(size); |
| 486 | WARN_ON(irqs_disabled()); /* for portability */ | 368 | WARN_ON(irqs_disabled()); /* for portability */ |
| 487 | if (dma_release_coherent(dev, order, vaddr)) | 369 | if (dma_release_from_coherent(dev, order, vaddr)) |
| 488 | return; | 370 | return; |
| 489 | if (ops->unmap_single) | 371 | if (ops->unmap_single) |
| 490 | ops->unmap_single(dev, bus, size, 0); | 372 | ops->unmap_single(dev, bus, size, 0); |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 744126e64950..49285f8fd4d5 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
| @@ -67,9 +67,6 @@ static u32 gart_unmapped_entry; | |||
| 67 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) | 67 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) |
| 68 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) | 68 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) |
| 69 | 69 | ||
| 70 | #define to_pages(addr, size) \ | ||
| 71 | (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) | ||
| 72 | |||
| 73 | #define EMERGENCY_PAGES 32 /* = 128KB */ | 70 | #define EMERGENCY_PAGES 32 /* = 128KB */ |
| 74 | 71 | ||
| 75 | #ifdef CONFIG_AGP | 72 | #ifdef CONFIG_AGP |
| @@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | |||
| 241 | static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | 238 | static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, |
| 242 | size_t size, int dir) | 239 | size_t size, int dir) |
| 243 | { | 240 | { |
| 244 | unsigned long npages = to_pages(phys_mem, size); | 241 | unsigned long npages = iommu_num_pages(phys_mem, size); |
| 245 | unsigned long iommu_page = alloc_iommu(dev, npages); | 242 | unsigned long iommu_page = alloc_iommu(dev, npages); |
| 246 | int i; | 243 | int i; |
| 247 | 244 | ||
| @@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
| 304 | return; | 301 | return; |
| 305 | 302 | ||
| 306 | iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; | 303 | iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; |
| 307 | npages = to_pages(dma_addr, size); | 304 | npages = iommu_num_pages(dma_addr, size); |
| 308 | for (i = 0; i < npages; i++) { | 305 | for (i = 0; i < npages; i++) { |
| 309 | iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; | 306 | iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; |
| 310 | CLEAR_LEAK(iommu_page + i); | 307 | CLEAR_LEAK(iommu_page + i); |
| @@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start, | |||
| 387 | } | 384 | } |
| 388 | 385 | ||
| 389 | addr = phys_addr; | 386 | addr = phys_addr; |
| 390 | pages = to_pages(s->offset, s->length); | 387 | pages = iommu_num_pages(s->offset, s->length); |
| 391 | while (pages--) { | 388 | while (pages--) { |
| 392 | iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); | 389 | iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); |
| 393 | SET_LEAK(iommu_page); | 390 | SET_LEAK(iommu_page); |
| @@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
| 470 | 467 | ||
| 471 | seg_size += s->length; | 468 | seg_size += s->length; |
| 472 | need = nextneed; | 469 | need = nextneed; |
| 473 | pages += to_pages(s->offset, s->length); | 470 | pages += iommu_num_pages(s->offset, s->length); |
| 474 | ps = s; | 471 | ps = s; |
| 475 | } | 472 | } |
| 476 | if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) | 473 | if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) |
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index ff3a6a336342..4bdaa590375d 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
| @@ -23,7 +23,8 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d) | |||
| 23 | pci_read_config_byte(d, reg++, &busno); | 23 | pci_read_config_byte(d, reg++, &busno); |
| 24 | pci_read_config_byte(d, reg++, &suba); | 24 | pci_read_config_byte(d, reg++, &suba); |
| 25 | pci_read_config_byte(d, reg++, &subb); | 25 | pci_read_config_byte(d, reg++, &subb); |
| 26 | DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); | 26 | dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, |
| 27 | suba, subb); | ||
| 27 | if (busno) | 28 | if (busno) |
| 28 | pci_scan_bus_with_sysdata(busno); /* Bus A */ | 29 | pci_scan_bus_with_sysdata(busno); /* Bus A */ |
| 29 | if (suba < subb) | 30 | if (suba < subb) |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index a09505806b82..5807d1bc73f7 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
| @@ -128,10 +128,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
| 128 | pr = pci_find_parent_resource(dev, r); | 128 | pr = pci_find_parent_resource(dev, r); |
| 129 | if (!r->start || !pr || | 129 | if (!r->start || !pr || |
| 130 | request_resource(pr, r) < 0) { | 130 | request_resource(pr, r) < 0) { |
| 131 | printk(KERN_ERR "PCI: Cannot allocate " | 131 | dev_err(&dev->dev, "BAR %d: can't " |
| 132 | "resource region %d " | 132 | "allocate resource\n", idx); |
| 133 | "of bridge %s\n", | ||
| 134 | idx, pci_name(dev)); | ||
| 135 | /* | 133 | /* |
| 136 | * Something is wrong with the region. | 134 | * Something is wrong with the region. |
| 137 | * Invalidate the resource to prevent | 135 | * Invalidate the resource to prevent |
| @@ -166,15 +164,15 @@ static void __init pcibios_allocate_resources(int pass) | |||
| 166 | else | 164 | else |
| 167 | disabled = !(command & PCI_COMMAND_MEMORY); | 165 | disabled = !(command & PCI_COMMAND_MEMORY); |
| 168 | if (pass == disabled) { | 166 | if (pass == disabled) { |
| 169 | DBG("PCI: Resource %08lx-%08lx " | 167 | dev_dbg(&dev->dev, "resource %#08llx-%#08llx " |
| 170 | "(f=%lx, d=%d, p=%d)\n", | 168 | "(f=%lx, d=%d, p=%d)\n", |
| 171 | r->start, r->end, r->flags, disabled, pass); | 169 | (unsigned long long) r->start, |
| 170 | (unsigned long long) r->end, | ||
| 171 | r->flags, disabled, pass); | ||
| 172 | pr = pci_find_parent_resource(dev, r); | 172 | pr = pci_find_parent_resource(dev, r); |
| 173 | if (!pr || request_resource(pr, r) < 0) { | 173 | if (!pr || request_resource(pr, r) < 0) { |
| 174 | printk(KERN_ERR "PCI: Cannot allocate " | 174 | dev_err(&dev->dev, "BAR %d: can't " |
| 175 | "resource region %d " | 175 | "allocate resource\n", idx); |
| 176 | "of device %s\n", | ||
| 177 | idx, pci_name(dev)); | ||
| 178 | /* We'll assign a new address later */ | 176 | /* We'll assign a new address later */ |
| 179 | r->end -= r->start; | 177 | r->end -= r->start; |
| 180 | r->start = 0; | 178 | r->start = 0; |
| @@ -187,8 +185,7 @@ static void __init pcibios_allocate_resources(int pass) | |||
| 187 | /* Turn the ROM off, leave the resource region, | 185 | /* Turn the ROM off, leave the resource region, |
| 188 | * but keep it unregistered. */ | 186 | * but keep it unregistered. */ |
| 189 | u32 reg; | 187 | u32 reg; |
| 190 | DBG("PCI: Switching off ROM of %s\n", | 188 | dev_dbg(&dev->dev, "disabling ROM\n"); |
| 191 | pci_name(dev)); | ||
| 192 | r->flags &= ~IORESOURCE_ROM_ENABLE; | 189 | r->flags &= ~IORESOURCE_ROM_ENABLE; |
| 193 | pci_read_config_dword(dev, | 190 | pci_read_config_dword(dev, |
| 194 | dev->rom_base_reg, ®); | 191 | dev->rom_base_reg, ®); |
| @@ -257,8 +254,7 @@ void pcibios_set_master(struct pci_dev *dev) | |||
| 257 | lat = pcibios_max_latency; | 254 | lat = pcibios_max_latency; |
| 258 | else | 255 | else |
| 259 | return; | 256 | return; |
| 260 | printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", | 257 | dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); |
| 261 | pci_name(dev), lat); | ||
| 262 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 258 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
| 263 | } | 259 | } |
| 264 | 260 | ||
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 6a06a2eb0597..fec0123b33a9 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
| @@ -436,7 +436,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) | |||
| 436 | { | 436 | { |
| 437 | WARN_ON_ONCE(pirq >= 9); | 437 | WARN_ON_ONCE(pirq >= 9); |
| 438 | if (pirq > 8) { | 438 | if (pirq > 8) { |
| 439 | printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); | 439 | dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); |
| 440 | return 0; | 440 | return 0; |
| 441 | } | 441 | } |
| 442 | return read_config_nybble(router, 0x74, pirq-1); | 442 | return read_config_nybble(router, 0x74, pirq-1); |
| @@ -446,7 +446,7 @@ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, | |||
| 446 | { | 446 | { |
| 447 | WARN_ON_ONCE(pirq >= 9); | 447 | WARN_ON_ONCE(pirq >= 9); |
| 448 | if (pirq > 8) { | 448 | if (pirq > 8) { |
| 449 | printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); | 449 | dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); |
| 450 | return 0; | 450 | return 0; |
| 451 | } | 451 | } |
| 452 | write_config_nybble(router, 0x74, pirq-1, irq); | 452 | write_config_nybble(router, 0x74, pirq-1, irq); |
| @@ -492,15 +492,17 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq | |||
| 492 | irq = 0; | 492 | irq = 0; |
| 493 | if (pirq <= 4) | 493 | if (pirq <= 4) |
| 494 | irq = read_config_nybble(router, 0x56, pirq - 1); | 494 | irq = read_config_nybble(router, 0x56, pirq - 1); |
| 495 | printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n", | 495 | dev_info(&dev->dev, |
| 496 | dev->vendor, dev->device, pirq, irq); | 496 | "AMD756: dev [%04x/%04x], router PIRQ %d get IRQ %d\n", |
| 497 | dev->vendor, dev->device, pirq, irq); | ||
| 497 | return irq; | 498 | return irq; |
| 498 | } | 499 | } |
| 499 | 500 | ||
| 500 | static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) | 501 | static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) |
| 501 | { | 502 | { |
| 502 | printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", | 503 | dev_info(&dev->dev, |
| 503 | dev->vendor, dev->device, pirq, irq); | 504 | "AMD756: dev [%04x/%04x], router PIRQ %d set IRQ %d\n", |
| 505 | dev->vendor, dev->device, pirq, irq); | ||
| 504 | if (pirq <= 4) | 506 | if (pirq <= 4) |
| 505 | write_config_nybble(router, 0x56, pirq - 1, irq); | 507 | write_config_nybble(router, 0x56, pirq - 1, irq); |
| 506 | return 1; | 508 | return 1; |
| @@ -730,7 +732,6 @@ static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, | |||
| 730 | switch (device) { | 732 | switch (device) { |
| 731 | case PCI_DEVICE_ID_AL_M1533: | 733 | case PCI_DEVICE_ID_AL_M1533: |
| 732 | case PCI_DEVICE_ID_AL_M1563: | 734 | case PCI_DEVICE_ID_AL_M1563: |
| 733 | printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n"); | ||
| 734 | r->name = "ALI"; | 735 | r->name = "ALI"; |
| 735 | r->get = pirq_ali_get; | 736 | r->get = pirq_ali_get; |
| 736 | r->set = pirq_ali_set; | 737 | r->set = pirq_ali_set; |
| @@ -840,11 +841,9 @@ static void __init pirq_find_router(struct irq_router *r) | |||
| 840 | h->probe(r, pirq_router_dev, pirq_router_dev->device)) | 841 | h->probe(r, pirq_router_dev, pirq_router_dev->device)) |
| 841 | break; | 842 | break; |
| 842 | } | 843 | } |
| 843 | printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", | 844 | dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x/%04x]\n", |
| 844 | pirq_router.name, | 845 | pirq_router.name, |
| 845 | pirq_router_dev->vendor, | 846 | pirq_router_dev->vendor, pirq_router_dev->device); |
| 846 | pirq_router_dev->device, | ||
| 847 | pci_name(pirq_router_dev)); | ||
| 848 | 847 | ||
| 849 | /* The device remains referenced for the kernel lifetime */ | 848 | /* The device remains referenced for the kernel lifetime */ |
| 850 | } | 849 | } |
| @@ -877,7 +876,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
| 877 | /* Find IRQ pin */ | 876 | /* Find IRQ pin */ |
| 878 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 877 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
| 879 | if (!pin) { | 878 | if (!pin) { |
| 880 | DBG(KERN_DEBUG " -> no interrupt pin\n"); | 879 | dev_dbg(&dev->dev, "no interrupt pin\n"); |
| 881 | return 0; | 880 | return 0; |
| 882 | } | 881 | } |
| 883 | pin = pin - 1; | 882 | pin = pin - 1; |
| @@ -887,20 +886,20 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
| 887 | if (!pirq_table) | 886 | if (!pirq_table) |
| 888 | return 0; | 887 | return 0; |
| 889 | 888 | ||
| 890 | DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin); | ||
| 891 | info = pirq_get_info(dev); | 889 | info = pirq_get_info(dev); |
| 892 | if (!info) { | 890 | if (!info) { |
| 893 | DBG(" -> not found in routing table\n" KERN_DEBUG); | 891 | dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", |
| 892 | 'A' + pin); | ||
| 894 | return 0; | 893 | return 0; |
| 895 | } | 894 | } |
| 896 | pirq = info->irq[pin].link; | 895 | pirq = info->irq[pin].link; |
| 897 | mask = info->irq[pin].bitmap; | 896 | mask = info->irq[pin].bitmap; |
| 898 | if (!pirq) { | 897 | if (!pirq) { |
| 899 | DBG(" -> not routed\n" KERN_DEBUG); | 898 | dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin); |
| 900 | return 0; | 899 | return 0; |
| 901 | } | 900 | } |
| 902 | DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, | 901 | dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", |
| 903 | pirq_table->exclusive_irqs); | 902 | 'A' + pin, pirq, mask, pirq_table->exclusive_irqs); |
| 904 | mask &= pcibios_irq_mask; | 903 | mask &= pcibios_irq_mask; |
| 905 | 904 | ||
| 906 | /* Work around broken HP Pavilion Notebooks which assign USB to | 905 | /* Work around broken HP Pavilion Notebooks which assign USB to |
| @@ -930,10 +929,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
| 930 | if (pci_probe & PCI_USE_PIRQ_MASK) | 929 | if (pci_probe & PCI_USE_PIRQ_MASK) |
| 931 | newirq = 0; | 930 | newirq = 0; |
| 932 | else | 931 | else |
| 933 | printk("\n" KERN_WARNING | 932 | dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask " |
| 934 | "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n" | 933 | "%#x; try pci=usepirqmask\n", newirq, mask); |
| 935 | KERN_DEBUG, newirq, | ||
| 936 | pci_name(dev)); | ||
| 937 | } | 934 | } |
| 938 | if (!newirq && assign) { | 935 | if (!newirq && assign) { |
| 939 | for (i = 0; i < 16; i++) { | 936 | for (i = 0; i < 16; i++) { |
| @@ -944,39 +941,35 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
| 944 | newirq = i; | 941 | newirq = i; |
| 945 | } | 942 | } |
| 946 | } | 943 | } |
| 947 | DBG(" -> newirq=%d", newirq); | 944 | dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq); |
| 948 | 945 | ||
| 949 | /* Check if it is hardcoded */ | 946 | /* Check if it is hardcoded */ |
| 950 | if ((pirq & 0xf0) == 0xf0) { | 947 | if ((pirq & 0xf0) == 0xf0) { |
| 951 | irq = pirq & 0xf; | 948 | irq = pirq & 0xf; |
| 952 | DBG(" -> hardcoded IRQ %d\n", irq); | 949 | msg = "hardcoded"; |
| 953 | msg = "Hardcoded"; | ||
| 954 | } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ | 950 | } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ |
| 955 | ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { | 951 | ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { |
| 956 | DBG(" -> got IRQ %d\n", irq); | 952 | msg = "found"; |
| 957 | msg = "Found"; | ||
| 958 | eisa_set_level_irq(irq); | 953 | eisa_set_level_irq(irq); |
| 959 | } else if (newirq && r->set && | 954 | } else if (newirq && r->set && |
| 960 | (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { | 955 | (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { |
| 961 | DBG(" -> assigning IRQ %d", newirq); | ||
| 962 | if (r->set(pirq_router_dev, dev, pirq, newirq)) { | 956 | if (r->set(pirq_router_dev, dev, pirq, newirq)) { |
| 963 | eisa_set_level_irq(newirq); | 957 | eisa_set_level_irq(newirq); |
| 964 | DBG(" ... OK\n"); | 958 | msg = "assigned"; |
| 965 | msg = "Assigned"; | ||
| 966 | irq = newirq; | 959 | irq = newirq; |
| 967 | } | 960 | } |
| 968 | } | 961 | } |
| 969 | 962 | ||
| 970 | if (!irq) { | 963 | if (!irq) { |
| 971 | DBG(" ... failed\n"); | ||
| 972 | if (newirq && mask == (1 << newirq)) { | 964 | if (newirq && mask == (1 << newirq)) { |
| 973 | msg = "Guessed"; | 965 | msg = "guessed"; |
| 974 | irq = newirq; | 966 | irq = newirq; |
| 975 | } else | 967 | } else { |
| 968 | dev_dbg(&dev->dev, "can't route interrupt\n"); | ||
| 976 | return 0; | 969 | return 0; |
| 970 | } | ||
| 977 | } | 971 | } |
| 978 | printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, | 972 | dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq); |
| 979 | pci_name(dev)); | ||
| 980 | 973 | ||
| 981 | /* Update IRQ for all devices with the same pirq value */ | 974 | /* Update IRQ for all devices with the same pirq value */ |
| 982 | while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { | 975 | while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { |
| @@ -996,17 +989,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
| 996 | (!(pci_probe & PCI_USE_PIRQ_MASK) || \ | 989 | (!(pci_probe & PCI_USE_PIRQ_MASK) || \ |
| 997 | ((1 << dev2->irq) & mask))) { | 990 | ((1 << dev2->irq) & mask))) { |
| 998 | #ifndef CONFIG_PCI_MSI | 991 | #ifndef CONFIG_PCI_MSI |
| 999 | printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n", | 992 | dev_info(&dev2->dev, "IRQ routing conflict: " |
| 1000 | pci_name(dev2), dev2->irq, irq); | 993 | "have IRQ %d, want IRQ %d\n", |
| 994 | dev2->irq, irq); | ||
| 1001 | #endif | 995 | #endif |
| 1002 | continue; | 996 | continue; |
| 1003 | } | 997 | } |
| 1004 | dev2->irq = irq; | 998 | dev2->irq = irq; |
| 1005 | pirq_penalty[irq]++; | 999 | pirq_penalty[irq]++; |
| 1006 | if (dev != dev2) | 1000 | if (dev != dev2) |
| 1007 | printk(KERN_INFO | 1001 | dev_info(&dev->dev, "sharing IRQ %d with %s\n", |
| 1008 | "PCI: Sharing IRQ %d with %s\n", | 1002 | irq, pci_name(dev2)); |
| 1009 | irq, pci_name(dev2)); | ||
| 1010 | } | 1003 | } |
| 1011 | } | 1004 | } |
| 1012 | return 1; | 1005 | return 1; |
| @@ -1025,8 +1018,7 @@ static void __init pcibios_fixup_irqs(void) | |||
| 1025 | * already in use. | 1018 | * already in use. |
| 1026 | */ | 1019 | */ |
| 1027 | if (dev->irq >= 16) { | 1020 | if (dev->irq >= 16) { |
| 1028 | DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", | 1021 | dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq); |
| 1029 | pci_name(dev), dev->irq); | ||
| 1030 | dev->irq = 0; | 1022 | dev->irq = 0; |
| 1031 | } | 1023 | } |
| 1032 | /* | 1024 | /* |
| @@ -1070,12 +1062,12 @@ static void __init pcibios_fixup_irqs(void) | |||
| 1070 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1062 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, |
| 1071 | PCI_SLOT(bridge->devfn), pin); | 1063 | PCI_SLOT(bridge->devfn), pin); |
| 1072 | if (irq >= 0) | 1064 | if (irq >= 0) |
| 1073 | printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", | 1065 | dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", |
| 1074 | pci_name(bridge), 'A' + pin, irq); | 1066 | pci_name(bridge), |
| 1067 | 'A' + pin, irq); | ||
| 1075 | } | 1068 | } |
| 1076 | if (irq >= 0) { | 1069 | if (irq >= 0) { |
| 1077 | printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", | 1070 | dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); |
| 1078 | pci_name(dev), 'A' + pin, irq); | ||
| 1079 | dev->irq = irq; | 1071 | dev->irq = irq; |
| 1080 | } | 1072 | } |
| 1081 | } | 1073 | } |
| @@ -1231,25 +1223,24 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
| 1231 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1223 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, |
| 1232 | PCI_SLOT(bridge->devfn), pin); | 1224 | PCI_SLOT(bridge->devfn), pin); |
| 1233 | if (irq >= 0) | 1225 | if (irq >= 0) |
| 1234 | printk(KERN_WARNING | 1226 | dev_warn(&dev->dev, "using bridge %s " |
| 1235 | "PCI: using PPB %s[%c] to get irq %d\n", | 1227 | "INT %c to get IRQ %d\n", |
| 1236 | pci_name(bridge), | 1228 | pci_name(bridge), 'A' + pin, |
| 1237 | 'A' + pin, irq); | 1229 | irq); |
| 1238 | dev = bridge; | 1230 | dev = bridge; |
| 1239 | } | 1231 | } |
| 1240 | dev = temp_dev; | 1232 | dev = temp_dev; |
| 1241 | if (irq >= 0) { | 1233 | if (irq >= 0) { |
| 1242 | printk(KERN_INFO | 1234 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " |
| 1243 | "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", | 1235 | "INT %c -> IRQ %d\n", 'A' + pin, irq); |
| 1244 | pci_name(dev), 'A' + pin, irq); | ||
| 1245 | dev->irq = irq; | 1236 | dev->irq = irq; |
| 1246 | return 0; | 1237 | return 0; |
| 1247 | } else | 1238 | } else |
| 1248 | msg = " Probably buggy MP table."; | 1239 | msg = "; probably buggy MP table"; |
| 1249 | } else if (pci_probe & PCI_BIOS_IRQ_SCAN) | 1240 | } else if (pci_probe & PCI_BIOS_IRQ_SCAN) |
| 1250 | msg = ""; | 1241 | msg = ""; |
| 1251 | else | 1242 | else |
| 1252 | msg = " Please try using pci=biosirq."; | 1243 | msg = "; please try using pci=biosirq"; |
| 1253 | 1244 | ||
| 1254 | /* | 1245 | /* |
| 1255 | * With IDE legacy devices the IRQ lookup failure is not | 1246 | * With IDE legacy devices the IRQ lookup failure is not |
| @@ -1259,9 +1250,8 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
| 1259 | !(dev->class & 0x5)) | 1250 | !(dev->class & 0x5)) |
| 1260 | return 0; | 1251 | return 0; |
| 1261 | 1252 | ||
| 1262 | printk(KERN_WARNING | 1253 | dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", |
| 1263 | "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", | 1254 | 'A' + pin, msg); |
| 1264 | 'A' + pin, pci_name(dev), msg); | ||
| 1265 | } | 1255 | } |
| 1266 | return 0; | 1256 | return 0; |
| 1267 | } | 1257 | } |
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index f4b16dc11dad..1177845d3186 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c | |||
| @@ -131,13 +131,14 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d) | |||
| 131 | u8 busno, suba, subb; | 131 | u8 busno, suba, subb; |
| 132 | int quad = BUS2QUAD(d->bus->number); | 132 | int quad = BUS2QUAD(d->bus->number); |
| 133 | 133 | ||
| 134 | printk("PCI: Searching for i450NX host bridges on %s\n", pci_name(d)); | 134 | dev_info(&d->dev, "searching for i450NX host bridges\n"); |
| 135 | reg = 0xd0; | 135 | reg = 0xd0; |
| 136 | for(pxb=0; pxb<2; pxb++) { | 136 | for(pxb=0; pxb<2; pxb++) { |
| 137 | pci_read_config_byte(d, reg++, &busno); | 137 | pci_read_config_byte(d, reg++, &busno); |
| 138 | pci_read_config_byte(d, reg++, &suba); | 138 | pci_read_config_byte(d, reg++, &suba); |
| 139 | pci_read_config_byte(d, reg++, &subb); | 139 | pci_read_config_byte(d, reg++, &subb); |
| 140 | DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); | 140 | dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", |
| 141 | pxb, busno, suba, subb); | ||
| 141 | if (busno) { | 142 | if (busno) { |
| 142 | /* Bus A */ | 143 | /* Bus A */ |
| 143 | pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno)); | 144 | pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno)); |
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index dd376f7ad090..d5b4ef898879 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c | |||
| @@ -76,9 +76,9 @@ static struct acpi_pci_driver acpi_pci_slot_driver = { | |||
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static int | 78 | static int |
| 79 | check_slot(acpi_handle handle, int *device, unsigned long *sun) | 79 | check_slot(acpi_handle handle, unsigned long *sun) |
| 80 | { | 80 | { |
| 81 | int retval = 0; | 81 | int device = -1; |
| 82 | unsigned long adr, sta; | 82 | unsigned long adr, sta; |
| 83 | acpi_status status; | 83 | acpi_status status; |
| 84 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 84 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| @@ -89,32 +89,27 @@ check_slot(acpi_handle handle, int *device, unsigned long *sun) | |||
| 89 | if (check_sta_before_sun) { | 89 | if (check_sta_before_sun) { |
| 90 | /* If SxFy doesn't have _STA, we just assume it's there */ | 90 | /* If SxFy doesn't have _STA, we just assume it's there */ |
| 91 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 91 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
| 92 | if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) { | 92 | if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) |
| 93 | retval = -1; | ||
| 94 | goto out; | 93 | goto out; |
| 95 | } | ||
| 96 | } | 94 | } |
| 97 | 95 | ||
| 98 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); | 96 | status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); |
| 99 | if (ACPI_FAILURE(status)) { | 97 | if (ACPI_FAILURE(status)) { |
| 100 | dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); | 98 | dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); |
| 101 | retval = -1; | ||
| 102 | goto out; | 99 | goto out; |
| 103 | } | 100 | } |
| 104 | 101 | ||
| 105 | *device = (adr >> 16) & 0xffff; | ||
| 106 | |||
| 107 | /* No _SUN == not a slot == bail */ | 102 | /* No _SUN == not a slot == bail */ |
| 108 | status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); | 103 | status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); |
| 109 | if (ACPI_FAILURE(status)) { | 104 | if (ACPI_FAILURE(status)) { |
| 110 | dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); | 105 | dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); |
| 111 | retval = -1; | ||
| 112 | goto out; | 106 | goto out; |
| 113 | } | 107 | } |
| 114 | 108 | ||
| 109 | device = (adr >> 16) & 0xffff; | ||
| 115 | out: | 110 | out: |
| 116 | kfree(buffer.pointer); | 111 | kfree(buffer.pointer); |
| 117 | return retval; | 112 | return device; |
| 118 | } | 113 | } |
| 119 | 114 | ||
| 120 | struct callback_args { | 115 | struct callback_args { |
| @@ -144,7 +139,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
| 144 | struct callback_args *parent_context = context; | 139 | struct callback_args *parent_context = context; |
| 145 | struct pci_bus *pci_bus = parent_context->pci_bus; | 140 | struct pci_bus *pci_bus = parent_context->pci_bus; |
| 146 | 141 | ||
| 147 | if (check_slot(handle, &device, &sun)) | 142 | device = check_slot(handle, &sun); |
| 143 | if (device < 0) | ||
| 148 | return AE_OK; | 144 | return AE_OK; |
| 149 | 145 | ||
| 150 | slot = kmalloc(sizeof(*slot), GFP_KERNEL); | 146 | slot = kmalloc(sizeof(*slot), GFP_KERNEL); |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 1323a43285d7..ad27e9e225a6 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -1103,7 +1103,7 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
| 1103 | dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); | 1103 | dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); |
| 1104 | dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); | 1104 | dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); |
| 1105 | dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); | 1105 | dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); |
| 1106 | dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); | 1106 | dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); |
| 1107 | pciehp_readw(ctrl, SLOTSTATUS, &reg16); | 1107 | pciehp_readw(ctrl, SLOTSTATUS, &reg16); |
| 1108 | dbg("Slot Status : 0x%04x\n", reg16); | 1108 | dbg("Slot Status : 0x%04x\n", reg16); |
| 1109 | pciehp_readw(ctrl, SLOTCTRL, &reg16); | 1109 | pciehp_readw(ctrl, SLOTCTRL, &reg16); |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 15af618d36e2..18354817173c 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -126,7 +126,16 @@ static void msix_flush_writes(unsigned int irq) | |||
| 126 | } | 126 | } |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | 129 | /* |
| 130 | * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to | ||
| 131 | * mask all MSI interrupts by clearing the MSI enable bit does not work | ||
| 132 | * reliably as devices without an INTx disable bit will then generate a | ||
| 133 | * level IRQ which will never be cleared. | ||
| 134 | * | ||
| 135 | * Returns 1 if it succeeded in masking the interrupt and 0 if the device | ||
| 136 | * doesn't support MSI masking. | ||
| 137 | */ | ||
| 138 | static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | ||
| 130 | { | 139 | { |
| 131 | struct msi_desc *entry; | 140 | struct msi_desc *entry; |
| 132 | 141 | ||
| @@ -144,8 +153,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | |||
| 144 | mask_bits |= flag & mask; | 153 | mask_bits |= flag & mask; |
| 145 | pci_write_config_dword(entry->dev, pos, mask_bits); | 154 | pci_write_config_dword(entry->dev, pos, mask_bits); |
| 146 | } else { | 155 | } else { |
| 147 | __msi_set_enable(entry->dev, entry->msi_attrib.pos, | 156 | return 0; |
| 148 | !flag); | ||
| 149 | } | 157 | } |
| 150 | break; | 158 | break; |
| 151 | case PCI_CAP_ID_MSIX: | 159 | case PCI_CAP_ID_MSIX: |
| @@ -161,6 +169,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | |||
| 161 | break; | 169 | break; |
| 162 | } | 170 | } |
| 163 | entry->msi_attrib.masked = !!flag; | 171 | entry->msi_attrib.masked = !!flag; |
| 172 | return 1; | ||
| 164 | } | 173 | } |
| 165 | 174 | ||
| 166 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 175 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7764768b6a0e..89a2f0fa10f9 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/pci-aspm.h> | ||
| 14 | #include <acpi/acpi.h> | 15 | #include <acpi/acpi.h> |
| 15 | #include <acpi/acnamesp.h> | 16 | #include <acpi/acnamesp.h> |
| 16 | #include <acpi/acresrc.h> | 17 | #include <acpi/acresrc.h> |
| @@ -372,6 +373,12 @@ static int __init acpi_pci_init(void) | |||
| 372 | printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); | 373 | printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); |
| 373 | pci_no_msi(); | 374 | pci_no_msi(); |
| 374 | } | 375 | } |
| 376 | |||
| 377 | if (acpi_gbl_FADT.boot_flags & BAF_PCIE_ASPM_CONTROL) { | ||
| 378 | printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); | ||
| 379 | pcie_no_aspm(); | ||
| 380 | } | ||
| 381 | |||
| 375 | ret = register_acpi_bus_type(&acpi_pci_bus); | 382 | ret = register_acpi_bus_type(&acpi_pci_bus); |
| 376 | if (ret) | 383 | if (ret) |
| 377 | return 0; | 384 | return 0; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e9c356236d27..0a3d856833fc 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -572,6 +572,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 572 | if (!ret) | 572 | if (!ret) |
| 573 | pci_update_current_state(dev); | 573 | pci_update_current_state(dev); |
| 574 | } | 574 | } |
| 575 | /* This device is quirked not to be put into D3, so | ||
| 576 | don't put it in D3 */ | ||
| 577 | if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) | ||
| 578 | return 0; | ||
| 575 | 579 | ||
| 576 | error = pci_raw_set_power_state(dev, state); | 580 | error = pci_raw_set_power_state(dev, state); |
| 577 | 581 | ||
| @@ -1123,6 +1127,12 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) | |||
| 1123 | } | 1127 | } |
| 1124 | 1128 | ||
| 1125 | /** | 1129 | /** |
| 1130 | * pci_target_state - find an appropriate low power state for a given PCI dev | ||
| 1131 | * @dev: PCI device | ||
| 1132 | * | ||
| 1133 | * Use underlying platform code to find a supported low power state for @dev. | ||
| 1134 | * If the platform can't manage @dev, return the deepest state from which it | ||
| 1135 | * can generate wake events, based on any available PME info. | ||
| 1126 | */ | 1136 | */ |
| 1127 | pci_power_t pci_target_state(struct pci_dev *dev) | 1137 | pci_power_t pci_target_state(struct pci_dev *dev) |
| 1128 | { | 1138 | { |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f82495583e63..9a7c9e1408a4 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -55,7 +55,7 @@ struct pcie_link_state { | |||
| 55 | struct endpoint_state endpoints[8]; | 55 | struct endpoint_state endpoints[8]; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | static int aspm_disabled; | 58 | static int aspm_disabled, aspm_force; |
| 59 | static DEFINE_MUTEX(aspm_lock); | 59 | static DEFINE_MUTEX(aspm_lock); |
| 60 | static LIST_HEAD(link_list); | 60 | static LIST_HEAD(link_list); |
| 61 | 61 | ||
| @@ -510,6 +510,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
| 510 | { | 510 | { |
| 511 | struct pci_dev *child_dev; | 511 | struct pci_dev *child_dev; |
| 512 | int child_pos; | 512 | int child_pos; |
| 513 | u32 reg32; | ||
| 513 | 514 | ||
| 514 | /* | 515 | /* |
| 515 | * Some functions in a slot might not all be PCIE functions, very | 516 | * Some functions in a slot might not all be PCIE functions, very |
| @@ -519,6 +520,19 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
| 519 | child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 520 | child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); |
| 520 | if (!child_pos) | 521 | if (!child_pos) |
| 521 | return -EINVAL; | 522 | return -EINVAL; |
| 523 | |||
| 524 | /* | ||
| 525 | * Disable ASPM for pre-1.1 PCIe device, we follow MS to use | ||
| 526 | * RBER bit to determine if a function is 1.1 version device | ||
| 527 | */ | ||
| 528 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, | ||
| 529 | &reg32); | ||
| 530 | if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) { | ||
| 531 | printk("Pre-1.1 PCIe device detected, " | ||
| 532 | "disable ASPM for %s. It can be enabled forcedly" | ||
| 533 | " with 'pcie_aspm=force'\n", pci_name(pdev)); | ||
| 534 | return -EINVAL; | ||
| 535 | } | ||
| 522 | } | 536 | } |
| 523 | return 0; | 537 | return 0; |
| 524 | } | 538 | } |
| @@ -802,11 +816,23 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) | |||
| 802 | 816 | ||
| 803 | static int __init pcie_aspm_disable(char *str) | 817 | static int __init pcie_aspm_disable(char *str) |
| 804 | { | 818 | { |
| 805 | aspm_disabled = 1; | 819 | if (!strcmp(str, "off")) { |
| 820 | aspm_disabled = 1; | ||
| 821 | printk(KERN_INFO "PCIe ASPM is disabled\n"); | ||
| 822 | } else if (!strcmp(str, "force")) { | ||
| 823 | aspm_force = 1; | ||
| 824 | printk(KERN_INFO "PCIe ASPM is forcedly enabled\n"); | ||
| 825 | } | ||
| 806 | return 1; | 826 | return 1; |
| 807 | } | 827 | } |
| 808 | 828 | ||
| 809 | __setup("pcie_noaspm", pcie_aspm_disable); | 829 | __setup("pcie_aspm=", pcie_aspm_disable); |
| 830 | |||
| 831 | void pcie_no_aspm(void) | ||
| 832 | { | ||
| 833 | if (!aspm_force) | ||
| 834 | aspm_disabled = 1; | ||
| 835 | } | ||
| 810 | 836 | ||
| 811 | #ifdef CONFIG_ACPI | 837 | #ifdef CONFIG_ACPI |
| 812 | #include <acpi/acpi_bus.h> | 838 | #include <acpi/acpi_bus.h> |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b1724cf31b66..7098dfb07449 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -163,12 +163,9 @@ static inline unsigned int pci_calc_resource_flags(unsigned int flags) | |||
| 163 | return IORESOURCE_MEM; | 163 | return IORESOURCE_MEM; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | /* | 166 | static u64 pci_size(u64 base, u64 maxbase, u64 mask) |
| 167 | * Find the extent of a PCI decode.. | ||
| 168 | */ | ||
| 169 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) | ||
| 170 | { | 167 | { |
| 171 | u32 size = mask & maxbase; /* Find the significant bits */ | 168 | u64 size = mask & maxbase; /* Find the significant bits */ |
| 172 | if (!size) | 169 | if (!size) |
| 173 | return 0; | 170 | return 0; |
| 174 | 171 | ||
| @@ -184,135 +181,142 @@ static u32 pci_size(u32 base, u32 maxbase, u32 mask) | |||
| 184 | return size; | 181 | return size; |
| 185 | } | 182 | } |
| 186 | 183 | ||
| 187 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) | 184 | enum pci_bar_type { |
| 188 | { | 185 | pci_bar_unknown, /* Standard PCI BAR probe */ |
| 189 | u64 size = mask & maxbase; /* Find the significant bits */ | 186 | pci_bar_io, /* An io port BAR */ |
| 190 | if (!size) | 187 | pci_bar_mem32, /* A 32-bit memory BAR */ |
| 191 | return 0; | 188 | pci_bar_mem64, /* A 64-bit memory BAR */ |
| 189 | }; | ||
| 192 | 190 | ||
| 193 | /* Get the lowest of them to find the decode size, and | 191 | static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) |
| 194 | from that the extent. */ | 192 | { |
| 195 | size = (size & ~(size-1)) - 1; | 193 | if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { |
| 194 | res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; | ||
| 195 | return pci_bar_io; | ||
| 196 | } | ||
| 196 | 197 | ||
| 197 | /* base == maxbase can be valid only if the BAR has | 198 | res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; |
| 198 | already been programmed with all 1s. */ | ||
| 199 | if (base == maxbase && ((base | size) & mask) != mask) | ||
| 200 | return 0; | ||
| 201 | 199 | ||
| 202 | return size; | 200 | if (res->flags == PCI_BASE_ADDRESS_MEM_TYPE_64) |
| 201 | return pci_bar_mem64; | ||
| 202 | return pci_bar_mem32; | ||
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static inline int is_64bit_memory(u32 mask) | 205 | /* |
| 206 | * If the type is not unknown, we assume that the lowest bit is 'enable'. | ||
| 207 | * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit. | ||
| 208 | */ | ||
| 209 | static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | ||
| 210 | struct resource *res, unsigned int pos) | ||
| 206 | { | 211 | { |
| 207 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == | 212 | u32 l, sz, mask; |
| 208 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) | ||
| 209 | return 1; | ||
| 210 | return 0; | ||
| 211 | } | ||
| 212 | 213 | ||
| 213 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | 214 | mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0; |
| 214 | { | ||
| 215 | unsigned int pos, reg, next; | ||
| 216 | u32 l, sz; | ||
| 217 | struct resource *res; | ||
| 218 | 215 | ||
| 219 | for(pos=0; pos<howmany; pos = next) { | 216 | res->name = pci_name(dev); |
| 220 | u64 l64; | ||
| 221 | u64 sz64; | ||
| 222 | u32 raw_sz; | ||
| 223 | 217 | ||
| 224 | next = pos+1; | 218 | pci_read_config_dword(dev, pos, &l); |
| 225 | res = &dev->resource[pos]; | 219 | pci_write_config_dword(dev, pos, mask); |
| 226 | res->name = pci_name(dev); | 220 | pci_read_config_dword(dev, pos, &sz); |
| 227 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); | 221 | pci_write_config_dword(dev, pos, l); |
| 228 | pci_read_config_dword(dev, reg, &l); | 222 | |
| 229 | pci_write_config_dword(dev, reg, ~0); | 223 | /* |
| 230 | pci_read_config_dword(dev, reg, &sz); | 224 | * All bits set in sz means the device isn't working properly. |
| 231 | pci_write_config_dword(dev, reg, l); | 225 | * If the BAR isn't implemented, all bits must be 0. If it's a |
| 232 | if (!sz || sz == 0xffffffff) | 226 | * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit |
| 233 | continue; | 227 | * 1 must be clear. |
| 234 | if (l == 0xffffffff) | 228 | */ |
| 235 | l = 0; | 229 | if (!sz || sz == 0xffffffff) |
| 236 | raw_sz = sz; | 230 | goto fail; |
| 237 | if ((l & PCI_BASE_ADDRESS_SPACE) == | 231 | |
| 238 | PCI_BASE_ADDRESS_SPACE_MEMORY) { | 232 | /* |
| 239 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); | 233 | * I don't know how l can have all bits set. Copied from old code. |
| 240 | /* | 234 | * Maybe it fixes a bug on some ancient platform. |
| 241 | * For 64bit prefetchable memory sz could be 0, if the | 235 | */ |
| 242 | * real size is bigger than 4G, so we need to check | 236 | if (l == 0xffffffff) |
| 243 | * szhi for that. | 237 | l = 0; |
| 244 | */ | 238 | |
| 245 | if (!is_64bit_memory(l) && !sz) | 239 | if (type == pci_bar_unknown) { |
| 246 | continue; | 240 | type = decode_bar(res, l); |
| 247 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; | 241 | res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; |
| 248 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; | 242 | if (type == pci_bar_io) { |
| 243 | l &= PCI_BASE_ADDRESS_IO_MASK; | ||
| 244 | mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff; | ||
| 249 | } else { | 245 | } else { |
| 250 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); | 246 | l &= PCI_BASE_ADDRESS_MEM_MASK; |
| 251 | if (!sz) | 247 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
| 252 | continue; | ||
| 253 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; | ||
| 254 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; | ||
| 255 | } | 248 | } |
| 256 | res->end = res->start + (unsigned long) sz; | 249 | } else { |
| 257 | res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; | 250 | res->flags |= (l & IORESOURCE_ROM_ENABLE); |
| 258 | if (is_64bit_memory(l)) { | 251 | l &= PCI_ROM_ADDRESS_MASK; |
| 259 | u32 szhi, lhi; | 252 | mask = (u32)PCI_ROM_ADDRESS_MASK; |
| 260 | 253 | } | |
| 261 | pci_read_config_dword(dev, reg+4, &lhi); | 254 | |
| 262 | pci_write_config_dword(dev, reg+4, ~0); | 255 | if (type == pci_bar_mem64) { |
| 263 | pci_read_config_dword(dev, reg+4, &szhi); | 256 | u64 l64 = l; |
| 264 | pci_write_config_dword(dev, reg+4, lhi); | 257 | u64 sz64 = sz; |
| 265 | sz64 = ((u64)szhi << 32) | raw_sz; | 258 | u64 mask64 = mask | (u64)~0 << 32; |
| 266 | l64 = ((u64)lhi << 32) | l; | 259 | |
| 267 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); | 260 | pci_read_config_dword(dev, pos + 4, &l); |
| 268 | next++; | 261 | pci_write_config_dword(dev, pos + 4, ~0); |
| 269 | #if BITS_PER_LONG == 64 | 262 | pci_read_config_dword(dev, pos + 4, &sz); |
| 270 | if (!sz64) { | 263 | pci_write_config_dword(dev, pos + 4, l); |
| 271 | res->start = 0; | 264 | |
| 272 | res->end = 0; | 265 | l64 |= ((u64)l << 32); |
| 273 | res->flags = 0; | 266 | sz64 |= ((u64)sz << 32); |
| 274 | continue; | 267 | |
| 275 | } | 268 | sz64 = pci_size(l64, sz64, mask64); |
| 276 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; | 269 | |
| 277 | res->end = res->start + sz64; | 270 | if (!sz64) |
| 278 | #else | 271 | goto fail; |
| 279 | if (sz64 > 0x100000000ULL) { | 272 | |
| 280 | dev_err(&dev->dev, "BAR %d: can't handle 64-bit" | 273 | if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { |
| 281 | " BAR\n", pos); | 274 | dev_err(&dev->dev, "can't handle 64-bit BAR\n"); |
| 282 | res->start = 0; | 275 | goto fail; |
| 283 | res->flags = 0; | 276 | } else if ((sizeof(resource_size_t) < 8) && l) { |
| 284 | } else if (lhi) { | 277 | /* Address above 32-bit boundary; disable the BAR */ |
| 285 | /* 64-bit wide address, treat as disabled */ | 278 | pci_write_config_dword(dev, pos, 0); |
| 286 | pci_write_config_dword(dev, reg, | 279 | pci_write_config_dword(dev, pos + 4, 0); |
| 287 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); | 280 | res->start = 0; |
| 288 | pci_write_config_dword(dev, reg+4, 0); | 281 | res->end = sz64; |
| 289 | res->start = 0; | 282 | } else { |
| 290 | res->end = sz; | 283 | res->start = l64; |
| 291 | } | 284 | res->end = l64 + sz64; |
| 292 | #endif | ||
| 293 | } | 285 | } |
| 286 | } else { | ||
| 287 | sz = pci_size(l, sz, mask); | ||
| 288 | |||
| 289 | if (!sz) | ||
| 290 | goto fail; | ||
| 291 | |||
| 292 | res->start = l; | ||
| 293 | res->end = l + sz; | ||
| 294 | } | 294 | } |
| 295 | |||
| 296 | out: | ||
| 297 | return (type == pci_bar_mem64) ? 1 : 0; | ||
| 298 | fail: | ||
| 299 | res->flags = 0; | ||
| 300 | goto out; | ||
| 301 | } | ||
| 302 | |||
| 303 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | ||
| 304 | { | ||
| 305 | unsigned int pos, reg; | ||
| 306 | |||
| 307 | for (pos = 0; pos < howmany; pos++) { | ||
| 308 | struct resource *res = &dev->resource[pos]; | ||
| 309 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); | ||
| 310 | pos += __pci_read_base(dev, pci_bar_unknown, res, reg); | ||
| 311 | } | ||
| 312 | |||
| 295 | if (rom) { | 313 | if (rom) { |
| 314 | struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; | ||
| 296 | dev->rom_base_reg = rom; | 315 | dev->rom_base_reg = rom; |
| 297 | res = &dev->resource[PCI_ROM_RESOURCE]; | 316 | res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | |
| 298 | res->name = pci_name(dev); | 317 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE | |
| 299 | pci_read_config_dword(dev, rom, &l); | 318 | IORESOURCE_SIZEALIGN; |
| 300 | pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE); | 319 | __pci_read_base(dev, pci_bar_mem32, res, rom); |
| 301 | pci_read_config_dword(dev, rom, &sz); | ||
| 302 | pci_write_config_dword(dev, rom, l); | ||
| 303 | if (l == 0xffffffff) | ||
| 304 | l = 0; | ||
| 305 | if (sz && sz != 0xffffffff) { | ||
| 306 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); | ||
| 307 | if (sz) { | ||
| 308 | res->flags = (l & IORESOURCE_ROM_ENABLE) | | ||
| 309 | IORESOURCE_MEM | IORESOURCE_PREFETCH | | ||
| 310 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE | | ||
| 311 | IORESOURCE_SIZEALIGN; | ||
| 312 | res->start = l & PCI_ROM_ADDRESS_MASK; | ||
| 313 | res->end = res->start + (unsigned long) sz; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | } | 320 | } |
| 317 | } | 321 | } |
| 318 | 322 | ||
| @@ -1053,7 +1057,8 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
| 1053 | } | 1057 | } |
| 1054 | } | 1058 | } |
| 1055 | 1059 | ||
| 1056 | if (bus->self) | 1060 | /* only one slot has pcie device */ |
| 1061 | if (bus->self && nr) | ||
| 1057 | pcie_aspm_init_link_state(bus->self); | 1062 | pcie_aspm_init_link_state(bus->self); |
| 1058 | 1063 | ||
| 1059 | return nr; | 1064 | return nr; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 12d489395fad..0fb365074288 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -923,6 +923,19 @@ static void __init quirk_ide_samemode(struct pci_dev *pdev) | |||
| 923 | } | 923 | } |
| 924 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); | 924 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); |
| 925 | 925 | ||
| 926 | /* | ||
| 927 | * Some ATA devices break if put into D3 | ||
| 928 | */ | ||
| 929 | |||
| 930 | static void __devinit quirk_no_ata_d3(struct pci_dev *pdev) | ||
| 931 | { | ||
| 932 | /* Quirk the legacy ATA devices only. The AHCI ones are ok */ | ||
| 933 | if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) | ||
| 934 | pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; | ||
| 935 | } | ||
| 936 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3); | ||
| 937 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3); | ||
| 938 | |||
| 926 | /* This was originally an Alpha specific thing, but it really fits here. | 939 | /* This was originally an Alpha specific thing, but it really fits here. |
| 927 | * The i82375 PCI/EISA bridge appears as non-classified. Fix that. | 940 | * The i82375 PCI/EISA bridge appears as non-classified. Fix that. |
| 928 | */ | 941 | */ |
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 1ebbe883f786..13a3d9ad92db 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h | |||
| @@ -277,6 +277,7 @@ enum acpi_prefered_pm_profiles { | |||
| 277 | #define BAF_LEGACY_DEVICES 0x0001 | 277 | #define BAF_LEGACY_DEVICES 0x0001 |
| 278 | #define BAF_8042_KEYBOARD_CONTROLLER 0x0002 | 278 | #define BAF_8042_KEYBOARD_CONTROLLER 0x0002 |
| 279 | #define BAF_MSI_NOT_SUPPORTED 0x0008 | 279 | #define BAF_MSI_NOT_SUPPORTED 0x0008 |
| 280 | #define BAF_PCIE_ASPM_CONTROL 0x0010 | ||
| 280 | 281 | ||
| 281 | #define FADT2_REVISION_ID 3 | 282 | #define FADT2_REVISION_ID 3 |
| 282 | #define FADT2_MINUS_REVISION_ID 2 | 283 | #define FADT2_MINUS_REVISION_ID 2 |
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index f41335ba6337..45329fca1b64 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/scatterlist.h> | 8 | #include <linux/scatterlist.h> |
| 9 | 9 | ||
| 10 | #include <asm-generic/dma-coherent.h> | ||
| 11 | |||
| 10 | /* | 12 | /* |
| 11 | * DMA-consistent mapping functions. These allocate/free a region of | 13 | * DMA-consistent mapping functions. These allocate/free a region of |
| 12 | * uncached, unwrite-buffered mapped memory space for use with DMA | 14 | * uncached, unwrite-buffered mapped memory space for use with DMA |
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h index cb2fb25ff8d9..da8ef8e8f842 100644 --- a/include/asm-cris/dma-mapping.h +++ b/include/asm-cris/dma-mapping.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 14 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
| 15 | 15 | ||
| 16 | #ifdef CONFIG_PCI | 16 | #ifdef CONFIG_PCI |
| 17 | #include <asm-generic/dma-coherent.h> | ||
| 18 | |||
| 17 | void *dma_alloc_coherent(struct device *dev, size_t size, | 19 | void *dma_alloc_coherent(struct device *dev, size_t size, |
| 18 | dma_addr_t *dma_handle, gfp_t flag); | 20 | dma_addr_t *dma_handle, gfp_t flag); |
| 19 | 21 | ||
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h new file mode 100644 index 000000000000..85a3ffaa0242 --- /dev/null +++ b/include/asm-generic/dma-coherent.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | #ifndef DMA_COHERENT_H | ||
| 2 | #define DMA_COHERENT_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | ||
| 5 | /* | ||
| 6 | * These two functions are only for dma allocator. | ||
| 7 | * Don't use them in device drivers. | ||
| 8 | */ | ||
| 9 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | ||
| 10 | dma_addr_t *dma_handle, void **ret); | ||
| 11 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr); | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Standard interface | ||
| 15 | */ | ||
| 16 | #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | ||
| 17 | extern int | ||
| 18 | dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 19 | dma_addr_t device_addr, size_t size, int flags); | ||
| 20 | |||
| 21 | extern void | ||
| 22 | dma_release_declared_memory(struct device *dev); | ||
| 23 | |||
| 24 | extern void * | ||
| 25 | dma_mark_declared_memory_occupied(struct device *dev, | ||
| 26 | dma_addr_t device_addr, size_t size); | ||
| 27 | #else | ||
| 28 | #define dma_alloc_from_coherent(dev, size, handle, ret) (0) | ||
| 29 | #define dma_release_from_coherent(dev, order, vaddr) (0) | ||
| 30 | #endif | ||
| 31 | |||
| 32 | #endif | ||
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 6c0b8a2de143..627315ecdb52 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/scatterlist.h> | 5 | #include <linux/scatterlist.h> |
| 6 | #include <asm/cacheflush.h> | 6 | #include <asm/cacheflush.h> |
| 7 | #include <asm/io.h> | 7 | #include <asm/io.h> |
| 8 | #include <asm-generic/dma-coherent.h> | ||
| 8 | 9 | ||
| 9 | extern struct bus_type pci_bus_type; | 10 | extern struct bus_type pci_bus_type; |
| 10 | 11 | ||
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index 0eaa9bf6011f..ad9cd6d49bfc 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h | |||
| @@ -249,25 +249,5 @@ static inline int dma_get_cache_alignment(void) | |||
| 249 | 249 | ||
| 250 | #define dma_is_consistent(d, h) (1) | 250 | #define dma_is_consistent(d, h) (1) |
| 251 | 251 | ||
| 252 | #ifdef CONFIG_X86_32 | 252 | #include <asm-generic/dma-coherent.h> |
| 253 | # define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | ||
| 254 | struct dma_coherent_mem { | ||
| 255 | void *virt_base; | ||
| 256 | u32 device_base; | ||
| 257 | int size; | ||
| 258 | int flags; | ||
| 259 | unsigned long *bitmap; | ||
| 260 | }; | ||
| 261 | |||
| 262 | extern int | ||
| 263 | dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 264 | dma_addr_t device_addr, size_t size, int flags); | ||
| 265 | |||
| 266 | extern void | ||
| 267 | dma_release_declared_memory(struct device *dev); | ||
| 268 | |||
| 269 | extern void * | ||
| 270 | dma_mark_declared_memory_occupied(struct device *dev, | ||
| 271 | dma_addr_t device_addr, size_t size); | ||
| 272 | #endif /* CONFIG_X86_32 */ | ||
| 273 | #endif | 253 | #endif |
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index c975caf75385..f8598f583944 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h | |||
| @@ -8,3 +8,4 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | |||
| 8 | unsigned long align_mask); | 8 | unsigned long align_mask); |
| 9 | extern void iommu_area_free(unsigned long *map, unsigned long start, | 9 | extern void iommu_area_free(unsigned long *map, unsigned long start, |
| 10 | unsigned int nr); | 10 | unsigned int nr); |
| 11 | extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); | ||
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index a1a1e618e996..91ba0b338b47 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h | |||
| @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); | |||
| 27 | extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); | 27 | extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); |
| 28 | extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); | 28 | extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); |
| 29 | extern void pci_disable_link_state(struct pci_dev *pdev, int state); | 29 | extern void pci_disable_link_state(struct pci_dev *pdev, int state); |
| 30 | extern void pcie_no_aspm(void); | ||
| 30 | #else | 31 | #else |
| 31 | static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) | 32 | static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) |
| 32 | { | 33 | { |
| @@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
| 40 | static inline void pci_disable_link_state(struct pci_dev *pdev, int state) | 41 | static inline void pci_disable_link_state(struct pci_dev *pdev, int state) |
| 41 | { | 42 | { |
| 42 | } | 43 | } |
| 44 | |||
| 45 | static inline void pcie_no_aspm(void) | ||
| 46 | { | ||
| 47 | } | ||
| 43 | #endif | 48 | #endif |
| 44 | 49 | ||
| 45 | #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ | 50 | #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 1d296d31abe0..825be3878f68 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -124,6 +124,8 @@ enum pci_dev_flags { | |||
| 124 | * generation too. | 124 | * generation too. |
| 125 | */ | 125 | */ |
| 126 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, | 126 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, |
| 127 | /* Device configuration is irrevocably lost if disabled into D3 */ | ||
| 128 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, | ||
| 127 | }; | 129 | }; |
| 128 | 130 | ||
| 129 | typedef unsigned short __bitwise pci_bus_flags_t; | 131 | typedef unsigned short __bitwise pci_bus_flags_t; |
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 19958b929905..450684f7eaac 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h | |||
| @@ -374,6 +374,7 @@ | |||
| 374 | #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ | 374 | #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ |
| 375 | #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ | 375 | #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ |
| 376 | #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ | 376 | #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ |
| 377 | #define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ | ||
| 377 | #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ | 378 | #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ |
| 378 | #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ | 379 | #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ |
| 379 | #define PCI_EXP_DEVCTL 8 /* Device Control */ | 380 | #define PCI_EXP_DEVCTL 8 /* Device Control */ |
diff --git a/init/Kconfig b/init/Kconfig index 43d6989c275f..250e02c8f8f9 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -802,6 +802,10 @@ config PROC_PAGE_MONITOR | |||
| 802 | 802 | ||
| 803 | endmenu # General setup | 803 | endmenu # General setup |
| 804 | 804 | ||
| 805 | config HAVE_GENERIC_DMA_COHERENT | ||
| 806 | bool | ||
| 807 | default n | ||
| 808 | |||
| 805 | config SLABINFO | 809 | config SLABINFO |
| 806 | bool | 810 | bool |
| 807 | depends on PROC_FS | 811 | depends on PROC_FS |
diff --git a/kernel/Makefile b/kernel/Makefile index 54f69837d35a..4e1d7df7c3e2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -84,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | |||
| 84 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o | 84 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o |
| 85 | obj-$(CONFIG_MARKERS) += marker.o | 85 | obj-$(CONFIG_MARKERS) += marker.o |
| 86 | obj-$(CONFIG_LATENCYTOP) += latencytop.o | 86 | obj-$(CONFIG_LATENCYTOP) += latencytop.o |
| 87 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | ||
| 87 | obj-$(CONFIG_FTRACE) += trace/ | 88 | obj-$(CONFIG_FTRACE) += trace/ |
| 88 | obj-$(CONFIG_TRACING) += trace/ | 89 | obj-$(CONFIG_TRACING) += trace/ |
| 89 | obj-$(CONFIG_SMP) += sched_cpupri.o | 90 | obj-$(CONFIG_SMP) += sched_cpupri.o |
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c new file mode 100644 index 000000000000..7517115a8cce --- /dev/null +++ b/kernel/dma-coherent.c | |||
| @@ -0,0 +1,154 @@ | |||
| 1 | /* | ||
| 2 | * Coherent per-device memory handling. | ||
| 3 | * Borrowed from i386 | ||
| 4 | */ | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | #include <linux/dma-mapping.h> | ||
| 7 | |||
| 8 | struct dma_coherent_mem { | ||
| 9 | void *virt_base; | ||
| 10 | u32 device_base; | ||
| 11 | int size; | ||
| 12 | int flags; | ||
| 13 | unsigned long *bitmap; | ||
| 14 | }; | ||
| 15 | |||
| 16 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
| 17 | dma_addr_t device_addr, size_t size, int flags) | ||
| 18 | { | ||
| 19 | void __iomem *mem_base = NULL; | ||
| 20 | int pages = size >> PAGE_SHIFT; | ||
| 21 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
| 22 | |||
| 23 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | ||
| 24 | goto out; | ||
| 25 | if (!size) | ||
| 26 | goto out; | ||
| 27 | if (dev->dma_mem) | ||
| 28 | goto out; | ||
| 29 | |||
| 30 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
| 31 | |||
| 32 | mem_base = ioremap(bus_addr, size); | ||
| 33 | if (!mem_base) | ||
| 34 | goto out; | ||
| 35 | |||
| 36 | dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
| 37 | if (!dev->dma_mem) | ||
| 38 | goto out; | ||
| 39 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
| 40 | if (!dev->dma_mem->bitmap) | ||
| 41 | goto free1_out; | ||
| 42 | |||
| 43 | dev->dma_mem->virt_base = mem_base; | ||
| 44 | dev->dma_mem->device_base = device_addr; | ||
| 45 | dev->dma_mem->size = pages; | ||
| 46 | dev->dma_mem->flags = flags; | ||
| 47 | |||
| 48 | if (flags & DMA_MEMORY_MAP) | ||
| 49 | return DMA_MEMORY_MAP; | ||
| 50 | |||
| 51 | return DMA_MEMORY_IO; | ||
| 52 | |||
| 53 | free1_out: | ||
| 54 | kfree(dev->dma_mem); | ||
| 55 | out: | ||
| 56 | if (mem_base) | ||
| 57 | iounmap(mem_base); | ||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
| 61 | |||
| 62 | void dma_release_declared_memory(struct device *dev) | ||
| 63 | { | ||
| 64 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 65 | |||
| 66 | if (!mem) | ||
| 67 | return; | ||
| 68 | dev->dma_mem = NULL; | ||
| 69 | iounmap(mem->virt_base); | ||
| 70 | kfree(mem->bitmap); | ||
| 71 | kfree(mem); | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
| 74 | |||
| 75 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
| 76 | dma_addr_t device_addr, size_t size) | ||
| 77 | { | ||
| 78 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
| 79 | int pos, err; | ||
| 80 | int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); | ||
| 81 | |||
| 82 | pages >>= PAGE_SHIFT; | ||
| 83 | |||
| 84 | if (!mem) | ||
| 85 | return ERR_PTR(-EINVAL); | ||
| 86 | |||
| 87 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | ||
| 88 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); | ||
| 89 | if (err != 0) | ||
| 90 | return ERR_PTR(err); | ||
| 91 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
| 92 | } | ||
| 93 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
| 94 | |||
| 95 | /** | ||
| 96 | * Try to allocate memory from the per-device coherent area. | ||
| 97 | * | ||
| 98 | * @dev: device from which we allocate memory | ||
| 99 | * @size: size of requested memory area | ||
| 100 | * @dma_handle: This will be filled with the correct dma handle | ||
| 101 | * @ret: This pointer will be filled with the virtual address | ||
| 102 | * to allocated area. | ||
| 103 | * | ||
| 104 | * This function should be only called from per-arch %dma_alloc_coherent() | ||
| 105 | * to support allocation from per-device coherent memory pools. | ||
| 106 | * | ||
| 107 | * Returns 0 if dma_alloc_coherent should continue with allocating from | ||
| 108 | * generic memory areas, or !0 if dma_alloc_coherent should return %ret. | ||
| 109 | */ | ||
| 110 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | ||
| 111 | dma_addr_t *dma_handle, void **ret) | ||
| 112 | { | ||
| 113 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 114 | int order = get_order(size); | ||
| 115 | |||
| 116 | if (mem) { | ||
| 117 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | ||
| 118 | order); | ||
| 119 | if (page >= 0) { | ||
| 120 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | ||
| 121 | *ret = mem->virt_base + (page << PAGE_SHIFT); | ||
| 122 | memset(*ret, 0, size); | ||
| 123 | } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) | ||
| 124 | *ret = NULL; | ||
| 125 | } | ||
| 126 | return (mem != NULL); | ||
| 127 | } | ||
| 128 | |||
| 129 | /** | ||
| 130 | * Try to free the memory allocated from per-device coherent memory pool. | ||
| 131 | * @dev: device from which the memory was allocated | ||
| 132 | * @order: the order of pages allocated | ||
| 133 | * @vaddr: virtual address of allocated pages | ||
| 134 | * | ||
| 135 | * This checks whether the memory was allocated from the per-device | ||
| 136 | * coherent memory pool and if so, releases that memory. | ||
| 137 | * | ||
| 138 | * Returns 1 if we correctly released the memory, or 0 if | ||
| 139 | * %dma_release_coherent() should proceed with releasing memory from | ||
| 140 | * generic pools. | ||
| 141 | */ | ||
| 142 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr) | ||
| 143 | { | ||
| 144 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
| 145 | |||
| 146 | if (mem && vaddr >= mem->virt_base && vaddr < | ||
| 147 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
| 148 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
| 149 | |||
| 150 | bitmap_release_region(mem->bitmap, page, order); | ||
| 151 | return 1; | ||
| 152 | } | ||
| 153 | return 0; | ||
| 154 | } | ||
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index a3b8d4c3f77a..889ddce2021e 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
| @@ -80,3 +80,11 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) | |||
| 80 | } | 80 | } |
| 81 | } | 81 | } |
| 82 | EXPORT_SYMBOL(iommu_area_free); | 82 | EXPORT_SYMBOL(iommu_area_free); |
| 83 | |||
| 84 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len) | ||
| 85 | { | ||
| 86 | unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE); | ||
| 87 | |||
| 88 | return size >> PAGE_SHIFT; | ||
| 89 | } | ||
| 90 | EXPORT_SYMBOL(iommu_num_pages); | ||
