author		Jan Beulich <jbeulich@novell.com>	2007-02-05 21:51:25 -0500
committer	Tony Luck <tony.luck@intel.com>	2007-02-05 21:51:25 -0500
commit		563aaf064f3776623ff5e7aef511ac2eb7e5f0bb (patch)
tree		0ae2565cf94dc705a58984f804d0c2046339abf9
parent		93fbff63e62b87fe450814db41f859d60b048fb8 (diff)
[IA64] swiotlb cleanup
- add proper __init decoration to swiotlb's init code (and the code calling
  it, where not already the case)
- replace uses of 'unsigned long' with dma_addr_t where appropriate
- do miscellaneous simplification and cleanup

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
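A note on the second bullet: dma_addr_t is the kernel's type for bus (DMA)
addresses and can be wider than unsigned long, e.g. on a 32-bit kernel
addressing 64-bit PCI space, so holding a bus address in an unsigned long can
silently truncate it. A minimal illustrative sketch (not code from this
patch; the function and values are hypothetical):

	#include <linux/types.h>

	static void sketch(dma_addr_t bus_addr)
	{
		/* Suppose bus_addr == 0x100000000 (just above 4GB) and
		 * unsigned long is 32 bits wide. */
		unsigned long truncated = bus_addr;	/* becomes 0x0 */
		dma_addr_t preserved = bus_addr;	/* keeps all bits */

		(void)truncated;
		(void)preserved;
	}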
-rw-r--r--	arch/x86_64/kernel/pci-swiotlb.c	 2
-rw-r--r--	lib/swiotlb.c				56
2 files changed, 33 insertions, 25 deletions
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = NULL,
 };
 
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
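The __init marker above is what the first bullet of the commit message refers
to: it places a function in the kernel's .init.text section, whose memory is
freed once boot completes. A function that calls an __init function must
itself be __init (or only run during boot), otherwise modpost reports a
section mismatch. The general pattern, as a sketch rather than code from this
patch:

	#include <linux/init.h>

	/* Runs once during boot; discarded with the rest of .init.text. */
	static int __init example_setup(void)
	{
		return 0;
	}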
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bc684d1fd426..067eed5b2758 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic DMA mapping support. 2 * Dynamic DMA mapping support.
3 * 3 *
4 * This implementation is for IA-64 and EM64T platforms that do not support 4 * This implementation is a fallback for platforms that do not support
5 * I/O TLBs (aka DMA address translation hardware). 5 * I/O TLBs (aka DMA address translation hardware).
6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -129,23 +129,25 @@ __setup("swiotlb=", setup_io_tlb_npages);
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void
-swiotlb_init_with_default_size (size_t default_size)
+void __init
+swiotlb_init_with_default_size(size_t default_size)
 {
-	unsigned long i;
+	unsigned long i, bytes;
 
 	if (!io_tlb_nslabs) {
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
 
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	io_tlb_end = io_tlb_start + bytes;
 
 	/*
 	 * Allocate and initialize the free list array. This array is used
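The new bytes local caches the pool size in bytes. Note that
io_tlb_nslabs << IO_TLB_SHIFT is exactly the old
io_tlb_nslabs * (1 << IO_TLB_SHIFT): shifting left by s multiplies by 2^s.
Illustrative arithmetic (assuming IO_TLB_SHIFT == 11, i.e. 2KB slabs, as in
this file):

	/* 2048 slabs of 2KB each:
	 * 2048 * (1 << 11) == 2048 << 11 == 4194304 bytes == 4MB */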
@@ -162,12 +164,15 @@ swiotlb_init_with_default_size (size_t default_size)
 	 * Get the overflow emergency buffer
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	if (!io_tlb_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
 	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
 
-void
-swiotlb_init (void)
+void __init
+swiotlb_init(void)
 {
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
@@ -178,9 +183,9 @@ swiotlb_init (void)
  * This should be just like above, but with some error catching.
  */
 int
-swiotlb_late_init_with_default_size (size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size)
 {
-	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
 	unsigned int order;
 
 	if (!io_tlb_nslabs) {
@@ -191,8 +196,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
 	io_tlb_nslabs = SLABS_PER_PAGE << order;
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +211,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_start)
 		goto cleanup1;
 
-	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+	if (order != get_order(bytes)) {
 		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
 		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
 		io_tlb_nslabs = SLABS_PER_PAGE << order;
+		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	}
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_end = io_tlb_start + bytes;
+	memset(io_tlb_start, 0, bytes);
 
 	/*
 	 * Allocate and initialize the free list array. This array is used
@@ -242,8 +249,8 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
-	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", bytes >> 20,
 	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
 	return 0;
@@ -256,8 +263,8 @@ cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 							 sizeof(int)));
 	io_tlb_list = NULL;
-	io_tlb_end = NULL;
 cleanup2:
+	io_tlb_end = NULL;
 	free_pages((unsigned long)io_tlb_start, order);
 	io_tlb_start = NULL;
 cleanup1:
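The label move is a subtle fix in the unwind ladder: the labels fall through
from cleanup4 down to cleanup1, each undoing one allocation, and io_tlb_end
should be cleared at the same step that frees io_tlb_start. Previously a jump
directly to cleanup2 freed the buffer but left io_tlb_end pointing at freed
memory. The general kernel pattern, sketched generically (names are
hypothetical):

	err_c:			/* undo step C, then fall through */
		undo_c();
	err_b:			/* undo step B */
		undo_b();
	err_a:			/* undo step A */
		undo_a();
		return -ENOMEM;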
@@ -433,7 +440,7 @@ void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       dma_addr_t *dma_handle, gfp_t flags)
 {
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
 
@@ -473,8 +480,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 
 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
-		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
-		       (unsigned long long)*hwdev->dma_mask, dev_addr);
+		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dev_addr);
 		panic("swiotlb_alloc_coherent: allocated memory is out of "
 		      "range for device");
 	}
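The format-string change here follows from the dma_addr_t conversion: since
dma_addr_t may be 32 or 64 bits wide depending on configuration, no single
printk length modifier matches it directly. The kernel idiom is to cast to
unsigned long long and print with %llx (or the equivalent %Lx used above):

	/* Portable printing of a dma_addr_t: */
	printk(KERN_DEBUG "dev_addr = 0x%llx\n",
	       (unsigned long long)dev_addr);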
@@ -504,7 +512,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
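Similarly, size here is a size_t, whose width also varies by platform; %zu is
the C99 length modifier dedicated to size_t, so the fixed format string
matches the argument on every configuration:

	printk(KERN_ERR "len = %zu\n", size);	/* %zu matches size_t */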
@@ -525,7 +533,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_bus(ptr);
+	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -669,7 +677,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	       int dir)
 {
 	void *addr;
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
@@ -765,7 +773,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * this function.
  */
 int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	return virt_to_bus(io_tlb_end - 1) <= mask;
 }
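The body of swiotlb_dma_supported (unchanged by this patch apart from the
prototype style) is the whole story: a device can be served by the swiotlb
if the bus address of the last byte of the bounce pool fits within the
device's DMA mask. Hypothetical usage sketch:

	/* A device limited to 32-bit DMA is supported as long as the
	 * bounce pool ends below 4GB in bus address space. */
	if (swiotlb_dma_supported(hwdev, 0xffffffffULL))
		/* safe to bounce-buffer through the swiotlb */;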