path: root/lib/swiotlb.c
author	Alexander Duyck <alexander.h.duyck@intel.com>	2012-10-15 13:19:34 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-10-30 09:32:06 -0400
commit	ee3f6ba896c7e62004b677b0018a0b29b9b26472 (patch)
tree	035a4d1280053dbd68a78b1b31df3d6056f94440 /lib/swiotlb.c
parent	ff7204a74931fc67ed13d8febbc322a46833c1fa (diff)
swiotlb: Make io_tlb_overflow_buffer a physical address
This change makes it so that we can avoid virt_to_phys overhead when using the io_tlb_overflow_buffer. My original plan was to completely remove the value and replace it with a constant, but I had seen that there were recent patches stating this couldn't be done until all device drivers that depended on that functionality were updated.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
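The idea can be illustrated outside the kernel. The stand-alone C sketch below is only an analogy, not kernel code: the stub types, the direct_map_base value, and the helper stand-ins are assumptions made so it compiles, but it shows why caching the overflow buffer as a physical address removes a virt_to_phys() translation from the paths that hand out its DMA address.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

struct device { uint64_t dma_offset; };	/* stand-in for the kernel's struct device */

/* Assumed direct-map base, only so the stub translation has something to do. */
static const uint64_t direct_map_base = 0xffff880000000000ULL;

/* Stand-ins for the kernel helpers the patch relies on. */
static phys_addr_t virt_to_phys(const void *va)
{
	return (uint64_t)(uintptr_t)va - direct_map_base;
}

static dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
{
	return pa + dev->dma_offset;
}

/* Before the patch: a virtual pointer, translated on every error return. */
static void *overflow_buffer_va;

static dma_addr_t overflow_dma_old(struct device *dev)
{
	return phys_to_dma(dev, virt_to_phys(overflow_buffer_va));
}

/* After the patch: the physical address is computed once at init and cached. */
static phys_addr_t io_tlb_overflow_buffer;

static dma_addr_t overflow_dma_new(struct device *dev)
{
	return phys_to_dma(dev, io_tlb_overflow_buffer);
}

int main(void)
{
	struct device dev = { .dma_offset = 0 };

	overflow_buffer_va = (void *)(uintptr_t)(direct_map_base + 0x1000);
	io_tlb_overflow_buffer = virt_to_phys(overflow_buffer_va);	/* done once, as in init */

	printf("old path: %#llx, new path: %#llx\n",
	       (unsigned long long)overflow_dma_old(&dev),
	       (unsigned long long)overflow_dma_new(&dev));
	return 0;
}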
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--	lib/swiotlb.c	61
1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8c4791f17ec1..f8c0d4e1d1d3 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-static void *io_tlb_overflow_buffer;
+static phys_addr_t io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -138,6 +138,7 @@ void swiotlb_print_info(void)
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
+	void *v_overflow_buffer;
 	unsigned long i, bytes;
 
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -147,6 +148,15 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
+	 * Get the overflow emergency buffer
+	 */
+	v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+	if (!v_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+	/*
 	 * Allocate and initialize the free list array. This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
@@ -157,12 +167,6 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_index = 0;
 	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
-	if (!io_tlb_overflow_buffer)
-		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
 		swiotlb_print_info();
 }
@@ -252,6 +256,7 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
+	unsigned char *v_overflow_buffer;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
@@ -262,6 +267,16 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	memset(tlb, 0, bytes);
 
 	/*
+	 * Get the overflow emergency buffer
+	 */
+	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+						     get_order(io_tlb_overflow));
+	if (!v_overflow_buffer)
+		goto cleanup2;
+
+	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
+
+	/*
 	 * Allocate and initialize the free list array. This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
@@ -269,7 +284,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
 				      get_order(io_tlb_nslabs * sizeof(int)));
 	if (!io_tlb_list)
-		goto cleanup2;
+		goto cleanup3;
 
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
@@ -280,18 +295,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 				  get_order(io_tlb_nslabs *
 					    sizeof(phys_addr_t)));
 	if (!io_tlb_orig_addr)
-		goto cleanup3;
+		goto cleanup4;
 
 	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-					  get_order(io_tlb_overflow));
-	if (!io_tlb_overflow_buffer)
-		goto cleanup4;
-
 	swiotlb_print_info();
 
 	late_alloc = 1;
@@ -299,13 +306,13 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr,
-		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-	io_tlb_orig_addr = NULL;
-cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 							 sizeof(int)));
 	io_tlb_list = NULL;
+cleanup3:
+	free_pages((unsigned long)v_overflow_buffer,
+		   get_order(io_tlb_overflow));
+	io_tlb_overflow_buffer = 0;
 cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
@@ -315,11 +322,11 @@ cleanup2:
 
 void __init swiotlb_free(void)
 {
-	if (!io_tlb_overflow_buffer)
+	if (!io_tlb_orig_addr)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)io_tlb_overflow_buffer,
+		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
 			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
@@ -328,7 +335,7 @@ void __init swiotlb_free(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(__pa(io_tlb_overflow_buffer),
+		free_bootmem_late(io_tlb_overflow_buffer,
 				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
 				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
@@ -698,7 +705,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir);
 	if (!map) {
 		swiotlb_full(dev, size, dir, 1);
-		map = io_tlb_overflow_buffer;
+		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
 	dev_addr = swiotlb_virt_to_bus(dev, map);
@@ -708,7 +715,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	if (!dma_capable(dev, dev_addr, size)) {
 		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
 	return dev_addr;
@@ -927,7 +934,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 