author     Yinghai Lu <yinghai@kernel.org>        2013-01-24 15:20:16 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>   2013-01-29 22:36:53 -0500
commit     ac2cbab21f318e19bc176a7f38a120cec835220f (patch)
tree       1348f7e5a7e50b85bf56a9e1155bed70be13cd51
parent     38fa4175e60d98fb1c9815fb14f8057576dade73 (diff)
x86: Don't panic if can not alloc buffer for swiotlb
Normal boot path on a system with IOMMU support: the swiotlb buffer is allocated early,
then the IOMMU is initialized; if the Intel or AMD IOMMU can be set up properly, the
swiotlb buffer is freed again.

The early allocation uses bootmem and can panic when we try to use kdump with the
buffer above 4G only, or when memmap= is used to remove the memory under 4G, for
example memmap=4095M$1M.

According to Eric, add a _nopanic allocation and a no_iotlb_memory flag so that
map single fails later only if swiotlb is still needed.

-v2: don't pass nopanic, and use an -ENOMEM return value, according to Eric.
     Panic early instead of using swiotlb_full to panic, according to Eric/Konrad.
-v3: make swiotlb_init non-panicking; this affects arm64, ia64, powerpc, tile,
     unicore32 and x86.
-v4: clean up swiotlb_init by removing swiotlb_init_with_default_size.

Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-36-git-send-email-yinghai@kernel.org
Reviewed-and-tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Cc: linux-mips@linux-mips.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: Shuah Khan <shuahkhan@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
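In short, the patch converts a hard failure at init time into a deferred one: the early
bootmem allocation no longer panics, it records the failure in no_iotlb_memory and
returns -ENOMEM, and the kernel only panics later, in swiotlb_tbl_map_single(), if a
bounce buffer is actually requested. The fragment below is a minimal userspace C sketch
of that pattern, not kernel code; all names and sizes apart from the no_iotlb_memory-style
flag are made up for illustration.

/*
 * Minimal userspace sketch (NOT kernel code) of the deferred-failure
 * pattern this patch introduces.  Allocation failure at init time is
 * only recorded; the abort happens later, if and when a bounce buffer
 * is actually requested.  All names here are illustrative.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *bounce_buf;               /* stands in for the swiotlb buffer  */
static bool  no_bounce_memory;         /* counterpart of no_iotlb_memory    */

static int bounce_init(size_t size)
{
        bounce_buf = malloc(size);     /* "nopanic" allocation attempt      */
        if (!bounce_buf) {
                no_bounce_memory = true;
                fprintf(stderr, "bounce buffer: no low memory, deferring failure\n");
                return -ENOMEM;        /* caller may ignore or panic itself */
        }
        return 0;
}

static void *bounce_map(size_t size)
{
        if (no_bounce_memory) {
                /* only now, when a mapping is really needed, is it fatal */
                fprintf(stderr, "cannot provide a DMA bounce buffer\n");
                abort();
        }
        (void)size;                    /* a real implementation carves out a slot */
        return bounce_buf;
}

int main(void)
{
        bounce_init(64UL << 20);       /* 64MB, mirroring swiotlb's default */
        printf("mapped at %p\n", bounce_map(4096));
        return 0;
}

Callers that cannot tolerate the deferral, like the Octeon and Xen code changed in this
patch, simply check the new return value and panic immediately.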
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c    3
-rw-r--r--  drivers/xen/swiotlb-xen.c               4
-rw-r--r--  include/linux/swiotlb.h                 2
-rw-r--r--  lib/swiotlb.c                          47
4 files changed, 35 insertions, 21 deletions
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 41dd00884975..02f244475207 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -317,7 +317,8 @@ void __init plat_swiotlb_setup(void)
 
         octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
 
-        swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);
+        if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
+                panic("Cannot allocate SWIOTLB buffer");
 
         mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
 }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index af47e7594460..1d94316f0ea4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -231,7 +231,9 @@ retry:
         }
         start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
         if (early) {
-                swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
+                         verbose))
+                        panic("Cannot allocate SWIOTLB buffer");
                 rc = 0;
         } else
                 rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 071d62c214a6..2de42f9401d2 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -23,7 +23,7 @@ extern int swiotlb_force;
 #define IO_TLB_SHIFT 11
 
 extern void swiotlb_init(int verbose);
-extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 196b06984dec..bfe02b8fc55b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
         return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
+static bool no_iotlb_memory;
+
 void swiotlb_print_info(void)
 {
         unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
         unsigned char *vstart, *vend;
 
+        if (no_iotlb_memory) {
+                pr_warn("software IO TLB: No low mem\n");
+                return;
+        }
+
         vstart = phys_to_virt(io_tlb_start);
         vend = phys_to_virt(io_tlb_end);
 
@@ -136,7 +143,7 @@ void swiotlb_print_info(void)
                 bytes >> 20, vstart, vend - 1);
 }
 
-void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
         void *v_overflow_buffer;
         unsigned long i, bytes;
@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
         /*
          * Get the overflow emergency buffer
          */
-        v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+        v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
+                                                PAGE_ALIGN(io_tlb_overflow));
         if (!v_overflow_buffer)
-                panic("Cannot allocate SWIOTLB overflow buffer!\n");
+                return -ENOMEM;
 
         io_tlb_overflow_buffer = __pa(v_overflow_buffer);
 
@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
         if (verbose)
                 swiotlb_print_info();
+
+        return 0;
 }
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-static void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init
+swiotlb_init(int verbose)
 {
+        /* default to 64MB */
+        size_t default_size = 64UL<<20;
         unsigned char *vstart;
         unsigned long bytes;
 
@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 
         bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
-        /*
-         * Get IO TLB memory from the low pages
-         */
-        vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
-        if (!vstart)
-                panic("Cannot allocate SWIOTLB buffer");
-
-        swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
-}
+        /* Get IO TLB memory from the low pages */
+        vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+                return;
 
-void __init
-swiotlb_init(int verbose)
-{
-        swiotlb_init_with_default_size(64 * (1<<20), verbose);        /* default to 64MB */
+        if (io_tlb_start)
+                free_bootmem(io_tlb_start,
+                             PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+        pr_warn("Cannot allocate SWIOTLB buffer");
+        no_iotlb_memory = true;
 }
 
 /*
@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
         unsigned long offset_slots;
         unsigned long max_slots;
 
+        if (no_iotlb_memory)
+                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
         mask = dma_get_seg_boundary(hwdev);
 
         tbl_dma_addr &= mask;