author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-21 21:06:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-21 21:06:55 -0500
commit	2ef14f465b9e096531343f5b734cffc5f759f4a6 (patch)
tree	07b504d7105842a4b1a74cf1e153023a02fb9c1e /lib
parent	cb715a836642e0ec69350670d1c2f800f3e2d2e4 (diff)
parent	0da3e7f526fde7a6522a3038b7ce609fc50f6707 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm changes from Peter Anvin:
 "This is a huge set of several partly interrelated (and concurrently
  developed) changes, which is why the branch history is messier than
  one would like.

  The *really* big items are two humongous patchsets mostly developed
  by Yinghai Lu at my request, which completely revamp the way we
  create initial page tables.  In particular, rather than estimating
  how much memory we will need for page tables and then building them
  into that memory -- a calculation that has proven to be incredibly
  fragile -- we now build them (on 64 bits) with the aid of a
  "pseudo-linear mode" -- a #PF handler which creates temporary page
  tables on demand.

  This has several advantages:

  1. It makes it much easier to support things that need access to
     data very early (a followon patchset uses this to load microcode
     way early in the kernel startup).

  2. It allows the kernel and all the kernel data objects to be
     invoked from above the 4 GB limit.  This allows kdump to work on
     very large systems.

  3. It greatly reduces the difference between Xen and native (Xen's
     equivalent of the #PF handler is the temporary page tables
     created by the domain builder), eliminating a bunch of fragile
     hooks.

  The patch series also gets us a bit closer to W^X.

  Additional work in this pull is the 64-bit get_user() work which you
  were also involved with, and a bunch of cleanups/speedups to
  __phys_addr()/__pa()."

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (105 commits)
  x86, mm: Move reserving low memory later in initialization
  x86, doc: Clarify the use of asm("%edx") in uaccess.h
  x86, mm: Redesign get_user with a __builtin_choose_expr hack
  x86: Be consistent with data size in getuser.S
  x86, mm: Use a bitfield to mask nuisance get_user() warnings
  x86/kvm: Fix compile warning in kvm_register_steal_time()
  x86-32: Add support for 64bit get_user()
  x86-32, mm: Remove reference to alloc_remap()
  x86-32, mm: Remove reference to resume_map_numa_kva()
  x86-32, mm: Rip out x86_32 NUMA remapping code
  x86/numa: Use __pa_nodebug() instead
  x86: Don't panic if can not alloc buffer for swiotlb
  mm: Add alloc_bootmem_low_pages_nopanic()
  x86, 64bit, mm: hibernate use generic mapping_init
  x86, 64bit, mm: Mark data/bss/brk to nx
  x86: Merge early kernel reserve for 32bit and 64bit
  x86: Add Crash kernel low reservation
  x86, kdump: Remove crashkernel range find limit for 64bit
  memblock: Add memblock_mem_size()
  x86, boot: Not need to check setup_header version for setup_data
  ...
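The "pseudo-linear mode" described above is kernel-internal early-boot code; as a rough, hedged illustration of the underlying idea (map memory on demand from a fault handler rather than sizing everything up front), here is a userspace analogy in C. It is only an analogy, not the kernel's #PF handler; the region size and all names are invented for the demo, and async-signal-safety is simplified.

/*
 * Userspace analogy of on-demand mapping: reserve inaccessible address
 * space, catch the fault when it is first touched, and map a backing
 * page at the faulting address so the access can be retried.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define REGION_SIZE (1UL << 20)		/* 1 MB lazily backed "address space" */

static char *region;
static long page_size;

static void fault_handler(int sig, siginfo_t *si, void *ctx)
{
	char *addr = si->si_addr;

	(void)sig; (void)ctx;

	/* Only handle faults that land inside our reserved region. */
	if (addr < region || addr >= region + REGION_SIZE)
		_exit(1);

	/* Map one page, aligned down to a page boundary, at the fault address. */
	char *page = (char *)((unsigned long)addr & ~(page_size - 1));
	if (mmap(page, page_size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		_exit(1);
	/* Returning retries the faulting instruction, which now succeeds. */
}

int main(void)
{
	page_size = sysconf(_SC_PAGESIZE);

	/* Reserve address space with no access rights: every touch faults. */
	region = mmap(NULL, REGION_SIZE, PROT_NONE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;

	struct sigaction sa = { .sa_sigaction = fault_handler,
				.sa_flags = SA_SIGINFO };
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* Touch scattered addresses; pages appear on demand. */
	region[0] = 'a';
	region[123456] = 'b';
	printf("faulted-in bytes: %c %c\n", region[0], region[123456]);
	return 0;
}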
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	| 47
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 196b06984dec..bfe02b8fc55b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
+static bool no_iotlb_memory;
+
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	unsigned char *vstart, *vend;
 
+	if (no_iotlb_memory) {
+		pr_warn("software IO TLB: No low mem\n");
+		return;
+	}
+
 	vstart = phys_to_virt(io_tlb_start);
 	vend = phys_to_virt(io_tlb_end);
 
@@ -136,7 +143,7 @@ void swiotlb_print_info(void)
 	       bytes >> 20, vstart, vend - 1);
 }
 
-void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	void *v_overflow_buffer;
 	unsigned long i, bytes;
@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
+						PAGE_ALIGN(io_tlb_overflow));
 	if (!v_overflow_buffer)
-		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+		return -ENOMEM;
 
 	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
 
@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 
 	if (verbose)
 		swiotlb_print_info();
+
+	return 0;
 }
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-static void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init
+swiotlb_init(int verbose)
 {
+	/* default to 64MB */
+	size_t default_size = 64UL<<20;
 	unsigned char *vstart;
 	unsigned long bytes;
 
@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
-	if (!vstart)
-		panic("Cannot allocate SWIOTLB buffer");
-
-	swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
-}
+	/* Get IO TLB memory from the low pages */
+	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+		return;
 
-void __init
-swiotlb_init(int verbose)
-{
-	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
+	if (io_tlb_start)
+		free_bootmem(io_tlb_start,
+			     PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+	pr_warn("Cannot allocate SWIOTLB buffer");
+	no_iotlb_memory = true;
 }
 
 /*
@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
+	if (no_iotlb_memory)
+		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
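Taken together, the diff is a "degrade now, fail later" pattern: swiotlb_init_with_tbl() reports failure instead of panicking, swiotlb_init() frees any partial bootmem allocation, warns, and records the failure in no_iotlb_memory, and swiotlb_tbl_map_single() panics only if a bounce buffer is actually requested later. Below is a minimal, hedged C sketch of that shape; it is not kernel code, and every name in it is invented for illustration.

/* Sketch of the allocate-without-panicking / fail-on-first-use pattern. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *bounce_buffer;
static bool no_bounce_memory;		/* plays the role of no_iotlb_memory */

/* Stand-in for alloc_bootmem_low_pages_nopanic(): may return NULL. */
static void *alloc_low_nopanic(size_t size)
{
	return malloc(size);
}

/* Like the reworked swiotlb_init_with_tbl(): return an error, don't panic. */
static int bounce_init_with_buf(void *buf, size_t size)
{
	if (!buf)
		return -1;
	bounce_buffer = buf;
	printf("bounce buffer ready: %zu bytes\n", size);
	return 0;
}

static void bounce_init(size_t size)
{
	void *buf = alloc_low_nopanic(size);

	if (buf && !bounce_init_with_buf(buf, size))
		return;			/* success: nothing more to do */

	free(buf);			/* undo the partial allocation, like free_bootmem() */
	fprintf(stderr, "cannot allocate bounce buffer\n");
	no_bounce_memory = true;	/* defer the hard failure */
}

/* Only when a mapping is actually requested does the failure become fatal. */
static void *bounce_map(size_t size)
{
	if (no_bounce_memory) {
		fprintf(stderr, "no bounce buffer was ever allocated\n");
		abort();		/* the kernel panics at this point instead */
	}
	(void)size;
	return bounce_buffer;
}

int main(void)
{
	bounce_init(1 << 20);
	bounce_map(4096);
	return 0;
}

The benefit mirrored here is the one the patch is after: a machine that never touches the bounce buffer (for example, one where all DMA is below the limit) keeps booting even when the low-memory allocation fails, and the fatal error is postponed to the first caller that genuinely needs it.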