Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	27
-rw-r--r--	lib/atomic64_test.c	2
-rw-r--r--	lib/devres.c	2
-rw-r--r--	lib/random32.c	2
-rw-r--r--	lib/swiotlb.c	137
-rw-r--r--	lib/vsprintf.c	9
6 files changed, 116 insertions, 63 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2cd7fbf31c0..ff87ddc4cbd5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -312,6 +312,12 @@ config DEBUG_OBJECTS_WORK
 	  work queue routines to track the life time of work objects and
 	  validate the work operations.
 
+config DEBUG_OBJECTS_RCU_HEAD
+	bool "Debug RCU callbacks objects"
+	depends on DEBUG_OBJECTS && PREEMPT
+	help
+	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -533,7 +539,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -633,6 +639,19 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
+config DEBUG_INFO_REDUCED
+	bool "Reduce debugging information"
+	depends on DEBUG_INFO
+	help
+	  If you say Y here gcc is instructed to generate less debugging
+	  information for structure types. This means that tools that
+	  need full debugging information (like kgdb or systemtap) won't
+	  be happy. But if you merely need debugging information to
+	  resolve line numbers there is no loss. Advantage is that
+	  build directory object sizes shrink dramatically over a full
+	  DEBUG_INFO build and compile times are reduced too.
+	  Only works with newer gcc versions.
+
 config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
@@ -942,7 +961,7 @@ config FAIL_MAKE_REQUEST
 	  Provide fault-injection capability for disk IO.
 
 config FAIL_IO_TIMEOUT
-	bool "Faul-injection capability for faking disk interrupts"
+	bool "Fault-injection capability for faking disk interrupts"
 	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability on end IO handling. This
@@ -963,13 +982,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
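
A note on the new DEBUG_OBJECTS_RCU_HEAD option above: it teaches debugobjects to track the rcu_head structures handed to call_rcu(), catching mistakes such as queueing the same head twice or reusing an object while its callback is still pending. A minimal sketch of the pattern being instrumented follows; struct foo and its helpers are illustrative names only, not part of this patch.

/* Illustrative sketch: the call_rcu() usage DEBUG_OBJECTS_RCU_HEAD tracks. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;		/* the object debugobjects watches */
};

static void foo_release(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);			/* safe: a grace period has elapsed */
}

static void foo_free(struct foo *f)
{
	/* debugobjects can flag a double call_rcu() or premature reuse here */
	call_rcu(&f->rcu, foo_release);
}
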
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 250ed11d3ed2..44524cc8c32a 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -114,7 +114,7 @@ static __init int test_atomic64(void)
 	BUG_ON(v.counter != r);
 
 #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
-	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H)
+	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
 	INIT(onestwos);
 	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
 	r -= one;
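
The atomic64_test.c hunk above simply enables the atomic64_dec_if_positive() coverage on ARM. For reference, the semantics the test relies on are shown in this small illustrative sketch (not part of the patch): the helper returns the decremented value and only stores it when the result is not negative.

/* Illustrative sketch of atomic64_dec_if_positive() semantics. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/atomic.h>

static __init int demo_dec_if_positive(void)
{
	atomic64_t v = ATOMIC64_INIT(1);

	BUG_ON(atomic64_dec_if_positive(&v) != 0);	/* 1 -> 0, returns 0 */
	BUG_ON(atomic64_dec_if_positive(&v) != -1);	/* stays 0, returns -1 */
	BUG_ON(atomic64_read(&v) != 0);
	return 0;
}
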
diff --git a/lib/devres.c b/lib/devres.c
index 49368608f988..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to unmap and release
  *
- * Unamp and release regions specified by @mask.
+ * Unmap and release regions specified by @mask.
  */
 void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
 {
diff --git a/lib/random32.c b/lib/random32.c
index 870dc3fc0f0f..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -127,7 +127,7 @@ core_initcall(random32_init);
 
 /*
  * Generate better values after random number generator
- * is fully initalized.
+ * is fully initialized.
  */
 static int __init random32_reseed(void)
 {
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a009055140ec..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -50,19 +50,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -140,28 +132,14 @@ void swiotlb_print_info(void)
 		 (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
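
The two hunks above split initialization: swiotlb_init_with_tbl() now accepts a caller-provided buffer and slab count, while swiotlb_init_with_default_size() keeps the old behaviour by allocating from low bootmem and delegating to it. A hedged sketch of how a platform with its own pre-allocated (or specially mapped) pool could use the new entry point; my_alloc_and_map_pool() is an assumed helper, not part of this patch.

/* Hypothetical early-boot caller of the new swiotlb_init_with_tbl(). */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/swiotlb.h>

extern char *my_alloc_and_map_pool(unsigned long bytes);	/* assumed helper */

void __init my_platform_swiotlb_init(void)
{
	unsigned long nslabs = ALIGN((64UL << 20) >> IO_TLB_SHIFT,
				     IO_TLB_SEGSIZE);
	char *pool = my_alloc_and_map_pool(nslabs << IO_TLB_SHIFT);

	if (!pool)
		panic("Cannot allocate platform SWIOTLB buffer");

	swiotlb_init_with_tbl(pool, nslabs, 1 /* verbose */);
}
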
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 			   enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -466,12 +469,27 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;
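
Taken together, the swiotlb.c changes above carve out a reusable bounce-buffer layer: swiotlb_bounce(), swiotlb_tbl_map_single(), swiotlb_tbl_unmap_single() and swiotlb_tbl_sync_single() are exported, take an explicit table DMA address, and use enum dma_data_direction / enum dma_sync_target instead of bare ints, so a backend with its own pool and its own bus-address translation can reuse them. A hedged sketch of such a backend follows; my_io_tlb_start and my_virt_to_bus() are assumed helpers, and the exported declarations plus enum dma_sync_target (removed from the .c above) are assumed to land in include/linux/swiotlb.h via a companion patch.

/* Hypothetical alternate backend built on the swiotlb_tbl_* primitives. */
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

extern char *my_io_tlb_start;					/* assumed pool */
extern dma_addr_t my_virt_to_bus(struct device *dev, void *vaddr);	/* assumed */

static dma_addr_t my_map_single(struct device *dev, phys_addr_t phys,
				size_t size, enum dma_data_direction dir)
{
	dma_addr_t tbl_dma_addr = my_virt_to_bus(dev, my_io_tlb_start);
	char *vaddr;

	/* Bounce through this backend's table rather than the global one. */
	vaddr = swiotlb_tbl_map_single(dev, tbl_dma_addr, phys, size, dir);
	if (!vaddr)
		return 0;

	return my_virt_to_bus(dev, vaddr);
}

static void my_sync_for_cpu(struct device *dev, char *bounce, size_t size,
			    enum dma_data_direction dir)
{
	swiotlb_tbl_sync_single(dev, bounce, size, dir, SYNC_FOR_CPU);
}

static void my_unmap_single(struct device *dev, char *bounce, size_t size,
			    enum dma_data_direction dir)
{
	swiotlb_tbl_unmap_single(dev, bounce, size, dir);
}
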
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b8a2f549ab0e..4ee19d0d3910 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -980,6 +980,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
  *       [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
  *       little endian output byte order is:
  *       [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ * - 'V' For a struct va_format which contains a format string * and va_list *,
+ *	call vsnprintf(->format, *->va_list).
+ *	Implements a "recursive vsnprintf".
+ *	Do not use this feature without some mechanism to verify the
+ *	correctness of the format string and va_list arguments.
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1025,6 +1030,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		break;
 	case 'U':
 		return uuid_string(buf, end, ptr, spec, fmt);
+	case 'V':
+		return buf + vsnprintf(buf, end - buf,
+				       ((struct va_format *)ptr)->fmt,
+				       *(((struct va_format *)ptr)->va));
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
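
The vsprintf.c hunks above add the %pV extension: the pointer argument is a struct va_format bundling a format string with a va_list pointer, and vsnprintf() is re-entered with them. As the new comment warns, it must only be fed trusted format/argument pairs. A typical caller looks like the illustrative sketch below; my_dev_err() is a made-up wrapper, and struct va_format is assumed to be defined in include/linux/kernel.h by a companion patch.

/* Illustrative %pV caller: package fmt + va_list and print them in one go. */
#include <linux/device.h>
#include <linux/kernel.h>

static void my_dev_err(struct device *dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV makes vsnprintf() recurse with the packaged format and args */
	printk(KERN_ERR "%s: %pV", dev_name(dev), &vaf);
	va_end(args);
}

Because the wrapper's own prototype is an ordinary printf-style function, compiler format checking on its callers provides the verification mechanism the comment asks for.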