Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            3
-rw-r--r--  lib/Kconfig.debug     63
-rw-r--r--  lib/Makefile           2
-rw-r--r--  lib/atomic64_test.c    2
-rw-r--r--  lib/devres.c           2
-rw-r--r--  lib/flex_array.c      25
-rw-r--r--  lib/iommu-helper.c     9
-rw-r--r--  lib/ioremap.c         10
-rw-r--r--  lib/list_debug.c       6
-rw-r--r--  lib/lmb.c            541
-rw-r--r--  lib/percpu_counter.c  27
-rw-r--r--  lib/radix-tree.c      94
-rw-r--r--  lib/random32.c         2
-rw-r--r--  lib/rbtree.c         116
-rw-r--r--  lib/rwsem.c          150
-rw-r--r--  lib/swiotlb.c        137
-rw-r--r--  lib/vsprintf.c        23
17 files changed, 456 insertions(+), 756 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 170d8ca901d8..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -181,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 95ab402db9c0..9e06b7f5ecf1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -76,7 +76,6 @@ config UNUSED_SYMBOLS
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into. Enable this option to be able to read and
@@ -152,28 +151,33 @@ config DEBUG_SHIRQ
 	  Drivers ought to be able to handle interrupts coming in at those
 	  points; some don't and need to be caught.
 
-config DETECT_SOFTLOCKUP
-	bool "Detect Soft Lockups"
+config LOCKUP_DETECTOR
+	bool "Detect Hard and Soft Lockups"
 	depends on DEBUG_KERNEL && !S390
-	default y
 	help
-	  Say Y here to enable the kernel to detect "soft lockups",
-	  which are bugs that cause the kernel to loop in kernel
+	  Say Y here to enable the kernel to act as a watchdog to detect
+	  hard and soft lockups.
+
+	  Softlockups are bugs that cause the kernel to loop in kernel
 	  mode for more than 60 seconds, without giving other tasks a
-	  chance to run.
+	  chance to run. The current stack trace is displayed upon
+	  detection and the system will stay locked up.
 
-	  When a soft-lockup is detected, the kernel will print the
-	  current stack trace (which you should report), but the
-	  system will stay locked up. This feature has negligible
-	  overhead.
+	  Hardlockups are bugs that cause the CPU to loop in kernel mode
+	  for more than 60 seconds, without letting other interrupts have a
+	  chance to run. The current stack trace is displayed upon detection
+	  and the system will stay locked up.
 
-	  (Note that "hard lockups" are separate type of bugs that
-	  can be detected via the NMI-watchdog, on platforms that
-	  support it.)
+	  The overhead should be minimal. A periodic hrtimer runs to
+	  generate interrupts and kick the watchdog task every 10-12 seconds.
+	  An NMI is generated every 60 seconds or so to check for hardlockups.
+
+config HARDLOCKUP_DETECTOR
+	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -190,7 +194,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
 
 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	int
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -307,6 +311,12 @@ config DEBUG_OBJECTS_WORK
 	  work queue routines to track the life time of work objects and
 	  validate the work operations.
 
+config DEBUG_OBJECTS_RCU_HEAD
+	bool "Debug RCU callbacks objects"
+	depends on DEBUG_OBJECTS && PREEMPT
+	help
+	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -535,7 +545,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -635,6 +645,19 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
+config DEBUG_INFO_REDUCED
+	bool "Reduce debugging information"
+	depends on DEBUG_INFO
+	help
+	  If you say Y here gcc is instructed to generate less debugging
+	  information for structure types. This means that tools that
+	  need full debugging information (like kgdb or systemtap) won't
+	  be happy. But if you merely need debugging information to
+	  resolve line numbers there is no loss. Advantage is that
+	  build directory object sizes shrink dramatically over a full
+	  DEBUG_INFO build and compile times are reduced too.
+	  Only works with newer gcc versions.
+
 config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
@@ -944,7 +967,7 @@ config FAIL_MAKE_REQUEST
 	  Provide fault-injection capability for disk IO.
 
 config FAIL_IO_TIMEOUT
-	bool "Faul-injection capability for faking disk interrupts"
+	bool "Fault-injection capability for faking disk interrupts"
 	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability on end IO handling. This
@@ -965,13 +988,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
diff --git a/lib/Makefile b/lib/Makefile
index 3f1062cbbff4..0bfabba1bb32 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 250ed11d3ed2..44524cc8c32a 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -114,7 +114,7 @@ static __init int test_atomic64(void)
 	BUG_ON(v.counter != r);
 
 #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
-	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H)
+	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
 	INIT(onestwos);
 	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
 	r -= one;
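
For orientation, a hedged sketch of the atomic64_dec_if_positive() contract that the BUG_ON checks above exercise. This is illustrative only; the kernel's real implementations are per-architecture or, for the generic case, spinlock-based in lib/atomic64.c.

/* Illustrative cmpxchg-loop sketch of atomic64_dec_if_positive():
 * decrement only if the result stays non-negative, and return the
 * would-be new value either way (a negative return means no store
 * was performed). */
static s64 dec_if_positive_sketch(atomic64_t *v)
{
	s64 old, new;

	do {
		old = atomic64_read(v);
		new = old - 1;
		if (new < 0)
			return new;	/* no decrement performed */
	} while (atomic64_cmpxchg(v, old, new) != old);

	return new;	/* decremented; result is >= 0 */
}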
diff --git a/lib/devres.c b/lib/devres.c
index 49368608f988..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to unmap and release
  *
- * Unamp and release regions specified by @mask.
+ * Unmap and release regions specified by @mask.
  */
 void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
 {
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 41b1804fa728..77a6fea7481e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -171,6 +171,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  * Note that this *copies* the contents of @src into
  * the array. If you are trying to store an array of
  * pointers, make sure to pass in &ptr instead of ptr.
+ * You may instead wish to use the flex_array_put_ptr()
+ * helper function.
  *
  * Locking must be provided by the caller.
  */
@@ -265,7 +267,8 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
 *
 * Returns a pointer to the data at index @element_nr. Note
 * that this is a copy of the data that was passed in. If you
- * are using this to store pointers, you'll get back &ptr.
+ * are using this to store pointers, you'll get back &ptr. You
+ * may instead wish to use the flex_array_get_ptr helper.
 *
 * Locking must be provided by the caller.
 */
@@ -286,6 +289,26 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
 
+/**
+ * flex_array_get_ptr - pull a ptr back out of the array
+ * @fa: the flex array from which to extract data
+ * @element_nr: index of the element to fetch from the array
+ *
+ * Returns the pointer placed in the flex array at element_nr using
+ * flex_array_put_ptr(). This function should not be called if the
+ * element in question was not set using the _put_ptr() helper.
+ */
+void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
+{
+	void **tmp;
+
+	tmp = flex_array_get(fa, element_nr);
+	if (!tmp)
+		return NULL;
+
+	return *tmp;
+}
+
 static int part_is_free(struct flex_array_part *part)
 {
 	int i;
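
A hedged usage sketch of the new pointer helpers. flex_array_put_ptr() is the counterpart referenced by the kernel-doc above (from the same series, not this hunk); error handling is abbreviated.

#include <linux/flex_array.h>

static void *store_and_fetch(void *ptr)
{
	struct flex_array *fa;
	void *back = NULL;

	/* each element holds exactly one pointer */
	fa = flex_array_alloc(sizeof(void *), 16, GFP_KERNEL);
	if (!fa)
		return NULL;

	/* stores the pointer value itself; the helper passes &ptr for us */
	if (flex_array_put_ptr(fa, 0, ptr, GFP_KERNEL) == 0)
		/* returns the stored pointer, not the address of the slot */
		back = flex_array_get_ptr(fa, 0);

	flex_array_free(fa);
	return back;
}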
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c0251f4ad08b..da053313ee5c 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -38,12 +38,3 @@ again:
 	return -1;
 }
 EXPORT_SYMBOL(iommu_area_alloc);
-
-unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
-			      unsigned long io_page_size)
-{
-	unsigned long size = (addr & (io_page_size - 1)) + len;
-
-	return DIV_ROUND_UP(size, io_page_size);
-}
-EXPORT_SYMBOL(iommu_num_pages);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 14c6078f17a2..5730ecd3eb66 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,10 +13,10 @@
 #include <asm/pgtable.h>
 
 static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
 	pte_t *pte;
-	unsigned long pfn;
+	u64 pfn;
 
 	pfn = phys_addr >> PAGE_SHIFT;
 	pte = pte_alloc_kernel(pmd, addr);
@@ -31,7 +31,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
 }
 
 static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -49,7 +49,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 }
 
 static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, pgprot_t prot)
+		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -67,7 +67,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
 }
 
 int ioremap_page_range(unsigned long addr,
-		       unsigned long end, unsigned long phys_addr, pgprot_t prot)
+		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
 	pgd_t *pgd;
 	unsigned long start;
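
The point of the phys_addr_t switch shows up on 32-bit kernels with 64-bit physical addressing (e.g. x86 PAE), where unsigned long cannot hold a bus address above 4 GiB. A hypothetical illustration; the address and protection flags below are made up, not from the patch.

static int map_high_window(unsigned long vaddr)
{
	/* 4 GiB: the old "unsigned long phys_addr" prototype would have
	 * truncated this to 0 on a 32-bit kernel; phys_addr_t keeps it. */
	phys_addr_t phys = 0x100000000ULL;

	return ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
				  phys, PAGE_KERNEL_NOCACHE);
}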
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 1a39f4e3ae1f..344c710d16ca 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -43,6 +43,12 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
+	WARN(entry->next == LIST_POISON1,
+		"list_del corruption, next is LIST_POISON1 (%p)\n",
+		LIST_POISON1);
+	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
+		"list_del corruption, prev is LIST_POISON2 (%p)\n",
+		LIST_POISON2);
 	WARN(entry->prev->next != entry,
 		"list_del corruption. prev->next should be %p, "
 		"but was %p\n", entry, entry->prev->next);
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index b1fc52606524..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Procedures for maintaining information about logical memory blocks.
- *
- * Peter Bergner, IBM Corp. June 2001.
- * Copyright (C) 2001 Peter Bergner.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/lmb.h>
-
-#define LMB_ALLOC_ANYWHERE	0
-
-struct lmb lmb;
-
-static int lmb_debug;
-
-static int __init early_lmb(char *p)
-{
-	if (p && strstr(p, "debug"))
-		lmb_debug = 1;
-	return 0;
-}
-early_param("lmb", early_lmb);
-
-static void lmb_dump(struct lmb_region *region, char *name)
-{
-	unsigned long long base, size;
-	int i;
-
-	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
-	for (i = 0; i < region->cnt; i++) {
-		base = region->region[i].base;
-		size = region->region[i].size;
-
-		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-			name, i, base, base + size - 1, size);
-	}
-}
-
-void lmb_dump_all(void)
-{
-	if (!lmb_debug)
-		return;
-
-	pr_info("LMB configuration:\n");
-	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
-	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
-
-	lmb_dump(&lmb.memory, "memory");
-	lmb_dump(&lmb.reserved, "reserved");
-}
-
-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
-					u64 size2)
-{
-	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
-}
-
-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
-{
-	if (base2 == base1 + size1)
-		return 1;
-	else if (base1 == base2 + size2)
-		return -1;
-
-	return 0;
-}
-
-static long lmb_regions_adjacent(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
-{
-	u64 base1 = rgn->region[r1].base;
-	u64 size1 = rgn->region[r1].size;
-	u64 base2 = rgn->region[r2].base;
-	u64 size2 = rgn->region[r2].size;
-
-	return lmb_addrs_adjacent(base1, size1, base2, size2);
-}
-
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
-{
-	unsigned long i;
-
-	for (i = r; i < rgn->cnt - 1; i++) {
-		rgn->region[i].base = rgn->region[i + 1].base;
-		rgn->region[i].size = rgn->region[i + 1].size;
-	}
-	rgn->cnt--;
-}
-
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
-{
-	rgn->region[r1].size += rgn->region[r2].size;
-	lmb_remove_region(rgn, r2);
-}
-
-void __init lmb_init(void)
-{
-	/* Create a dummy zero size LMB which will get coalesced away later.
-	 * This simplifies the lmb_add() code below...
-	 */
-	lmb.memory.region[0].base = 0;
-	lmb.memory.region[0].size = 0;
-	lmb.memory.cnt = 1;
-
-	/* Ditto. */
-	lmb.reserved.region[0].base = 0;
-	lmb.reserved.region[0].size = 0;
-	lmb.reserved.cnt = 1;
-}
-
-void __init lmb_analyze(void)
-{
-	int i;
-
-	lmb.memory.size = 0;
-
-	for (i = 0; i < lmb.memory.cnt; i++)
-		lmb.memory.size += lmb.memory.region[i].size;
-}
-
-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-	unsigned long coalesced = 0;
-	long adjacent, i;
-
-	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		return 0;
-	}
-
-	/* First try and coalesce this LMB with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
-
-		if ((rgnbase == base) && (rgnsize == size))
-			/* Already have this region, so we're done */
-			return 0;
-
-		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-		if (adjacent > 0) {
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
-			coalesced++;
-			break;
-		} else if (adjacent < 0) {
-			rgn->region[i].size += size;
-			coalesced++;
-			break;
-		}
-	}
-
-	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
-		lmb_coalesce_regions(rgn, i, i+1);
-		coalesced++;
-	}
-
-	if (coalesced)
-		return coalesced;
-	if (rgn->cnt >= MAX_LMB_REGIONS)
-		return -1;
-
-	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i = rgn->cnt - 1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i+1].base = rgn->region[i].base;
-			rgn->region[i+1].size = rgn->region[i].size;
-		} else {
-			rgn->region[i+1].base = base;
-			rgn->region[i+1].size = size;
-			break;
-		}
-	}
-
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-	}
-	rgn->cnt++;
-
-	return 0;
-}
-
-long lmb_add(u64 base, u64 size)
-{
-	struct lmb_region *_rgn = &lmb.memory;
-
-	/* On pSeries LPAR systems, the first LMB is our RMO region. */
-	if (base == 0)
-		lmb.rmo_size = size;
-
-	return lmb_add_region(_rgn, base, size);
-
-}
-
-static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
-{
-	u64 rgnbegin, rgnend;
-	u64 end = base + size;
-	int i;
-
-	rgnbegin = rgnend = 0; /* supress gcc warnings */
-
-	/* Find the region where (base, size) belongs to */
-	for (i=0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size;
-
-		if ((rgnbegin <= base) && (end <= rgnend))
-			break;
-	}
-
-	/* Didn't find the region */
-	if (i == rgn->cnt)
-		return -1;
-
-	/* Check to see if we are removing entire region */
-	if ((rgnbegin == base) && (rgnend == end)) {
-		lmb_remove_region(rgn, i);
-		return 0;
-	}
-
-	/* Check to see if region is matching at the front */
-	if (rgnbegin == base) {
-		rgn->region[i].base = end;
-		rgn->region[i].size -= size;
-		return 0;
-	}
-
-	/* Check to see if the region is matching at the end */
-	if (rgnend == end) {
-		rgn->region[i].size -= size;
-		return 0;
-	}
-
-	/*
-	 * We need to split the entry - adjust the current one to the
-	 * beginging of the hole and add the region after hole.
-	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region(rgn, end, rgnend - end);
-}
-
-long lmb_remove(u64 base, u64 size)
-{
-	return __lmb_remove(&lmb.memory, base, size);
-}
-
-long __init lmb_free(u64 base, u64 size)
-{
-	return __lmb_remove(&lmb.reserved, base, size);
-}
-
-long __init lmb_reserve(u64 base, u64 size)
-{
-	struct lmb_region *_rgn = &lmb.reserved;
-
-	BUG_ON(0 == size);
-
-	return lmb_add_region(_rgn, base, size);
-}
-
-long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-	unsigned long i;
-
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
-		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
-			break;
-	}
-
-	return (i < rgn->cnt) ? i : -1;
-}
-
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
-static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
-					   u64 size, u64 align)
-{
-	u64 base, res_base;
-	long j;
-
-	base = lmb_align_down((end - size), align);
-	while (start <= base) {
-		j = lmb_overlaps_region(&lmb.reserved, base, size);
-		if (j < 0) {
-			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base, size) < 0)
-				base = ~(u64)0;
-			return base;
-		}
-		res_base = lmb.reserved.region[j].base;
-		if (res_base < size)
-			break;
-		base = lmb_align_down(res_base - size, align);
-	}
-
-	return ~(u64)0;
-}
-
-static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
-				       u64 (*nid_range)(u64, u64, int *),
-				       u64 size, u64 align, int nid)
-{
-	u64 start, end;
-
-	start = mp->base;
-	end = start + mp->size;
-
-	start = lmb_align_up(start, align);
-	while (start < end) {
-		u64 this_end;
-		int this_nid;
-
-		this_end = nid_range(start, end, &this_nid);
-		if (this_nid == nid) {
-			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
-							   size, align);
-			if (ret != ~(u64)0)
-				return ret;
-		}
-		start = this_end;
-	}
-
-	return ~(u64)0;
-}
-
-u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-			 u64 (*nid_range)(u64 start, u64 end, int *nid))
-{
-	struct lmb_region *mem = &lmb.memory;
-	int i;
-
-	BUG_ON(0 == size);
-
-	size = lmb_align_up(size, align);
-
-	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = lmb_alloc_nid_region(&mem->region[i],
-					       nid_range,
-					       size, align, nid);
-		if (ret != ~(u64)0)
-			return ret;
-	}
-
-	return lmb_alloc(size, align);
-}
-
-u64 __init lmb_alloc(u64 size, u64 align)
-{
-	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
-}
-
-u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-	u64 alloc;
-
-	alloc = __lmb_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-	long i, j;
-	u64 base = 0;
-	u64 res_base;
-
-	BUG_ON(0 == size);
-
-	size = lmb_align_up(size, align);
-
-	/* On some platforms, make sure we allocate lowmem */
-	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
-	if (max_addr == LMB_ALLOC_ANYWHERE)
-		max_addr = LMB_REAL_LIMIT;
-
-	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
-		u64 lmbbase = lmb.memory.region[i].base;
-		u64 lmbsize = lmb.memory.region[i].size;
-
-		if (lmbsize < size)
-			continue;
-		if (max_addr == LMB_ALLOC_ANYWHERE)
-			base = lmb_align_down(lmbbase + lmbsize - size, align);
-		else if (lmbbase < max_addr) {
-			base = min(lmbbase + lmbsize, max_addr);
-			base = lmb_align_down(base - size, align);
-		} else
-			continue;
-
-		while (base && lmbbase <= base) {
-			j = lmb_overlaps_region(&lmb.reserved, base, size);
-			if (j < 0) {
-				/* this area isn't reserved, take it */
-				if (lmb_add_region(&lmb.reserved, base, size) < 0)
-					return 0;
-				return base;
-			}
-			res_base = lmb.reserved.region[j].base;
-			if (res_base < size)
-				break;
-			base = lmb_align_down(res_base - size, align);
-		}
-	}
-	return 0;
-}
-
-/* You must call lmb_analyze() before this. */
-u64 __init lmb_phys_mem_size(void)
-{
-	return lmb.memory.size;
-}
-
-u64 lmb_end_of_DRAM(void)
-{
-	int idx = lmb.memory.cnt - 1;
-
-	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
-}
-
-/* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(u64 memory_limit)
-{
-	unsigned long i;
-	u64 limit;
-	struct lmb_property *p;
-
-	if (!memory_limit)
-		return;
-
-	/* Truncate the lmb regions to satisfy the memory limit. */
-	limit = memory_limit;
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		if (limit > lmb.memory.region[i].size) {
-			limit -= lmb.memory.region[i].size;
-			continue;
-		}
-
-		lmb.memory.region[i].size = limit;
-		lmb.memory.cnt = i + 1;
-		break;
-	}
-
-	if (lmb.memory.region[0].size < lmb.rmo_size)
-		lmb.rmo_size = lmb.memory.region[0].size;
-
-	memory_limit = lmb_end_of_DRAM();
-
-	/* And truncate any reserves above the limit also. */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		p = &lmb.reserved.region[i];
-
-		if (p->base > memory_limit)
-			p->size = 0;
-		else if ((p->base + p->size) > memory_limit)
-			p->size = memory_limit - p->base;
-
-		if (p->size == 0) {
-			lmb_remove_region(&lmb.reserved, i);
-			i--;
-		}
-	}
-}
-
-int __init lmb_is_reserved(u64 addr)
-{
-	int i;
-
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		u64 upper = lmb.reserved.region[i].base +
-			lmb.reserved.region[i].size - 1;
-		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
-			return 1;
-	}
-	return 0;
-}
-
-int lmb_is_region_reserved(u64 base, u64 size)
-{
-	return lmb_overlaps_region(&lmb.reserved, base, size);
-}
-
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int lmb_find(struct lmb_property *res)
-{
-	int i;
-	u64 rstart, rend;
-
-	rstart = res->base;
-	rend = rstart + res->size - 1;
-
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		u64 start = lmb.memory.region[i].base;
-		u64 end = start + lmb.memory.region[i].size - 1;
-
-		if (start > rend)
-			return -1;
-
-		if ((end >= rstart) && (start < rend)) {
-			/* adjust the request */
-			if (rstart < start)
-				rstart = start;
-			if (rend > end)
-				rend = end;
-			res->base = rstart;
-			res->size = rend - rstart + 1;
-			return 0;
-		}
-	}
-	return -1;
-}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index aeaa6d734447..ec9048e74f44 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -137,6 +137,33 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+/*
+ * Compare counter against given value.
+ * Return 1 if greater, 0 if equal and -1 if less
+ */
+int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+	s64 count;
+
+	count = percpu_counter_read(fbc);
+	/* Check to see if rough count will be sufficient for comparison */
+	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+		if (count > rhs)
+			return 1;
+		else
+			return -1;
+	}
+	/* Need to use precise count */
+	count = percpu_counter_sum(fbc);
+	if (count > rhs)
+		return 1;
+	else if (count < rhs)
+		return -1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(percpu_counter_compare);
+
 static int __init percpu_counter_startup(void)
 {
 	compute_batch_value();
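
A hedged sketch of how percpu_counter_compare() is meant to be used (function and parameter names below are hypothetical): the cheap approximate read decides most cases, and the expensive percpu_counter_sum() fold is only paid when the counter is within batch * num_online_cpus() of the comparison value.

static bool has_free_blocks(struct percpu_counter *free_blocks, s64 nblocks)
{
	/* 1: more than nblocks free, 0: exactly nblocks, -1: fewer */
	return percpu_counter_compare(free_blocks, nblocks) >= 0;
}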
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 05da38bcc298..e907858498a6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -609,6 +609,100 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 EXPORT_SYMBOL(radix_tree_tag_get);
 
 /**
+ * radix_tree_range_tag_if_tagged - for each item in given range set given
+ *				    tag if item has another tag set
+ * @root: radix tree root
+ * @first_indexp: pointer to a starting index of a range to scan
+ * @last_index: last index of a range to scan
+ * @nr_to_tag: maximum number of items to tag
+ * @iftag: tag index to test
+ * @settag: tag index to set if tested tag is set
+ *
+ * This function scans the range of the radix tree from first_index to
+ * last_index (inclusive). For each item in the range on which iftag is set,
+ * the function also sets settag. The function stops either after tagging
+ * nr_to_tag items or after reaching last_index.
+ *
+ * The function returns the number of leaves where the tag was set and sets
+ * *first_indexp to the first unscanned index.
+ */
+unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
+		unsigned long *first_indexp, unsigned long last_index,
+		unsigned long nr_to_tag,
+		unsigned int iftag, unsigned int settag)
+{
+	unsigned int height = root->height, shift;
+	unsigned long tagged = 0, index = *first_indexp;
+	struct radix_tree_node *open_slots[height], *slot;
+
+	last_index = min(last_index, radix_tree_maxindex(height));
+	if (index > last_index)
+		return 0;
+	if (!nr_to_tag)
+		return 0;
+	if (!root_tag_get(root, iftag)) {
+		*first_indexp = last_index + 1;
+		return 0;
+	}
+	if (height == 0) {
+		*first_indexp = last_index + 1;
+		root_tag_set(root, settag);
+		return 1;
+	}
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	slot = radix_tree_indirect_to_ptr(root->rnode);
+
+	for (;;) {
+		int offset;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		if (!slot->slots[offset])
+			goto next;
+		if (!tag_get(slot, iftag, offset))
+			goto next;
+		tag_set(slot, settag, offset);
+		if (height == 1) {
+			tagged++;
+			goto next;
+		}
+		/* Go down one level */
+		height--;
+		shift -= RADIX_TREE_MAP_SHIFT;
+		open_slots[height] = slot;
+		slot = slot->slots[offset];
+		continue;
+next:
+		/* Go to next item at level determined by 'shift' */
+		index = ((index >> shift) + 1) << shift;
+		if (index > last_index)
+			break;
+		if (tagged >= nr_to_tag)
+			break;
+		while (((index >> shift) & RADIX_TREE_MAP_MASK) == 0) {
+			/*
+			 * We've fully scanned this node. Go up. Because
+			 * last_index is guaranteed to be in the tree, what
+			 * we do below cannot wander astray.
+			 */
+			slot = open_slots[height];
+			height++;
+			shift += RADIX_TREE_MAP_SHIFT;
+		}
+	}
+	/*
+	 * The iftag must have been set somewhere because otherwise
+	 * we would return immediately at the beginning of the function
+	 */
+	root_tag_set(root, settag);
+	*first_indexp = index;
+
+	return tagged;
+}
+EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
+
+
+/**
  * radix_tree_next_hole - find the next hole (not-present entry)
  * @root: tree root
  * @index: index key
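
A hedged sketch of the intended caller, modelled on the writeback path this helper was written for. The TOWRITE tag and the batch size are assumptions from companion patches in the same series, not from this hunk.

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEMO_TAG_BATCH	1024	/* arbitrary batch size for illustration */

static void tag_range_for_writeback(struct address_space *mapping,
				    unsigned long start, unsigned long end)
{
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, DEMO_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		/* drop the tree lock between batches so others can get in */
		cond_resched();
	} while (tagged >= DEMO_TAG_BATCH && start <= end);
}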
diff --git a/lib/random32.c b/lib/random32.c
index 870dc3fc0f0f..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -127,7 +127,7 @@ core_initcall(random32_init);
 
 /*
  * Generate better values after random number generator
- * is fully initalized.
+ * is fully initialized.
  */
 static int __init random32_reseed(void)
 {
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 15e10b1afdd2..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,11 +44,6 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = right;
 	rb_set_parent(node, right);
-
-	if (root->augment_cb) {
-		root->augment_cb(node);
-		root->augment_cb(right);
-	}
 }
 
 static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
@@ -72,20 +67,12 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = left;
 	rb_set_parent(node, left);
-
-	if (root->augment_cb) {
-		root->augment_cb(node);
-		root->augment_cb(left);
-	}
 }
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *parent, *gparent;
 
-	if (root->augment_cb)
-		root->augment_cb(node);
-
 	while ((parent = rb_parent(node)) && rb_is_red(parent))
 	{
 		gparent = rb_parent(parent);
@@ -240,15 +227,12 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 	else
 	{
 		struct rb_node *old = node, *left;
-		int old_parent_cb = 0;
-		int successor_parent_cb = 0;
 
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
 
 		if (rb_parent(old)) {
-			old_parent_cb = 1;
 			if (rb_parent(old)->rb_left == old)
 				rb_parent(old)->rb_left = node;
 			else
@@ -263,10 +247,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		if (parent == old) {
 			parent = node;
 		} else {
-			successor_parent_cb = 1;
 			if (child)
 				rb_set_parent(child, parent);
-
 			parent->rb_left = child;
 
 			node->rb_right = old->rb_right;
@@ -277,24 +259,6 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		node->rb_left = old->rb_left;
 		rb_set_parent(old->rb_left, node);
 
-		if (root->augment_cb) {
-			/*
-			 * Here, three different nodes can have new children.
-			 * The parent of the successor node that was selected
-			 * to replace the node to be erased.
-			 * The node that is getting erased and is now replaced
-			 * by its successor.
-			 * The parent of the node getting erased-replaced.
-			 */
-			if (successor_parent_cb)
-				root->augment_cb(parent);
-
-			root->augment_cb(node);
-
-			if (old_parent_cb)
-				root->augment_cb(rb_parent(old));
-		}
-
 		goto color;
 	}
 
@@ -303,19 +267,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 
 	if (child)
 		rb_set_parent(child, parent);
-
-	if (parent) {
+	if (parent)
+	{
 		if (parent->rb_left == node)
 			parent->rb_left = child;
 		else
 			parent->rb_right = child;
-
-		if (root->augment_cb)
-			root->augment_cb(parent);
-
-	} else {
-		root->rb_node = child;
 	}
+	else
+		root->rb_node = child;
 
 color:
 	if (color == RB_BLACK)
@@ -323,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 }
 EXPORT_SYMBOL(rb_erase);
 
+static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+{
+	struct rb_node *parent;
+
+up:
+	func(node, data);
+	parent = rb_parent(node);
+	if (!parent)
+		return;
+
+	if (node == parent->rb_left && parent->rb_right)
+		func(parent->rb_right, data);
+	else if (parent->rb_left)
+		func(parent->rb_left, data);
+
+	node = parent;
+	goto up;
+}
+
+/*
+ * after inserting @node into the tree, update the tree to account for
+ * both the new entry and any damage done by rebalance
+ */
+void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+{
+	if (node->rb_left)
+		node = node->rb_left;
+	else if (node->rb_right)
+		node = node->rb_right;
+
+	rb_augment_path(node, func, data);
+}
+
+/*
+ * before removing the node, find the deepest node on the rebalance path
+ * that will still be there after @node gets removed
+ */
+struct rb_node *rb_augment_erase_begin(struct rb_node *node)
+{
+	struct rb_node *deepest;
+
+	if (!node->rb_right && !node->rb_left)
+		deepest = rb_parent(node);
+	else if (!node->rb_right)
+		deepest = node->rb_left;
+	else if (!node->rb_left)
+		deepest = node->rb_right;
+	else {
+		deepest = rb_next(node);
+		if (deepest->rb_right)
+			deepest = deepest->rb_right;
+		else if (rb_parent(deepest) != node)
+			deepest = rb_parent(deepest);
+	}
+
+	return deepest;
+}
+
+/*
+ * after removal, update the tree to account for the removed entry
+ * and any rebalance damage.
+ */
+void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+{
+	if (node)
+		rb_augment_path(node, func, data);
+}
+
 /*
  * This function returns the first node (in sort order) of the tree.
  */
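
A hedged sketch of the new explicit augmentation API that replaces the removed root->augment_cb hook (the node type and callback below are hypothetical): each node caches the maximum value in its subtree, and the hooks repair that cache along the rebalance path around insert and erase.

struct demo_node {
	struct rb_node rb;
	int value;
	int subtree_max;	/* max of ->value over this subtree */
};

static void demo_augment(struct rb_node *rb, void *unused)
{
	struct demo_node *n = rb_entry(rb, struct demo_node, rb);
	int max = n->value;

	if (rb->rb_left) {
		struct demo_node *l = rb_entry(rb->rb_left, struct demo_node, rb);
		if (l->subtree_max > max)
			max = l->subtree_max;
	}
	if (rb->rb_right) {
		struct demo_node *r = rb_entry(rb->rb_right, struct demo_node, rb);
		if (r->subtree_max > max)
			max = r->subtree_max;
	}
	n->subtree_max = max;
}

/* after the usual rb_link_node() + rb_insert_color(): */
static void demo_insert_fixup(struct demo_node *n)
{
	rb_augment_insert(&n->rb, demo_augment, NULL);
}

static void demo_erase(struct demo_node *n, struct rb_root *root)
{
	/* find the surviving node whose cached data must be fixed up */
	struct rb_node *deepest = rb_augment_erase_begin(&n->rb);

	rb_erase(&n->rb, root);
	rb_augment_erase_end(deepest, demo_augment, NULL);
}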
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ceba8e28807a..f236d7cd5cf3 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -36,45 +36,56 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
+/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
+ * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
+ * since the rwsem value was observed.
+ */
+#define RWSEM_WAKE_ANY		0 /* Wake whatever's at head of wait list */
+#define RWSEM_WAKE_NO_ACTIVE	1 /* rwsem was observed with no active thread */
+#define RWSEM_WAKE_READ_OWNED	2 /* rwsem was observed to be read owned */
+
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long oldcount, woken, loop;
-
-	if (downgrading)
-		goto dont_wake_writers;
-
-	/* if we came through an up_xxxx() call, we only only wake someone up
-	 * if we can transition the active part of the count from 0 -> 1
-	 */
-try_again:
-	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
-						- RWSEM_ACTIVE_BIAS;
-	if (oldcount & RWSEM_ACTIVE_MASK)
-		goto undo;
+	signed long oldcount, woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-	/* try to grant a single write lock if there's a writer at the front
-	 * of the queue - note we leave the 'active part' of the count
-	 * incremented by 1 and the waiting part incremented by 0x00010000
-	 */
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
 		goto readers_only;
 
+	if (wake_type == RWSEM_WAKE_READ_OWNED)
+		/* Another active reader was observed, so wakeup is not
+		 * likely to succeed. Save the atomic op.
+		 */
+		goto out;
+
+	/* There's a writer at the front of the queue - try to grant it the
+	 * write lock.  However, we only wake this writer if we can transition
+	 * the active part of the count from 0 -> 1
+	 */
+	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+	if (waiter->list.next == &sem->wait_list)
+		adjustment -= RWSEM_WAITING_BIAS;
+
+ try_again_write:
+	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+	if (oldcount & RWSEM_ACTIVE_MASK)
+		/* Someone grabbed the sem already */
+		goto undo_write;
+
 	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 	 * It is allocated on the waiter's stack and may become invalid at
 	 * any time after that point (due to a wakeup from another source).
@@ -87,18 +98,30 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	put_task_struct(tsk);
 	goto out;
 
-	/* don't want to wake any writers */
-dont_wake_writers:
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+readers_only:
+	/* If we come here from up_xxxx(), another thread might have reached
+	 * rwsem_down_failed_common() before we acquired the spinlock and
+	 * woken up a waiter, making it now active.  We prefer to check for
+	 * this first in order to not spend too much time with the spinlock
+	 * held if we're not going to be able to wake up readers in the end.
+	 *
+	 * Note that we do not need to update the rwsem count: any writer
+	 * trying to acquire rwsem will run rwsem_down_write_failed() due
+	 * to the waiting threads and block trying to acquire the spinlock.
+	 *
+	 * We use a dummy atomic update in order to acquire the cache line
+	 * exclusively since we expect to succeed and run the final rwsem
+	 * count adjustment pretty soon.
+	 */
+	if (wake_type == RWSEM_WAKE_ANY &&
+	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+		/* Someone grabbed the sem for write already */
 		goto out;
 
-	/* grant an infinite number of read locks to the readers at the front
-	 * of the queue
-	 * - note we increment the 'active part' of the count by the number of
-	 *   readers before waking any processes up
+	/* Grant an infinite number of read locks to the readers at the front
+	 * of the queue.  Note we increment the 'active part' of the count by
+	 * the number of readers before waking any processes up.
 	 */
-readers_only:
 	woken = 0;
 	do {
 		woken++;
@@ -111,16 +134,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
-	loop = woken;
-	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
-	if (!downgrading)
-		/* we'd already done one increment earlier */
-		woken -= RWSEM_ACTIVE_BIAS;
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
+	if (waiter->flags & RWSEM_WAITING_FOR_READ)
+		/* hit end of list above */
+		adjustment -= RWSEM_WAITING_BIAS;
 
-	rwsem_atomic_add(woken, sem);
+	rwsem_atomic_add(adjustment, sem);
 
 	next = sem->wait_list.next;
-	for (; loop > 0; loop--) {
+	for (loop = woken; loop > 0; loop--) {
 		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		tsk = waiter->task;
@@ -138,10 +160,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	/* undo the change to the active count, but check for a transition
 	 * 1->0 */
-undo:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+undo_write:
+	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
 		goto out;
-	goto try_again;
+	goto try_again_write;
 }
 
 /*
@@ -149,8 +171,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
  */
 static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
-			 struct rwsem_waiter *waiter, signed long adjustment)
+			 unsigned int flags, signed long adjustment)
 {
+	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
 	signed long count;
 
@@ -158,23 +181,34 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 
 	/* set up my own style of waitqueue */
 	spin_lock_irq(&sem->wait_lock);
-	waiter->task = tsk;
+	waiter.task = tsk;
+	waiter.flags = flags;
 	get_task_struct(tsk);
 
-	list_add_tail(&waiter->list, &sem->wait_list);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
 
-	/* we're now waiting on the lock, but no longer actively read-locking */
+	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* if there are no active locks, wake the front queued process(es) up */
-	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem, 0);
+	/* If there are no active locks, wake the front queued process(es) up.
+	 *
+	 * Alternatively, if we're called from a failed down_write(), there
+	 * were already threads queued before us and there are no active
+	 * writers, the lock must be read owned; so we try to wake any read
+	 * locks that were queued ahead of us. */
+	if (count == RWSEM_WAITING_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+	else if (count > RWSEM_WAITING_BIAS &&
+			adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
 	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
-		if (!waiter->task)
+		if (!waiter.task)
 			break;
 		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
@@ -191,12 +225,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 asmregparm struct rw_semaphore __sched *
 rwsem_down_read_failed(struct rw_semaphore *sem)
 {
-	struct rwsem_waiter waiter;
-
-	waiter.flags = RWSEM_WAITING_FOR_READ;
-	rwsem_down_failed_common(sem, &waiter,
-				 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-	return sem;
+	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+					-RWSEM_ACTIVE_READ_BIAS);
 }
 
 /*
@@ -205,12 +235,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
205 | asmregparm struct rw_semaphore __sched * | 235 | asmregparm struct rw_semaphore __sched * |
206 | rwsem_down_write_failed(struct rw_semaphore *sem) | 236 | rwsem_down_write_failed(struct rw_semaphore *sem) |
207 | { | 237 | { |
208 | struct rwsem_waiter waiter; | 238 | return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE, |
209 | 239 | -RWSEM_ACTIVE_WRITE_BIAS); | |
210 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | ||
211 | rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); | ||
212 | |||
213 | return sem; | ||
214 | } | 240 | } |
215 | 241 | ||
216 | /* | 242 | /* |
@@ -225,7 +251,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
225 | 251 | ||
226 | /* do nothing if list empty */ | 252 | /* do nothing if list empty */ |
227 | if (!list_empty(&sem->wait_list)) | 253 | if (!list_empty(&sem->wait_list)) |
228 | sem = __rwsem_do_wake(sem, 0); | 254 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); |
229 | 255 | ||
230 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 256 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
231 | 257 | ||
@@ -245,7 +271,7 @@ asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
245 | 271 | ||
246 | /* do nothing if list empty */ | 272 | /* do nothing if list empty */ |
247 | if (!list_empty(&sem->wait_list)) | 273 | if (!list_empty(&sem->wait_list)) |
248 | sem = __rwsem_do_wake(sem, 1); | 274 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
249 | 275 | ||
250 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 276 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
251 | 277 | ||
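The writer-grab check above (rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS) and the per-reader adjustment arithmetic both fall out of the rwsem count layout. A sketch of that layout, using the 32-bit values common on x86 (illustration only; the real constants are architecture-specific and live under asm/rwsem.h):

    /* Assumed 32-bit layout:
     *
     *   #define RWSEM_ACTIVE_BIAS        0x00000001
     *   #define RWSEM_ACTIVE_MASK        0x0000ffff
     *   #define RWSEM_WAITING_BIAS       (-0x00010000)
     *   #define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
     *   #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
     *
     * Example counts:
     *   0x00000000  unlocked, wait list empty
     *   0x00000001  one active reader, wait list empty
     *   0xffff0001  one active locker, waiters queued
     *   0xffff0000  no active lockers, waiters queued (== RWSEM_WAITING_BIAS)
     *
     * Once waiters are queued, an active writer drives the count to
     * RWSEM_WAITING_BIAS + RWSEM_ACTIVE_WRITE_BIAS, which is strictly below
     * RWSEM_WAITING_BIAS -- exactly what the RWSEM_WAKE_ANY check in
     * __rwsem_do_wake() tests for before waking readers.
     */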
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index a009055140ec..34e3082632d8 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -50,19 +50,11 @@ | |||
50 | */ | 50 | */ |
51 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) | 51 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
52 | 52 | ||
53 | /* | ||
54 | * Enumeration for sync targets | ||
55 | */ | ||
56 | enum dma_sync_target { | ||
57 | SYNC_FOR_CPU = 0, | ||
58 | SYNC_FOR_DEVICE = 1, | ||
59 | }; | ||
60 | |||
61 | int swiotlb_force; | 53 | int swiotlb_force; |
62 | 54 | ||
63 | /* | 55 | /* |
64 | * Used to do a quick range check in unmap_single and | 56 | * Used to do a quick range check in swiotlb_tbl_unmap_single and |
65 | * sync_single_*, to see if the memory was in fact allocated by this | 57 | * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this |
66 | * API. | 58 | * API. |
67 | */ | 59 | */ |
68 | static char *io_tlb_start, *io_tlb_end; | 60 | static char *io_tlb_start, *io_tlb_end; |
@@ -140,28 +132,14 @@ void swiotlb_print_info(void) | |||
140 | (unsigned long long)pend); | 132 | (unsigned long long)pend); |
141 | } | 133 | } |
142 | 134 | ||
143 | /* | 135 | void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) |
144 | * Statically reserve bounce buffer space and initialize bounce buffer data | ||
145 | * structures for the software IO TLB used to implement the DMA API. | ||
146 | */ | ||
147 | void __init | ||
148 | swiotlb_init_with_default_size(size_t default_size, int verbose) | ||
149 | { | 136 | { |
150 | unsigned long i, bytes; | 137 | unsigned long i, bytes; |
151 | 138 | ||
152 | if (!io_tlb_nslabs) { | 139 | bytes = nslabs << IO_TLB_SHIFT; |
153 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
154 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
155 | } | ||
156 | |||
157 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
158 | 140 | ||
159 | /* | 141 | io_tlb_nslabs = nslabs; |
160 | * Get IO TLB memory from the low pages | 142 | io_tlb_start = tlb; |
161 | */ | ||
162 | io_tlb_start = alloc_bootmem_low_pages(bytes); | ||
163 | if (!io_tlb_start) | ||
164 | panic("Cannot allocate SWIOTLB buffer"); | ||
165 | io_tlb_end = io_tlb_start + bytes; | 143 | io_tlb_end = io_tlb_start + bytes; |
166 | 144 | ||
167 | /* | 145 | /* |
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) | |||
185 | swiotlb_print_info(); | 163 | swiotlb_print_info(); |
186 | } | 164 | } |
187 | 165 | ||
166 | /* | ||
167 | * Statically reserve bounce buffer space and initialize bounce buffer data | ||
168 | * structures for the software IO TLB used to implement the DMA API. | ||
169 | */ | ||
170 | void __init | ||
171 | swiotlb_init_with_default_size(size_t default_size, int verbose) | ||
172 | { | ||
173 | unsigned long bytes; | ||
174 | |||
175 | if (!io_tlb_nslabs) { | ||
176 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
177 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
178 | } | ||
179 | |||
180 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
181 | |||
182 | /* | ||
183 | * Get IO TLB memory from the low pages | ||
184 | */ | ||
185 | io_tlb_start = alloc_bootmem_low_pages(bytes); | ||
186 | if (!io_tlb_start) | ||
187 | panic("Cannot allocate SWIOTLB buffer"); | ||
188 | |||
189 | swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); | ||
190 | } | ||
191 | |||
188 | void __init | 192 | void __init |
189 | swiotlb_init(int verbose) | 193 | swiotlb_init(int verbose) |
190 | { | 194 | { |
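Splitting swiotlb_init_with_tbl() out this way lets an early-boot caller hand in an IO TLB it allocated itself instead of taking the bootmem default. A minimal sketch of such a caller; my_swiotlb_init() and my_alloc_dma_reachable() are made-up placeholders for whatever allocator the backend actually uses:

    #include <linux/kernel.h>
    #include <linux/swiotlb.h>

    /* Hypothetical backend allocator (assumption, not a real API) */
    extern char *my_alloc_dma_reachable(size_t bytes);

    static void __init my_swiotlb_init(void)
    {
    	/* 64 MB worth of slabs, rounded to a whole IO TLB segment */
    	unsigned long nslabs = ALIGN((64UL << 20) >> IO_TLB_SHIFT,
    				     IO_TLB_SEGSIZE);
    	char *tlb = my_alloc_dma_reachable(nslabs << IO_TLB_SHIFT);

    	if (!tlb)
    		panic("my_swiotlb: cannot allocate IO TLB buffer");
    	swiotlb_init_with_tbl(tlb, nslabs, 1);	/* verbose = 1 */
    }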
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr) | |||
323 | /* | 327 | /* |
324 | * Bounce: copy the swiotlb buffer back to the original dma location | 328 | * Bounce: copy the swiotlb buffer back to the original dma location |
325 | */ | 329 | */ |
326 | static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | 330 | void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, |
327 | enum dma_data_direction dir) | 331 | enum dma_data_direction dir) |
328 | { | 332 | { |
329 | unsigned long pfn = PFN_DOWN(phys); | 333 | unsigned long pfn = PFN_DOWN(phys); |
330 | 334 | ||
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | |||
360 | memcpy(phys_to_virt(phys), dma_addr, size); | 364 | memcpy(phys_to_virt(phys), dma_addr, size); |
361 | } | 365 | } |
362 | } | 366 | } |
367 | EXPORT_SYMBOL_GPL(swiotlb_bounce); | ||
363 | 368 | ||
364 | /* | 369 | void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, |
365 | * Allocates bounce buffer and returns its kernel virtual address. | 370 | phys_addr_t phys, size_t size, |
366 | */ | 371 | enum dma_data_direction dir) |
367 | static void * | ||
368 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) | ||
369 | { | 372 | { |
370 | unsigned long flags; | 373 | unsigned long flags; |
371 | char *dma_addr; | 374 | char *dma_addr; |
372 | unsigned int nslots, stride, index, wrap; | 375 | unsigned int nslots, stride, index, wrap; |
373 | int i; | 376 | int i; |
374 | unsigned long start_dma_addr; | ||
375 | unsigned long mask; | 377 | unsigned long mask; |
376 | unsigned long offset_slots; | 378 | unsigned long offset_slots; |
377 | unsigned long max_slots; | 379 | unsigned long max_slots; |
378 | 380 | ||
379 | mask = dma_get_seg_boundary(hwdev); | 381 | mask = dma_get_seg_boundary(hwdev); |
380 | start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask; | ||
381 | 382 | ||
382 | offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 383 | tbl_dma_addr &= mask; |
384 | |||
385 | offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | ||
383 | 386 | ||
384 | /* | 387 | /* |
385 | * Carefully handle integer overflow which can occur when mask == ~0UL. | 388 | * Carefully handle integer overflow which can occur when mask == ~0UL. |
@@ -466,12 +469,27 @@ found: | |||
466 | 469 | ||
467 | return dma_addr; | 470 | return dma_addr; |
468 | } | 471 | } |
472 | EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); | ||
473 | |||
474 | /* | ||
475 | * Allocates bounce buffer and returns its kernel virtual address. | ||
476 | */ | ||
477 | |||
478 | static void * | ||
479 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, | ||
480 | enum dma_data_direction dir) | ||
481 | { | ||
482 | dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start); | ||
483 | |||
484 | return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); | ||
485 | } | ||
469 | 486 | ||
470 | /* | 487 | /* |
471 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 488 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. |
472 | */ | 489 | */ |
473 | static void | 490 | void |
474 | do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | 491 | swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, |
492 | enum dma_data_direction dir) | ||
475 | { | 493 | { |
476 | unsigned long flags; | 494 | unsigned long flags; |
477 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 495 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | |||
509 | } | 527 | } |
510 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 528 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
511 | } | 529 | } |
530 | EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); | ||
512 | 531 | ||
513 | static void | 532 | void |
514 | sync_single(struct device *hwdev, char *dma_addr, size_t size, | 533 | swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, |
515 | int dir, int target) | 534 | enum dma_data_direction dir, |
535 | enum dma_sync_target target) | ||
516 | { | 536 | { |
517 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | 537 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; |
518 | phys_addr_t phys = io_tlb_orig_addr[index]; | 538 | phys_addr_t phys = io_tlb_orig_addr[index]; |
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, | |||
536 | BUG(); | 556 | BUG(); |
537 | } | 557 | } |
538 | } | 558 | } |
559 | EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); | ||
539 | 560 | ||
540 | void * | 561 | void * |
541 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 562 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
559 | } | 580 | } |
560 | if (!ret) { | 581 | if (!ret) { |
561 | /* | 582 | /* |
562 | * We are either out of memory or the device can't DMA | 583 | * We are either out of memory or the device can't DMA to |
563 | * to GFP_DMA memory; fall back on map_single(), which | 584 | * GFP_DMA memory; fall back on map_single(), which |
564 | * will grab memory from the lowest available address range. | 585 | * will grab memory from the lowest available address range. |
565 | */ | 586 | */ |
566 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); | 587 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); |
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
578 | (unsigned long long)dev_addr); | 599 | (unsigned long long)dev_addr); |
579 | 600 | ||
580 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 601 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
581 | do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); | 602 | swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); |
582 | return NULL; | 603 | return NULL; |
583 | } | 604 | } |
584 | *dma_handle = dev_addr; | 605 | *dma_handle = dev_addr; |
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
596 | if (!is_swiotlb_buffer(paddr)) | 617 | if (!is_swiotlb_buffer(paddr)) |
597 | free_pages((unsigned long)vaddr, get_order(size)); | 618 | free_pages((unsigned long)vaddr, get_order(size)); |
598 | else | 619 | else |
599 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 620 | /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */ |
600 | do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); | 621 | swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
601 | } | 622 | } |
602 | EXPORT_SYMBOL(swiotlb_free_coherent); | 623 | EXPORT_SYMBOL(swiotlb_free_coherent); |
603 | 624 | ||
604 | static void | 625 | static void |
605 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | 626 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, |
627 | int do_panic) | ||
606 | { | 628 | { |
607 | /* | 629 | /* |
608 | * Ran out of IOMMU space for this operation. This is very bad. | 630 | * Ran out of IOMMU space for this operation. This is very bad. |
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); | |||
680 | * whatever the device wrote there. | 702 | * whatever the device wrote there. |
681 | */ | 703 | */ |
682 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 704 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
683 | size_t size, int dir) | 705 | size_t size, enum dma_data_direction dir) |
684 | { | 706 | { |
685 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); | 707 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); |
686 | 708 | ||
687 | BUG_ON(dir == DMA_NONE); | 709 | BUG_ON(dir == DMA_NONE); |
688 | 710 | ||
689 | if (is_swiotlb_buffer(paddr)) { | 711 | if (is_swiotlb_buffer(paddr)) { |
690 | do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); | 712 | swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir); |
691 | return; | 713 | return; |
692 | } | 714 | } |
693 | 715 | ||
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); | |||
723 | */ | 745 | */ |
724 | static void | 746 | static void |
725 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | 747 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, |
726 | size_t size, int dir, int target) | 748 | size_t size, enum dma_data_direction dir, |
749 | enum dma_sync_target target) | ||
727 | { | 750 | { |
728 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); | 751 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); |
729 | 752 | ||
730 | BUG_ON(dir == DMA_NONE); | 753 | BUG_ON(dir == DMA_NONE); |
731 | 754 | ||
732 | if (is_swiotlb_buffer(paddr)) { | 755 | if (is_swiotlb_buffer(paddr)) { |
733 | sync_single(hwdev, phys_to_virt(paddr), size, dir, target); | 756 | swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir, |
757 | target); | ||
734 | return; | 758 | return; |
735 | } | 759 | } |
736 | 760 | ||
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs); | |||
809 | 833 | ||
810 | int | 834 | int |
811 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 835 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
812 | int dir) | 836 | enum dma_data_direction dir) |
813 | { | 837 | { |
814 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 838 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
815 | } | 839 | } |
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | |||
836 | 860 | ||
837 | void | 861 | void |
838 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 862 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
839 | int dir) | 863 | enum dma_data_direction dir) |
840 | { | 864 | { |
841 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 865 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
842 | } | 866 | } |
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); | |||
851 | */ | 875 | */ |
852 | static void | 876 | static void |
853 | swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, | 877 | swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, |
854 | int nelems, int dir, int target) | 878 | int nelems, enum dma_data_direction dir, |
879 | enum dma_sync_target target) | ||
855 | { | 880 | { |
856 | struct scatterlist *sg; | 881 | struct scatterlist *sg; |
857 | int i; | 882 | int i; |
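Taken together, the now-exported swiotlb_tbl_map_single(), swiotlb_tbl_unmap_single(), swiotlb_tbl_sync_single() and swiotlb_bounce() form a bounce-buffer toolkit that a different dma_ops backend can drive with its own address translation. A rough sketch, with my_tbl_bus_addr(), my_virt_to_bus() and my_bus_to_virt() standing in for the backend's translations (all made up for illustration):

    #include <linux/dma-mapping.h>
    #include <linux/swiotlb.h>

    /* Hypothetical backend translations (assumptions, not real APIs) */
    extern dma_addr_t my_tbl_bus_addr(struct device *dev);
    extern dma_addr_t my_virt_to_bus(struct device *dev, void *vaddr);
    extern void *my_bus_to_virt(struct device *dev, dma_addr_t addr);

    static dma_addr_t my_map_page(struct device *dev, struct page *page,
    			      unsigned long offset, size_t size,
    			      enum dma_data_direction dir)
    {
    	phys_addr_t phys = page_to_phys(page) + offset;
    	void *vaddr;

    	/* Bounce through the IO TLB; the table's bus address is whatever
    	 * *this* backend says it is. */
    	vaddr = swiotlb_tbl_map_single(dev, my_tbl_bus_addr(dev),
    				       phys, size, dir);
    	if (!vaddr)
    		return 0;	/* backend-specific error cookie */

    	return my_virt_to_bus(dev, vaddr);
    }

    static void my_unmap_page(struct device *dev, dma_addr_t addr,
    			  size_t size, enum dma_data_direction dir)
    {
    	swiotlb_tbl_unmap_single(dev, my_bus_to_virt(dev, addr), size, dir);
    }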
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index b8a2f549ab0e..7af9d841c43b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -146,19 +146,16 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) | |||
146 | { | 146 | { |
147 | char *tail; | 147 | char *tail; |
148 | unsigned long val; | 148 | unsigned long val; |
149 | size_t len; | ||
150 | 149 | ||
151 | *res = 0; | 150 | *res = 0; |
152 | len = strlen(cp); | 151 | if (!*cp) |
153 | if (len == 0) | ||
154 | return -EINVAL; | 152 | return -EINVAL; |
155 | 153 | ||
156 | val = simple_strtoul(cp, &tail, base); | 154 | val = simple_strtoul(cp, &tail, base); |
157 | if (tail == cp) | 155 | if (tail == cp) |
158 | return -EINVAL; | 156 | return -EINVAL; |
159 | 157 | ||
160 | if ((*tail == '\0') || | 158 | if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { |
161 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
162 | *res = val; | 159 | *res = val; |
163 | return 0; | 160 | return 0; |
164 | } | 161 | } |
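The rewritten check keeps the documented contract — a value optionally followed by a single trailing newline — while dropping the extra strlen() pass. That newline case is what sysfs store handlers rely on; a minimal sketch (threshold_store/my_threshold are made-up names):

    #include <linux/device.h>
    #include <linux/kernel.h>

    static unsigned long my_threshold;

    static ssize_t threshold_store(struct device *dev,
    			       struct device_attribute *attr,
    			       const char *buf, size_t count)
    {
    	unsigned long val;

    	/* buf typically arrives as "42\n" from echo(1) */
    	if (strict_strtoul(buf, 10, &val))
    		return -EINVAL;

    	my_threshold = val;
    	return count;
    }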
@@ -220,18 +217,15 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res) | |||
220 | { | 217 | { |
221 | char *tail; | 218 | char *tail; |
222 | unsigned long long val; | 219 | unsigned long long val; |
223 | size_t len; | ||
224 | 220 | ||
225 | *res = 0; | 221 | *res = 0; |
226 | len = strlen(cp); | 222 | if (!*cp) |
227 | if (len == 0) | ||
228 | return -EINVAL; | 223 | return -EINVAL; |
229 | 224 | ||
230 | val = simple_strtoull(cp, &tail, base); | 225 | val = simple_strtoull(cp, &tail, base); |
231 | if (tail == cp) | 226 | if (tail == cp) |
232 | return -EINVAL; | 227 | return -EINVAL; |
233 | if ((*tail == '\0') || | 228 | if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { |
234 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
235 | *res = val; | 229 | *res = val; |
236 | return 0; | 230 | return 0; |
237 | } | 231 | } |
@@ -980,6 +974,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
980 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] | 974 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] |
981 | * little endian output byte order is: | 975 | * little endian output byte order is: |
982 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] | 976 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] |
977 | * - 'V' For a struct va_format which contains a format string * and va_list *, | ||
978 | * call vsnprintf(->format, *->va_list). | ||
979 | * Implements a "recursive vsnprintf". | ||
980 | * Do not use this feature without some mechanism to verify the | ||
981 | * correctness of the format string and va_list arguments. | ||
983 | * | 982 | * |
984 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 983 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
985 | * function pointers are really function descriptors, which contain a | 984 | * function pointers are really function descriptors, which contain a |
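A sketch of the intended %pV usage pattern, matching the case 'V' handler added in the next hunk; my_dev_printk() is a made-up wrapper, and struct va_format's fields are ->fmt and ->va as dereferenced by that handler:

    #include <linux/device.h>
    #include <linux/kernel.h>

    void my_dev_printk(const struct device *dev, const char *fmt, ...)
    {
    	struct va_format vaf;
    	va_list args;

    	va_start(args, fmt);
    	vaf.fmt = fmt;
    	vaf.va = &args;
    	/* the %pV argument must be a struct va_format pointer */
    	printk(KERN_INFO "%s: %pV", dev_name(dev), &vaf);
    	va_end(args);
    }

As the new documentation warns, the caller must trust fmt and args to match; %pV simply recurses into vsnprintf() with them.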
@@ -1025,6 +1024,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1025 | break; | 1024 | break; |
1026 | case 'U': | 1025 | case 'U': |
1027 | return uuid_string(buf, end, ptr, spec, fmt); | 1026 | return uuid_string(buf, end, ptr, spec, fmt); |
1027 | case 'V': | ||
1028 | return buf + vsnprintf(buf, end - buf, | ||
1029 | ((struct va_format *)ptr)->fmt, | ||
1030 | *(((struct va_format *)ptr)->va)); | ||
1028 | } | 1031 | } |
1029 | spec.flags |= SMALL; | 1032 | spec.flags |= SMALL; |
1030 | if (spec.field_width == -1) { | 1033 | if (spec.field_width == -1) { |