Diffstat (limited to 'lib')

 lib/Kconfig.debug    | 57
 lib/Makefile         |  3
 lib/cmdline.c        |  2
 lib/iommu-helper.c   |  5
 lib/klist.c          | 96
 lib/percpu_counter.c |  8
 lib/scatterlist.c    |  4
 lib/string_helpers.c | 64
 lib/swiotlb.c        | 49
 lib/vsprintf.c       | 11
 10 files changed, 223 insertions(+), 76 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e378..aa81d2848448 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -495,6 +495,15 @@ config DEBUG_VM
 
           If unsure, say N.
 
+config DEBUG_VIRTUAL
+        bool "Debug VM translations"
+        depends on DEBUG_KERNEL && X86
+        help
+          Enable some costly sanity checks in virtual to page code. This can
+          catch mistakes with virt_to_page() and friends.
+
+          If unsure, say N.
+
 config DEBUG_WRITECOUNT
         bool "Debug filesystem writers count"
         depends on DEBUG_KERNEL
@@ -597,6 +606,19 @@ config RCU_TORTURE_TEST_RUNNABLE
           Say N here if you want the RCU torture tests to start only
           after being manually enabled via /proc.
 
+config RCU_CPU_STALL_DETECTOR
+        bool "Check for stalled CPUs delaying RCU grace periods"
+        depends on CLASSIC_RCU
+        default n
+        help
+          This option causes RCU to printk information on which
+          CPUs are delaying the current grace period, but only when
+          the grace period extends for excessive time periods.
+
+          Say Y if you want RCU to perform such checks.
+
+          Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
         bool "Kprobes sanity tests"
         depends on DEBUG_KERNEL
@@ -624,6 +646,28 @@ config BACKTRACE_SELF_TEST
 
           Say N if you are unsure.
 
+config DEBUG_BLOCK_EXT_DEVT
+        bool "Force extended block device numbers and spread them"
+        depends on DEBUG_KERNEL
+        depends on BLOCK
+        default n
+        help
+          Conventionally, block device numbers are allocated from a
+          predetermined contiguous area. However, the extended block
+          device area may introduce non-contiguous block device
+          numbers. This option forces most block device numbers to be
+          allocated from the extended space and spreads them out to
+          discover kernel or userland code paths which assume
+          predetermined contiguous device number allocation.
+
+          Note that turning on this debug option shuffles the device
+          numbers of all IDE and SCSI devices, including libata ones,
+          so a root partition specified directly by device number
+          (via rdev or root=MAJ:MIN) won't work anymore. Textual
+          device names (root=/dev/sdXn) will continue to work.
+
+          Say N if you are unsure.
+
 config LKDTM
         tristate "Linux Kernel Dump Test Tool Module"
         depends on DEBUG_KERNEL
@@ -661,10 +705,21 @@ config FAIL_PAGE_ALLOC
 
 config FAIL_MAKE_REQUEST
         bool "Fault-injection capability for disk IO"
-        depends on FAULT_INJECTION
+        depends on FAULT_INJECTION && BLOCK
         help
           Provide fault-injection capability for disk IO.
 
+config FAIL_IO_TIMEOUT
+        bool "Fault-injection capability for faking disk interrupts"
+        depends on FAULT_INJECTION && BLOCK
+        help
+          Provide fault-injection capability on end IO handling. This
+          will make the block layer "forget" an interrupt as configured,
+          thus exercising the error handling.
+
+          Only works with drivers that use the generic timeout handling;
+          for others it won't do anything.
+
 config FAULT_INJECTION_DEBUG_FS
         bool "Debugfs entries for fault-injection capabilities"
         depends on FAULT_INJECTION && SYSFS && DEBUG_FS
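One annotation on the DEBUG_VIRTUAL option above: the class of mistake it is meant to catch is a direct-mapping translation applied to memory that is not in the direct mapping. A hedged sketch follows; virt_to_page(), vmalloc() and vmalloc_to_page() are real kernel APIs, while broken_translation() itself is hypothetical:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void broken_translation(void)    /* hypothetical */
    {
        void *p = vmalloc(PAGE_SIZE);

        /*
         * Bug: vmalloc memory is not in the kernel direct mapping,
         * so virt_to_page() computes garbage here.  With
         * CONFIG_DEBUG_VIRTUAL the bogus translation trips a sanity
         * check instead of silently corrupting state;
         * vmalloc_to_page() is the correct call for this memory.
         */
        struct page *pg = virt_to_page(p);

        (void)pg;
        vfree(p);
    }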
diff --git a/lib/Makefile b/lib/Makefile
index 3b1f94bbe9de..44001af76a7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
 lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
-         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+         string_helpers.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 5ba8a942a478..f5f3ad8b62ff 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -126,7 +126,7 @@ char *get_options(const char *str, int nints, int *ints)
  *      megabyte, or one gigabyte, respectively.
  */
 
-unsigned long long memparse(char *ptr, char **retptr)
+unsigned long long memparse(const char *ptr, char **retptr)
 {
         char *endptr;   /* local pointer to end of parsed string */
 
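Constifying memparse() lets callers hand in const command-line text without a cast. A hedged usage sketch; memparse() and its K/M/G suffix scaling are the real API, while parse_cache_size() and cache_bytes are made up for illustration:

    #include <linux/kernel.h>
    #include <linux/errno.h>

    static unsigned long long cache_bytes;  /* hypothetical tunable */

    static int parse_cache_size(const char *arg)
    {
        char *end;

        /* "64K" -> 65536, "16M" -> 16777216, "1G" -> 1073741824 */
        cache_bytes = memparse(arg, &end);
        if (end == arg)         /* no digits were consumed */
            return -EINVAL;
        return 0;
    }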
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..5d90074dca75 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
         return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-                                int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
         unsigned long end = i + len;
         while (i < end) {
@@ -64,7 +63,7 @@ again:
                         start = index + 1;
                         goto again;
                 }
-                set_bit_area(map, index, nr);
+                iommu_area_reserve(map, index, nr);
         }
         return index;
 }
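Renaming set_bit_area() to iommu_area_reserve() and dropping the static turns a private helper into something IOMMU drivers can call to keep ranges of the allocation bitmap permanently busy. A hedged caller sketch; iommu_area_reserve() is the interface exposed here, but the bitmap and the reason for reserving are invented:

    #include <linux/iommu-helper.h>
    #include <linux/bitops.h>

    /* hypothetical map: one bit per IO page slot in an aperture */
    static unsigned long io_page_bitmap[BITS_TO_LONGS(65536)];

    static void reserve_guard_pages(void)
    {
        /*
         * Keep the first 32 slots permanently set so that an
         * allocator walking this bitmap never hands them out, e.g.
         * to guard against devices that prefetch past the end of a
         * mapping.
         */
        iommu_area_reserve(io_page_bitmap, 0, 32);
    }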
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa2..bbdd3015c2c7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
 #include <linux/klist.h>
 #include <linux/module.h>
 
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD              1LU
+#define KNODE_KLIST_MASK        ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+        return (struct klist *)
+                ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+        return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+        knode->n_klist = klist;
+        /* no knode deserves to start its life dead */
+        WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+        /* and no knode should die twice ever either, see we're very humane */
+        WARN_ON(knode_dead(knode));
+        *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
 
 /**
  * klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
         INIT_LIST_HEAD(&n->n_node);
         init_completion(&n->n_removed);
         kref_init(&n->n_ref);
-        n->n_klist = k;
+        knode_set_klist(n, k);
         if (k->get)
                 k->get(n);
 }
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
  */
 void klist_add_after(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
 
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
  */
 void klist_add_before(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
 
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
 {
         struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
+        WARN_ON(!knode_dead(n));
         list_del(&n->n_node);
         complete(&n->n_removed);
-        n->n_klist = NULL;
+        knode_set_klist(n, NULL);
 }
 
 static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
         return kref_put(&n->n_ref, klist_release);
 }
 
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
 {
-        struct klist *k = n->n_klist;
+        struct klist *k = knode_klist(n);
         void (*put)(struct klist_node *) = k->put;
 
         spin_lock(&k->k_lock);
+        if (kill)
+                knode_kill(n);
         if (!klist_dec_and_del(n))
                 put = NULL;
         spin_unlock(&k->k_lock);
         if (put)
                 put(n);
 }
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+        klist_put(n, true);
+}
 EXPORT_SYMBOL_GPL(klist_del);
 
 /**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                           struct klist_node *n)
 {
         i->i_klist = k;
-        i->i_head = &k->k_list;
         i->i_cur = n;
         if (n)
                 kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
 void klist_iter_exit(struct klist_iter *i)
 {
         if (i->i_cur) {
-                klist_del(i->i_cur);
+                klist_put(i->i_cur, false);
                 i->i_cur = NULL;
         }
 }
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
  */
 struct klist_node *klist_next(struct klist_iter *i)
 {
-        struct list_head *next;
-        struct klist_node *lnode = i->i_cur;
-        struct klist_node *knode = NULL;
         void (*put)(struct klist_node *) = i->i_klist->put;
+        struct klist_node *last = i->i_cur;
+        struct klist_node *next;
 
         spin_lock(&i->i_klist->k_lock);
-        if (lnode) {
-                next = lnode->n_node.next;
-                if (!klist_dec_and_del(lnode))
+
+        if (last) {
+                next = to_klist_node(last->n_node.next);
+                if (!klist_dec_and_del(last))
                         put = NULL;
         } else
-                next = i->i_head->next;
+                next = to_klist_node(i->i_klist->k_list.next);
 
-        if (next != i->i_head) {
-                knode = to_klist_node(next);
-                kref_get(&knode->n_ref);
+        i->i_cur = NULL;
+        while (next != to_klist_node(&i->i_klist->k_list)) {
+                if (likely(!knode_dead(next))) {
+                        kref_get(&next->n_ref);
+                        i->i_cur = next;
+                        break;
+                }
+                next = to_klist_node(next->n_node.next);
         }
-        i->i_cur = knode;
+
         spin_unlock(&i->i_klist->k_lock);
-        if (put && lnode)
-                put(lnode);
-        return knode;
+
+        if (put && last)
+                put(last);
+        return i->i_cur;
 }
 EXPORT_SYMBOL_GPL(klist_next);
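The KNODE_DEAD trick above relies on struct klist being at least word-aligned, so bit 0 of any valid klist pointer is zero and can be borrowed as a "deleted" flag without enlarging klist_node. A minimal user-space sketch of the same tagging scheme (names mirror the patch; the struct is a stand-in whose only relevant property is its alignment):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define KNODE_DEAD 1UL

    struct klist { int dummy; };    /* stand-in for the real struct */

    static struct klist *knode_klist(uintptr_t n_klist)
    {
        return (struct klist *)(n_klist & ~KNODE_DEAD);
    }

    static bool knode_dead(uintptr_t n_klist)
    {
        return n_klist & KNODE_DEAD;
    }

    int main(void)
    {
        struct klist k;
        uintptr_t n_klist = (uintptr_t)&k;   /* live node: low bit clear */

        assert(!knode_dead(n_klist));
        assert(knode_klist(n_klist) == &k);

        n_klist |= KNODE_DEAD;               /* what knode_kill() does */

        assert(knode_dead(n_klist));
        assert(knode_klist(n_klist) == &k);  /* klist pointer still intact */
        return 0;
    }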
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6f..a8663890a88c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
         s64 ret;
         int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
         for_each_online_cpu(cpu) {
                 s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                 ret += *pcount;
-                if (set)
-                        *pcount = 0;
+                *pcount = 0;
         }
-        if (set)
-                fbc->count = ret;
+        fbc->count = ret;
 
         spin_unlock(&fbc->lock);
         return ret;
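After this change __percpu_counter_sum() always folds: it drains each CPU's local delta into the global count and zeroes it, so the expensive sum also resynchronizes the cheap read path. A toy single-threaded model of that idea (no locking; NR_CPUS and struct pcounter are stand-ins, not the kernel types):

    #include <stdio.h>

    #define NR_CPUS 4

    struct pcounter {
        long long count;        /* global value, may lag behind */
        int local[NR_CPUS];     /* per-cpu deltas, cheap to update */
    };

    /* fast, approximate read: just the global value */
    static long long pcounter_read(struct pcounter *c)
    {
        return c->count;
    }

    /* accurate sum that also folds deltas back, as the patched code does */
    static long long pcounter_sum(struct pcounter *c)
    {
        long long ret = c->count;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            ret += c->local[cpu];
            c->local[cpu] = 0;     /* delta consumed... */
        }
        c->count = ret;            /* ...and folded into the global count */
        return ret;
    }

    int main(void)
    {
        struct pcounter c = { 100, { 3, -1, 0, 7 } };

        printf("read=%lld sum=%lld read-after-sum=%lld\n",
               pcounter_read(&c), pcounter_sum(&c), pcounter_read(&c));
        /* prints: read=100 sum=109 read-after-sum=109 */
        return 0;
    }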
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 876ba6d5b670..8d2688ff1352 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -422,9 +422,12 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 {
         unsigned int offset = 0;
         struct sg_mapping_iter miter;
+        unsigned long flags;
 
         sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
 
+        local_irq_save(flags);
+
         while (sg_miter_next(&miter) && offset < buflen) {
                 unsigned int len;
 
@@ -442,6 +445,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
         sg_miter_stop(&miter);
 
+        local_irq_restore(flags);
         return offset;
 }
 
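The new local_irq_save()/local_irq_restore() pair brackets the whole copy, presumably because SG_MITER_ATOMIC maps pages through an atomic kmap slot that can also be used from interrupt context on the same CPU; keeping IRQs off for the duration prevents the slot from being reused mid-copy. That reading is inferred from the diff, not stated in it. A sketch of the save/restore discipline itself (example_copy() is hypothetical; the two IRQ helpers are the real kernel APIs):

    #include <linux/irqflags.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void example_copy(void *dst, const void *src, size_t len)
    {
        unsigned long flags;

        /*
         * Save this CPU's interrupt state and disable IRQs; using
         * save/restore (rather than disable/enable) keeps the
         * function safe to call from contexts that already run
         * with interrupts off.
         */
        local_irq_save(flags);
        memcpy(dst, src, len);          /* work done with IRQs masked */
        local_irq_restore(flags);
    }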
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000000..8347925030ff
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size:       The size to be converted
+ * @units:      units to use (powers of 1000 or 1024)
+ * @buf:        buffer to format to
+ * @len:        length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units.  Returns 0 on success or
+ * error on failure.  @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+                    char *buf, int len)
+{
+        const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
+                                   "EB", "ZB", "YB", NULL};
+        const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+                                 "EiB", "ZiB", "YiB", NULL };
+        const char **units_str[] = {
+                [STRING_UNITS_10] = units_10,
+                [STRING_UNITS_2] = units_2,
+        };
+        const int divisor[] = {
+                [STRING_UNITS_10] = 1000,
+                [STRING_UNITS_2] = 1024,
+        };
+        int i, j;
+        u64 remainder = 0, sf_cap;
+        char tmp[8];
+
+        tmp[0] = '\0';
+
+        for (i = 0; size > divisor[units] && units_str[units][i]; i++)
+                remainder = do_div(size, divisor[units]);
+
+        sf_cap = size;
+        for (j = 0; sf_cap*10 < 1000; j++)
+                sf_cap *= 10;
+
+        if (j) {
+                remainder *= 1000;
+                do_div(remainder, divisor[units]);
+                snprintf(tmp, sizeof(tmp), ".%03lld",
+                         (unsigned long long)remainder);
+                tmp[j+1] = '\0';
+        }
+
+        snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
+                 tmp, units_str[units][i]);
+
+        return 0;
+}
+EXPORT_SYMBOL(string_get_size);
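Tracing the arithmetic in string_get_size() shows the fractional digits are truncated, not rounded: the remainder is scaled by 1000, divided by the divisor, printed with three digits, then cut down to the number of digits needed for three significant figures. A hedged usage sketch; show_capacity() is hypothetical, and the commented results follow directly from the code above:

    #include <linux/kernel.h>
    #include <linux/string_helpers.h>

    static void show_capacity(u64 bytes)    /* hypothetical caller */
    {
        char buf[16];

        string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
        /* bytes == 1998: buf is "1.99KB"  (998/1000 -> .998, cut to 2 digits) */

        string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
        /* bytes == 1998: buf is "1.95KiB" (974*1000/1024 -> .951, cut to 2) */

        printk(KERN_INFO "capacity: %s\n", buf);
    }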
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 977edbdbc1de..f8eebd489149 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-        dma_addr_t mask = 0xffffffff;
-        /* If the device has a mask, use it, otherwise default to 32 bits */
-        if (hwdev && hwdev->dma_mask)
-                mask = *hwdev->dma_mask;
-        return (addr & ~mask) != 0;
+        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+        return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         void *ret;
         int order = get_order(size);
 
-        /*
-         * XXX fix me: the DMA API should pass us an explicit DMA mask
-         * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-         * bit range instead of a 16MB one).
-         */
-        flags |= GFP_DMA;
-
         ret = (void *)__get_free_pages(flags, order);
-        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
                 /*
                  * The allocated memory isn't reachable by the device.
                  * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                  * swiotlb_map_single(), which will grab memory from
                  * the lowest available address range.
                  */
-                dma_addr_t handle;
-                handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-                if (swiotlb_dma_mapping_error(hwdev, handle))
+                ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+                if (!ret)
                         return NULL;
-
-                ret = bus_to_virt(handle);
         }
 
         memset(ret, 0, size);
         dev_addr = virt_to_bus(ret);
 
         /* Confirm address can be DMA'd by device */
-        if (address_needs_mapping(hwdev, dev_addr)) {
+        if (address_needs_mapping(hwdev, dev_addr, size)) {
                 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                        (unsigned long long)*hwdev->dma_mask,
                        (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                       dma_addr_t dma_handle)
 {
         WARN_ON(irqs_disabled());
-        if (!(vaddr >= (void *)io_tlb_start
-            && vaddr < (void *)io_tlb_end))
+        if (!is_swiotlb_buffer(vaddr))
                 free_pages((unsigned long) vaddr, get_order(size));
         else
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+                unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+        if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
                 return dev_addr;
 
         /*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         /*
          * Ensure that the address returned is DMA'ble
         */
-        if (address_needs_mapping(hwdev, dev_addr))
+        if (address_needs_mapping(hwdev, dev_addr, size))
                 panic("map_single: bounce buffer is not DMA'ble");
 
         return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 unmap_single(hwdev, dma_addr, size, dir);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr) + offset;
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
         for_each_sg(sgl, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 dev_addr = virt_to_bus(addr);
-                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+                if (swiotlb_force ||
+                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                         void *map = map_single(hwdev, addr, sg->length, dir);
                         if (!map) {
                                 /* Don't panic here, we expect map_sg users
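address_needs_mapping() now defers to generic helpers and, importantly, checks that the whole buffer fits under the device's DMA mask rather than just its first byte. For reference, a sketch of roughly what those helpers amount to in this era's <linux/dma-mapping.h>; the _sketch suffix marks these as paraphrases from memory, so treat the exact bodies as approximate:

    #include <linux/dma-mapping.h>
    #include <linux/device.h>

    static inline u64 dma_get_mask_sketch(struct device *dev)
    {
        if (dev && dev->dma_mask && *dev->dma_mask)
            return *dev->dma_mask;
        return DMA_32BIT_MASK;          /* default: 32-bit capable */
    }

    static inline int is_buffer_dma_capable_sketch(u64 mask, dma_addr_t addr,
                                                   size_t size)
    {
        /* the whole [addr, addr + size) range must sit under the mask */
        return addr + size <= mask;
    }

Note what this fixes: the old open-coded test looked only at the start address, so a buffer that began below the mask but extended past it was wrongly judged reachable.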
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d8d1d1142248..c399bc1093cb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
 
 #include <asm/page.h>           /* for PAGE_SIZE */
 #include <asm/div64.h>
+#include <asm/sections.h>       /* for dereference_function_descriptor() */
 
 /* Works only for digits and letters, but small and fast */
 #define TOLOWER(x) ((x) | 0x20)
@@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precision
         return buf;
 }
 
-static inline void *dereference_function_descriptor(void *ptr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
-        void *p;
-        if (!probe_kernel_address(ptr, p))
-                ptr = p;
-#endif
-        return ptr;
-}
-
 static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
 {
         unsigned long value = (unsigned long) ptr;
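The removed helper moves behind <asm/sections.h> because only the architecture knows whether a C function pointer is the code address itself or a descriptor that must be dereferenced before symbol lookup. A hedged sketch of the IA64/PPC64 situation; the struct layout is illustrative, not the real ABI definition:

    /*
     * Illustrative only: on IA64 and 64-bit PowerPC a function pointer
     * refers to a small descriptor like this, not to the first
     * instruction, so %pF must look through it to resolve a symbol.
     */
    struct func_descr_sketch {
        unsigned long entry;    /* address of the actual code */
        unsigned long toc;      /* TOC/gp base the callee expects */
    };

    /* what dereference_function_descriptor() boils down to there */
    static void *text_address(void *ptr)
    {
        return (void *)((struct func_descr_sketch *)ptr)->entry;
    }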
