From c39649c331c70952700f99832b03f87e9d7f5b4b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 19 Jan 2011 11:03:25 +0000 Subject: lib: cpu_rmap: CPU affinity reverse-mapping When initiating I/O on a multiqueue and multi-IRQ device, we may want to select a queue for which the response will be handled on the same or a nearby CPU. This requires a reverse-map of IRQ affinity. Add library functions to support a generic reverse-mapping from CPUs to objects with affinity and the specific case where the objects are IRQs. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller --- lib/Kconfig | 4 + lib/Makefile | 2 + lib/cpu_rmap.c | 269 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 275 insertions(+) create mode 100644 lib/cpu_rmap.c (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 0ee67e08ad3e..8334342e0d05 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS depends on EXPERIMENTAL && BROKEN +config CPU_RMAP + bool + depends on SMP + # # Netlink attribute parsing support is select'ed if needed # diff --git a/lib/Makefile b/lib/Makefile index cbb774f7d41d..b73ba01a818a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o obj-$(CONFIG_AVERAGE) += average.o +obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c new file mode 100644 index 000000000000..987acfafeb83 --- /dev/null +++ b/lib/cpu_rmap.c @@ -0,0 +1,269 @@ +/* + * cpu_rmap.c: CPU affinity reverse-map support + * Copyright 2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#ifdef CONFIG_GENERIC_HARDIRQS +#include +#endif +#include + +/* + * These functions maintain a mapping from CPUs to some ordered set of + * objects with CPU affinities. This can be seen as a reverse-map of + * CPU affinity. However, we do not assume that the object affinities + * cover all CPUs in the system. For those CPUs not directly covered + * by object affinities, we attempt to find a nearest object based on + * CPU topology. + */ + +/** + * alloc_cpu_rmap - allocate CPU affinity reverse-map + * @size: Number of objects to be mapped + * @flags: Allocation flags e.g. %GFP_KERNEL + */ +struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags) +{ + struct cpu_rmap *rmap; + unsigned int cpu; + size_t obj_offset; + + /* This is a silly number of objects, and we use u16 indices. */ + if (size > 0xffff) + return NULL; + + /* Offset of object pointer array from base structure */ + obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]), + sizeof(void *)); + + rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags); + if (!rmap) + return NULL; + + rmap->obj = (void **)((char *)rmap + obj_offset); + + /* Initially assign CPUs to objects on a rota, since we have + * no idea where the objects are. Use infinite distance, so + * any object with known distance is preferable. Include the + * CPUs that are not present/online, since we definitely want + * any newly-hotplugged CPUs to have some object assigned. 
+ */ + for_each_possible_cpu(cpu) { + rmap->near[cpu].index = cpu % size; + rmap->near[cpu].dist = CPU_RMAP_DIST_INF; + } + + rmap->size = size; + return rmap; +} +EXPORT_SYMBOL(alloc_cpu_rmap); + +/* Reevaluate nearest object for given CPU, comparing with the given + * neighbours at the given distance. + */ +static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu, + const struct cpumask *mask, u16 dist) +{ + int neigh; + + for_each_cpu(neigh, mask) { + if (rmap->near[cpu].dist > dist && + rmap->near[neigh].dist <= dist) { + rmap->near[cpu].index = rmap->near[neigh].index; + rmap->near[cpu].dist = dist; + return true; + } + } + return false; +} + +#ifdef DEBUG +static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) +{ + unsigned index; + unsigned int cpu; + + pr_info("cpu_rmap %p, %s:\n", rmap, prefix); + + for_each_possible_cpu(cpu) { + index = rmap->near[cpu].index; + pr_info("cpu %d -> obj %u (distance %u)\n", + cpu, index, rmap->near[cpu].dist); + } +} +#else +static inline void +debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) +{ +} +#endif + +/** + * cpu_rmap_add - add object to a rmap + * @rmap: CPU rmap allocated with alloc_cpu_rmap() + * @obj: Object to add to rmap + * + * Return index of object. + */ +int cpu_rmap_add(struct cpu_rmap *rmap, void *obj) +{ + u16 index; + + BUG_ON(rmap->used >= rmap->size); + index = rmap->used++; + rmap->obj[index] = obj; + return index; +} +EXPORT_SYMBOL(cpu_rmap_add); + +/** + * cpu_rmap_update - update CPU rmap following a change of object affinity + * @rmap: CPU rmap to update + * @index: Index of object whose affinity changed + * @affinity: New CPU affinity of object + */ +int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, + const struct cpumask *affinity) +{ + cpumask_var_t update_mask; + unsigned int cpu; + + if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL))) + return -ENOMEM; + + /* Invalidate distance for all CPUs for which this used to be + * the nearest object. Mark those CPUs for update. + */ + for_each_online_cpu(cpu) { + if (rmap->near[cpu].index == index) { + rmap->near[cpu].dist = CPU_RMAP_DIST_INF; + cpumask_set_cpu(cpu, update_mask); + } + } + + debug_print_rmap(rmap, "after invalidating old distances"); + + /* Set distance to 0 for all CPUs in the new affinity mask. + * Mark all CPUs within their NUMA nodes for update. + */ + for_each_cpu(cpu, affinity) { + rmap->near[cpu].index = index; + rmap->near[cpu].dist = 0; + cpumask_or(update_mask, update_mask, + cpumask_of_node(cpu_to_node(cpu))); + } + + debug_print_rmap(rmap, "after updating neighbours"); + + /* Update distances based on topology */ + for_each_cpu(cpu, update_mask) { + if (cpu_rmap_copy_neigh(rmap, cpu, + topology_thread_cpumask(cpu), 1)) + continue; + if (cpu_rmap_copy_neigh(rmap, cpu, + topology_core_cpumask(cpu), 2)) + continue; + if (cpu_rmap_copy_neigh(rmap, cpu, + cpumask_of_node(cpu_to_node(cpu)), 3)) + continue; + /* We could continue into NUMA node distances, but for now + * we give up. 
+ */ + } + + debug_print_rmap(rmap, "after copying neighbours"); + + free_cpumask_var(update_mask); + return 0; +} +EXPORT_SYMBOL(cpu_rmap_update); + +#ifdef CONFIG_GENERIC_HARDIRQS + +/* Glue between IRQ affinity notifiers and CPU rmaps */ + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +/** + * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs + * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL + * + * Must be called in process context, before freeing the IRQs, and + * without holding any locks required by global workqueue items. + */ +void free_irq_cpu_rmap(struct cpu_rmap *rmap) +{ + struct irq_glue *glue; + u16 index; + + if (!rmap) + return; + + for (index = 0; index < rmap->used; index++) { + glue = rmap->obj[index]; + irq_set_affinity_notifier(glue->notify.irq, NULL); + } + irq_run_affinity_notifiers(); + + kfree(rmap); +} +EXPORT_SYMBOL(free_irq_cpu_rmap); + +static void +irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct irq_glue *glue = + container_of(notify, struct irq_glue, notify); + int rc; + + rc = cpu_rmap_update(glue->rmap, glue->index, mask); + if (rc) + pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc); +} + +static void irq_cpu_rmap_release(struct kref *ref) +{ + struct irq_glue *glue = + container_of(ref, struct irq_glue, notify.kref); + kfree(glue); +} + +/** + * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map + * @rmap: The reverse-map + * @irq: The IRQ number + * + * This adds an IRQ affinity notifier that will update the reverse-map + * automatically. + * + * Must be called in process context, after the IRQ is allocated but + * before it is bound with request_irq(). + */ +int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) +{ + struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL); + int rc; + + if (!glue) + return -ENOMEM; + glue->notify.notify = irq_cpu_rmap_notify; + glue->notify.release = irq_cpu_rmap_release; + glue->rmap = rmap; + glue->index = cpu_rmap_add(rmap, glue); + rc = irq_set_affinity_notifier(irq, &glue->notify); + if (rc) + kfree(glue); + return rc; +} +EXPORT_SYMBOL(irq_cpu_rmap_add); + +#endif /* CONFIG_GENERIC_HARDIRQS */ -- cgit v1.2.2 From de0368d5fec7b9ef95228510f2edb79610beb448 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 24 Jan 2011 02:41:37 +0000 Subject: textsearch: doc - fix spelling in lib/textsearch.c. Found the following spelling errors while reading the textsearch code: "facitilies" -> "facilities" "continously" -> "continuously" "arbitary" -> "arbitrary" "patern" -> "pattern" "occurences" -> "occurrences" I'll try to push this patch through DaveM, given the only users of textsearch is in the net/ tree (nf_conntrack_amanda.c, xt_string.c and em_text.c) Signed-off-by: Jesper Sander Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- lib/textsearch.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/textsearch.c b/lib/textsearch.c index d608331b3e47..e0cc0146ae62 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -13,7 +13,7 @@ * * INTRODUCTION * - * The textsearch infrastructure provides text searching facitilies for + * The textsearch infrastructure provides text searching facilities for * both linear and non-linear data. Individual search algorithms are * implemented in modules and chosen by the user. * @@ -43,7 +43,7 @@ * to the algorithm to store persistent variables. 
* (4) Core eventually resets the search offset and forwards the find() * request to the algorithm. - * (5) Algorithm calls get_next_block() provided by the user continously + * (5) Algorithm calls get_next_block() provided by the user continuously * to fetch the data to be searched in block by block. * (6) Algorithm invokes finish() after the last call to get_next_block * to clean up any leftovers from get_next_block. (Optional) @@ -58,15 +58,15 @@ * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE * to perform case insensitive matching. But it might slow down * performance of algorithm, so you should use it at own your risk. - * The returned configuration may then be used for an arbitary + * The returned configuration may then be used for an arbitrary * amount of times and even in parallel as long as a separate struct * ts_state variable is provided to every instance. * * The actual search is performed by either calling textsearch_find_- * continuous() for linear data or by providing an own get_next_block() * implementation and calling textsearch_find(). Both functions return - * the position of the first occurrence of the patern or UINT_MAX if - * no match was found. Subsequent occurences can be found by calling + * the position of the first occurrence of the pattern or UINT_MAX if + * no match was found. Subsequent occurrences can be found by calling * textsearch_next() regardless of the linearity of the data. * * Once you're done using a configuration it must be given back via -- cgit v1.2.2 From ac15ee691fe84cb46cbd2497ddcb10e246f7ee47 Mon Sep 17 00:00:00 2001 From: Toshiyuki Okajima Date: Tue, 25 Jan 2011 15:07:32 -0800 Subject: radix_tree: radix_tree_gang_lookup_tag_slot() may never return Executed command: fsstress -d /mnt -n 600 -p 850 crash> bt PID: 7947 TASK: ffff880160546a70 CPU: 0 COMMAND: "fsstress" #0 [ffff8800dfc07d00] machine_kexec at ffffffff81030db9 #1 [ffff8800dfc07d70] crash_kexec at ffffffff810a7952 #2 [ffff8800dfc07e40] oops_end at ffffffff814aa7c8 #3 [ffff8800dfc07e70] die_nmi at ffffffff814aa969 #4 [ffff8800dfc07ea0] do_nmi_callback at ffffffff8102b07b #5 [ffff8800dfc07f10] do_nmi at ffffffff814aa514 #6 [ffff8800dfc07f50] nmi at ffffffff814a9d60 [exception RIP: __lookup_tag+100] RIP: ffffffff812274b4 RSP: ffff88016056b998 RFLAGS: 00000287 RAX: 0000000000000000 RBX: 0000000000000002 RCX: 0000000000000006 RDX: 000000000000001d RSI: ffff88016056bb18 RDI: ffff8800c85366e0 RBP: ffff88016056b9c8 R8: ffff88016056b9e8 R9: 0000000000000000 R10: 000000000000000e R11: ffff8800c8536908 R12: 0000000000000010 R13: 0000000000000040 R14: ffffffffffffffc0 R15: ffff8800c85366e0 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 #7 [ffff88016056b998] __lookup_tag at ffffffff812274b4 #8 [ffff88016056b9d0] radix_tree_gang_lookup_tag_slot at ffffffff81227605 #9 [ffff88016056ba20] find_get_pages_tag at ffffffff810fc110 #10 [ffff88016056ba80] pagevec_lookup_tag at ffffffff81105e85 #11 [ffff88016056baa0] write_cache_pages at ffffffff81104c47 #12 [ffff88016056bbd0] generic_writepages at ffffffff81105014 #13 [ffff88016056bbe0] do_writepages at ffffffff81105055 #14 [ffff88016056bbf0] __filemap_fdatawrite_range at ffffffff810fb2cb #15 [ffff88016056bc40] filemap_write_and_wait_range at ffffffff810fb32a #16 [ffff88016056bc70] generic_file_direct_write at ffffffff810fb3dc #17 [ffff88016056bce0] __generic_file_aio_write at ffffffff810fcee5 #18 [ffff88016056bda0] generic_file_aio_write at ffffffff810fd085 #19 [ffff88016056bdf0] do_sync_write at ffffffff8114f9ea #20 
[ffff88016056bf00] vfs_write at ffffffff8114fcf8 #21 [ffff88016056bf30] sys_write at ffffffff81150691 #22 [ffff88016056bf80] system_call_fastpath at ffffffff8100c0b2 I think this root cause is the following: radix_tree_range_tag_if_tagged() always tags the root tag with settag if the root tag is set with iftag even if there are no iftag tags in the specified range (Of course, there are some iftag tags outside the specified range). =============================================================================== [[[Detailed description]]] (1) Why cannot radix_tree_gang_lookup_tag_slot() return forever? __lookup_tag(): - Return with 0. - Return with the index which is not bigger than the old one as the input parameter. Therefore the following "while" repeats forever because the above conditions cause "ret" not to be updated and the cur_index cannot be changed into the bigger one. (So, radix_tree_gang_lookup_tag_slot() cannot return forever.) radix_tree_gang_lookup_tag_slot(): 1178 while (ret < max_items) { 1179 unsigned int slots_found; 1180 unsigned long next_index; /* Index of next search */ 1181 1182 if (cur_index > max_index) 1183 break; 1184 slots_found = __lookup_tag(node, results + ret, 1185 cur_index, max_items - ret, &next_index, tag); 1186 ret += slots_found; // cannot update ret because slots_found == 0. // so, this while loops forever. 1187 if (next_index == 0) 1188 break; 1189 cur_index = next_index; 1190 } (2) Why does __lookup_tag() return with 0 and doesn't update the index? Assuming the following: - the one of the slot in radix_tree_node is NULL. - the one of the tag which corresponds to the slot sets with PAGECACHE_TAG_TOWRITE or other. - In a certain height(!=0), the corresponding index is 0. a) __lookup_tag() notices that the tag is set. 1005 static unsigned int 1006 __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, 1007 unsigned int max_items, unsigned long *next_index, unsigned int tag) 1008 { 1009 unsigned int nr_found = 0; 1010 unsigned int shift, height; 1011 1012 height = slot->height; 1013 if (height == 0) 1014 goto out; 1015 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 1016 1017 while (height > 0) { 1018 unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ; 1019 1020 for (;;) { 1021 if (tag_get(slot, tag, i)) 1022 break; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * the index is not updated yet. b) __lookup_tag() notices that the slot is NULL. 1023 index &= ~((1UL << shift) - 1); 1024 index += 1UL << shift; 1025 if (index == 0) 1026 goto out; /* 32-bit wraparound */ 1027 i++; 1028 if (i == RADIX_TREE_MAP_SIZE) 1029 goto out; 1030 } 1031 height--; 1032 if (height == 0) { /* Bottom level: grab some items */ ... 1055 } 1056 shift -= RADIX_TREE_MAP_SHIFT; 1057 slot = rcu_dereference_raw(slot->slots[i]); 1058 if (slot == NULL) 1059 break; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ c) __lookup_tag() doesn't update the index and return with 0. 1060 } 1061 out: 1062 *next_index = index; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 1063 return nr_found; 1064 } (3) Why is the slot NULL even if the tag is set? Because radix_tree_range_tag_if_tagged() always sets the root tag with PAGECACHE_TAG_TOWRITE if the root tag is set with PAGECACHE_TAG_DIRTY, even if there is no tag which can be set with PAGECACHE_TAG_TOWRITE in the specified range (from *first_indexp to last_index). Of course, some PAGECACHE_TAG_DIRTY nodes must exist outside the specified range. 
(radix_tree_range_tag_if_tagged() is called only from tag_pages_for_writeback()) 640 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, 641 unsigned long *first_indexp, unsigned long last_index, 642 unsigned long nr_to_tag, 643 unsigned int iftag, unsigned int settag) 644 { 645 unsigned int height = root->height; 646 struct radix_tree_path path[height]; 647 struct radix_tree_path *pathp = path; 648 struct radix_tree_node *slot; 649 unsigned int shift; 650 unsigned long tagged = 0; 651 unsigned long index = *first_indexp; 652 653 last_index = min(last_index, radix_tree_maxindex(height)); 654 if (index > last_index) 655 return 0; 656 if (!nr_to_tag) 657 return 0; 658 if (!root_tag_get(root, iftag)) { 659 *first_indexp = last_index + 1; 660 return 0; 661 } 662 if (height == 0) { 663 *first_indexp = last_index + 1; 664 root_tag_set(root, settag); 665 return 1; 666 } ... 733 root_tag_set(root, settag); ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 734 *first_indexp = index; 735 736 return tagged; 737 } As the result, there is no radix_tree_node which is set with PAGECACHE_TAG_TOWRITE but the root tag(radix_tree_root) is set with PAGECACHE_TAG_TOWRITE. [figure: inside radix_tree] (Please see the figure with typewriter font) =========================================== [roottag = DIRTY] | tag=0:NOTHING tag[0 0 0 1] 1:DIRTY [x x x +] 2:WRITEBACK | 3:DIRTY,WRITEBACK p 4:TOWRITE <---> 5:DIRTY,TOWRITE ... specified range (index: 0 to 2) * There is no DIRTY tag within the specified range. (But there is a DIRTY tag outside that range.) | | | | | | | | | after calling tag_pages_for_writeback() | | | | | | | | | v v v v v v v v v [roottag = DIRTY,TOWRITE] | p is "page". tag[0 0 0 1] x is NULL. [x x x +] +- is a pointer to "page". | p * But TOWRITE tag is set on the root tag. ============================================ After that, radix_tree_extend() via radix_tree_insert() is called when the page is added. This function sets the new radix_tree_node with PAGECACHE_TAG_TOWRITE to succeed the status of the root tag. 246 static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) 247 { 248 struct radix_tree_node *node; 249 unsigned int height; 250 int tag; 251 252 /* Figure out what the height should be. */ 253 height = root->height + 1; 254 while (index > radix_tree_maxindex(height)) 255 height++; 256 257 if (root->rnode == NULL) { 258 root->height = height; 259 goto out; 260 } 261 262 do { 263 unsigned int newheight; 264 if (!(node = radix_tree_node_alloc(root))) 265 return -ENOMEM; 266 267 /* Increase the height. */ 268 node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); 269 270 /* Propagate the aggregated tag info into the new root */ 271 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 272 if (root_tag_get(root, tag)) 273 tag_set(node, tag, 0); ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 274 } =========================================== [roottag = DIRTY,TOWRITE] | : tag[0 0 0 1] [0 0 0 0] [x x x +] [+ x x x] | | p p (new page) | | | | | | | | | after calling radix_tree_insert | | | | | | | | | v v v v v v v v v [roottag = DIRTY,TOWRITE] | tag [5 0 0 0] * DIRTY and TOWRITE tags are [+ + x x] succeeded to the new node. | | tag [0 0 0 1] [0 0 0 0] [x x x +] [+ x x x] | | p p ============================================ After that, the index 3 page is released by remove_from_page_cache(). Then we can make the situation that the tag is set with PAGECACHE_TAG_TOWRITE and that the slot which corresponds to the tag is NULL. 
=========================================== [roottag = DIRTY,TOWRITE] | tag [5 0 0 0] [+ + x x] | | tag [0 0 0 1] [0 0 0 0] [x x x +] [+ x x x] | | p p (remove) | | | | | | | | | after calling remove_page_cache | | | | | | | | | v v v v v v v v v [roottag = DIRTY,TOWRITE] | tag [4 0 0 0] * Only the DIRTY tag is cleared [x + x x] because no TOWRITE tag exists | in the bottom node. [0 0 0 0] [+ x x x] | p ============================================ To solve this problem, change radix_tree_range_tag_if_tagged() so that it does not tag the root if it did not set any tags within the specified range, like this: ============================================ 640 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, 641 unsigned long *first_indexp, unsigned long last_index, 642 unsigned long nr_to_tag, 643 unsigned int iftag, unsigned int settag) 644 { 650 unsigned long tagged = 0; ... 733 if (tagged) ^^^^^^^^^^^^^^^^^^^^^^^^ 734 root_tag_set(root, settag); 735 *first_indexp = index; 736 737 return tagged; 738 } ============================================ Signed-off-by: Toshiyuki Okajima Acked-by: Jan Kara Cc: Dave Chinner Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 5086bb962b4d..7ea2e033d715 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -736,10 +736,11 @@ next: } } /* - * The iftag must have been set somewhere because otherwise - * we would return immediated at the beginning of the function + * We need not to tag the root tag if there is no tag which is set with + * settag within the range from *first_indexp to last_index. */ - root_tag_set(root, settag); + if (tagged > 0) + root_tag_set(root, settag); *first_indexp = index; return tagged; -- cgit v1.2.2 From d123375425d7df4b6081a631fc1203fceafa59b2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Jan 2011 21:32:01 +0100 Subject: rwsem: Remove redundant asmregparm annotation Peter Zijlstra pointed out that the only user of asmregparm (x86) already compiles the kernel with -mregparm=3, so the annotation of the rwsem functions is redundant. Remove it.
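For reference, on 32-bit x86 the annotation being removed here was defined in arch/x86 roughly as

    #define asmregparm __attribute__((regparm(3)))

while every other architecture left it empty, so with the whole x86 kernel already built with -mregparm=3 the attribute changed nothing.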
Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: David Howells Cc: Benjamin Herrenschmidt Cc: Matt Turner Cc: Tony Luck Cc: Heiko Carstens Cc: Paul Mundt Cc: David Miller Cc: Chris Zankel LKML-Reference: Signed-off-by: Thomas Gleixner --- lib/rwsem.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/rwsem.c b/lib/rwsem.c index f236d7cd5cf3..aa7c3052261f 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -222,8 +222,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, /* * wait for the read lock to be granted */ -asmregparm struct rw_semaphore __sched * -rwsem_down_read_failed(struct rw_semaphore *sem) +struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) { return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ, -RWSEM_ACTIVE_READ_BIAS); @@ -232,8 +231,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem) /* * wait for the write lock to be granted */ -asmregparm struct rw_semaphore __sched * -rwsem_down_write_failed(struct rw_semaphore *sem) +struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) { return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE, -RWSEM_ACTIVE_WRITE_BIAS); @@ -243,7 +241,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem) * handle waking up a waiter on the semaphore * - up_read/up_write has decremented the active part of count if we come here */ -asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) +struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) { unsigned long flags; @@ -263,7 +261,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) * - caller incremented waiting part of count and discovered it still negative * - just wake up any readers at the front of the queue */ -asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) +struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) { unsigned long flags; -- cgit v1.2.2 From 0b6bb66d1247601e4a2560bb048d64c606bd7b73 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 26 Jan 2011 15:55:36 +0100 Subject: Export the augmented rbtree helper functions The augmented rbtree helper functions are not exported to modules right now. (We have started using augmented rbtrees in the upcoming version of drbd.) Signed-off-by: Andreas Gruenbacher Signed-off-by: Linus Torvalds --- lib/rbtree.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'lib') diff --git a/lib/rbtree.c b/lib/rbtree.c index 4693f79195d3..a16be19a1305 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data) rb_augment_path(node, func, data); } +EXPORT_SYMBOL(rb_augment_insert); /* * before removing the node, find the deepest node on the rebalance path @@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node) return deepest; } +EXPORT_SYMBOL(rb_augment_erase_begin); /* * after removal, update the tree to account for the removed entry @@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data) if (node) rb_augment_path(node, func, data); } +EXPORT_SYMBOL(rb_augment_erase_end); /* * This function returns the first node (in sort order) of the tree. 
-- cgit v1.2.2 From 8ba6ebf583f12da32036fc0f003ab4043e54692e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 23 Jan 2011 17:17:24 +0100 Subject: Dynamic debug: Add more flags Add flags that allow the user to specify via debugfs whether or not the module name, function name, line number and/or thread ID have to be included in the printed message. Signed-off-by: Bart Van Assche Cc: Greg Banks Cc: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 60 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index b335acb43be2..863c83430964 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -7,6 +7,7 @@ * Copyright (C) 2008 Jason Baron * By Greg Banks * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. + * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. */ #include @@ -27,6 +28,7 @@ #include #include #include +#include extern struct _ddebug __start___verbose[]; extern struct _ddebug __stop___verbose[]; @@ -63,15 +65,25 @@ static inline const char *basename(const char *path) return tail ? tail+1 : path; } +static struct { unsigned flag:8; char opt_char; } opt_array[] = { + { _DPRINTK_FLAGS_PRINT, 'p' }, + { _DPRINTK_FLAGS_INCL_MODNAME, 'm' }, + { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' }, + { _DPRINTK_FLAGS_INCL_LINENO, 'l' }, + { _DPRINTK_FLAGS_INCL_TID, 't' }, +}; + /* format a string into buf[] which describes the _ddebug's flags */ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, size_t maxlen) { char *p = buf; + int i; BUG_ON(maxlen < 4); - if (dp->flags & _DPRINTK_FLAGS_PRINT) - *p++ = 'p'; + for (i = 0; i < ARRAY_SIZE(opt_array); ++i) + if (dp->flags & opt_array[i].flag) + *p++ = opt_array[i].opt_char; if (p == buf) *p++ = '-'; *p = '\0'; @@ -343,7 +355,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp, unsigned int *maskp) { unsigned flags = 0; - int op = '='; + int op = '=', i; switch (*str) { case '+': @@ -358,13 +370,14 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp, printk(KERN_INFO "%s: op='%c'\n", __func__, op); for ( ; *str ; ++str) { - switch (*str) { - case 'p': - flags |= _DPRINTK_FLAGS_PRINT; - break; - default: - return -EINVAL; + for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) { + if (*str == opt_array[i].opt_char) { + flags |= opt_array[i].flag; + break; + } } + if (i < 0) + return -EINVAL; } if (flags == 0) return -EINVAL; @@ -413,6 +426,35 @@ static int ddebug_exec_query(char *query_string) return 0; } +int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) 
+{ + va_list args; + int res; + + BUG_ON(!descriptor); + BUG_ON(!fmt); + + va_start(args, fmt); + res = printk(KERN_DEBUG); + if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) { + if (in_interrupt()) + res += printk(KERN_CONT " "); + else + res += printk(KERN_CONT "[%d] ", task_pid_vnr(current)); + } + if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME) + res += printk(KERN_CONT "%s:", descriptor->modname); + if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) + res += printk(KERN_CONT "%s:", descriptor->function); + if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO) + res += printk(KERN_CONT "%d ", descriptor->lineno); + res += vprintk(fmt, args); + va_end(args); + + return res; +} +EXPORT_SYMBOL(__dynamic_pr_debug); + static __initdata char ddebug_setup_string[1024]; static __init int ddebug_setup_query(char *str) { -- cgit v1.2.2 From e8d9792aa514e49bf618713987c393d93babc2c5 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 3 Feb 2011 15:59:58 -0800 Subject: dynamic_debug: add #include This fixes a build breakage caused by 8ba6ebf583f12da32036fc0f003ab4043e54692e "Dynamic debug: Add more flags" Cc: Bart Van Assche Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 863c83430964..75ca78f3a8c9 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -29,6 +29,7 @@ #include #include #include +#include extern struct _ddebug __start___verbose[]; extern struct _ddebug __stop___verbose[]; -- cgit v1.2.2 From 73020415564a3fe4931f3f70f500a5db13eea946 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sat, 22 Jan 2011 22:35:38 +0100 Subject: m68knommu: Remove dependencies on nonexistent M68KNOMMU M68KNOMMU is set nowhere. Signed-off-by: Geert Uytterhoeven Signed-off-by: Greg Ungerer --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3967c2356e37..2b97418c67e2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && \ - (CRIS || M68K || M68KNOMMU || FRV || UML || \ + (CRIS || M68K || FRV || UML || \ AVR32 || SUPERH || BLACKFIN || MN10300) || \ ARCH_WANT_FRAME_POINTERS default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS -- cgit v1.2.2 From 3c18d4de86e4a7f93815c081e50e0543fa27200f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 18 Feb 2011 11:32:28 -0800 Subject: Expand CONFIG_DEBUG_LIST to several other list operations When list debugging is enabled, we aim to readably show list corruption errors, and the basic list_add/list_del operations end up having extra debugging code in them to do some basic validation of the list entries. However, "list_del_init()" and "list_move[_tail]()" ended up avoiding the debug code due to how they were written. This fixes that. So the _next_ time we have list_move() problems with stale list entries, we'll hopefully have an easier time finding them.. 
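The list helpers themselves live in include/linux/list.h (outside lib/, so not shown in this diff); with this change they are routed through the new checked __list_del_entry(), along the lines of:

    static inline void list_del_init(struct list_head *entry)
    {
            __list_del_entry(entry);
            INIT_LIST_HEAD(entry);
    }

    static inline void list_move(struct list_head *list, struct list_head *head)
    {
            __list_del_entry(list);
            list_add(list, head);
    }

so the poison and corruption checks below now fire for these operations as well when CONFIG_DEBUG_LIST is enabled.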
Signed-off-by: Linus Torvalds --- lib/list_debug.c | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) (limited to 'lib') diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16ca..b8029a5583ff 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -35,6 +35,31 @@ void __list_add(struct list_head *new, } EXPORT_SYMBOL(__list_add); +void __list_del_entry(struct list_head *entry) +{ + struct list_head *prev, *next; + + prev = entry->prev; + next = entry->next; + + if (WARN(next == LIST_POISON1, + "list_del corruption, %p->next is LIST_POISON1 (%p)\n", + entry, LIST_POISON1) || + WARN(prev == LIST_POISON2, + "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", + entry, LIST_POISON2) || + WARN(prev->next != entry, + "list_del corruption. prev->next should be %p, " + "but was %p\n", entry, prev->next) || + WARN(next->prev != entry, + "list_del corruption. next->prev should be %p, " + "but was %p\n", entry, next->prev)) + return; + + __list_del(prev, next); +} +EXPORT_SYMBOL(__list_del_entry); + /** * list_del - deletes entry from list. * @entry: the element to delete from the list. @@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); */ void list_del(struct list_head *entry) { - WARN(entry->next == LIST_POISON1, - "list_del corruption, next is LIST_POISON1 (%p)\n", - LIST_POISON1); - WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, - "list_del corruption, prev is LIST_POISON2 (%p)\n", - LIST_POISON2); - WARN(entry->prev->next != entry, - "list_del corruption. prev->next should be %p, " - "but was %p\n", entry, entry->prev->next); - WARN(entry->next->prev != entry, - "list_del corruption. next->prev should be %p, " - "but was %p\n", entry, entry->next->prev); - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } -- cgit v1.2.2 From de933bd833be1a53bd361c9a327afd3c65413351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 18 Feb 2011 21:47:20 +0100 Subject: kbuild: reenable section mismatch analysis MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was disabled in commit e5f95c8 (kbuild: print only total number of section mismatces found) because there were too many warnings. Now we're down to a reasonable number again, so we start scaring people with the details. Signed-off-by: Uwe Kleine-König Acked-by: Randy Dunlap Acked-by: Mike Frysinger Signed-off-by: Michal Marek --- lib/Kconfig.debug | 4 ---- 1 file changed, 4 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2d05adb98401..8b6a4f13946f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -102,11 +102,7 @@ config HEADERS_CHECK config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" - depends on UNDEFINED || (BLACKFIN) default y - # This option is on purpose disabled for now. - # It will be enabled when we are down to a reasonable number - # of section mismatch warnings (< 10 for an allyesconfig build) help The section mismatch analysis checks if there are illegal references from one section to another section. -- cgit v1.2.2 From fba99fa38b023224680308a482e12a0eca87e4e1 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 25 Feb 2011 14:44:16 -0800 Subject: swiotlb: fix wrong panic swiotlb's map_page wrongly calls panic() when it can't find a buffer fit for device's dma mask. It should return an error instead. Devices with an odd dma mask (i.e. 
under 4G) like the b44 network card hit this bug (the system crashes): http://marc.info/?l=linux-kernel&m=129648943830106&w=2 If swiotlb returns an error, the b44 driver can use its own bouncing mechanism. Reported-by: Chuck Ebbert Signed-off-by: FUJITA Tomonori Tested-by: Arkadiusz Miskiewicz Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/swiotlb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c47bbe11b804..93ca08b8a451 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* * Ensure that the address returned is DMA'ble */ - if (!dma_capable(dev, dev_addr, size)) - panic("map_single: bounce buffer is not DMA'ble"); + if (!dma_capable(dev, dev_addr, size)) { + swiotlb_tbl_unmap_single(dev, map, size, dir); + dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); + } return dev_addr; } -- cgit v1.2.2 From e3fa3aff0cb198e7c53d894f52146121d9592872 Mon Sep 17 00:00:00 2001 From: Lars Ellenberg Date: Mon, 28 Feb 2011 12:38:25 -0800 Subject: net: fix nla_policy_len to actually _iterate_ over the policy Currently nla_policy_len always returns n * NLA_HDRLEN: It loops, but does not advance its iterator. NLA_UNSPEC == 0 does not contain a .len in any policy. Trivially fixed by adding p++. Signed-off-by: Lars Ellenberg Signed-off-by: David S. Miller --- lib/nlattr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/nlattr.c b/lib/nlattr.c index 5021cbc34411..ac09f2226dc7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; - for (i = 0; i < n; i++) { + for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_minlen[p->type]) -- cgit v1.2.2 From f51b452bed4ae5c20e1f8a790e4ed8663d909a40 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 25 Jan 2011 21:54:50 +0100 Subject: tracing: don't trace the BKL No reason to trace it when the last user is gone. Signed-off-by: Arnd Bergmann Acked-by: Frederic Weisbecker --- lib/kernel_lock.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'lib') diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index b135d04aa48a..d80e12265862 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -10,9 +10,6 @@ #include #include -#define CREATE_TRACE_POINTS -#include - /* * The 'big kernel lock' * @@ -120,8 +117,6 @@ void __lockfunc _lock_kernel(const char *func, const char *file, int line) { int depth = current->lock_depth + 1; - trace_lock_kernel(func, file, line); - if (likely(!depth)) { might_sleep(); __lock_kernel(); @@ -134,8 +129,6 @@ void __lockfunc _unlock_kernel(const char *func, const char *file, int line) { BUG_ON(current->lock_depth < 0); if (likely(--current->lock_depth < 0)) __unlock_kernel(); - - trace_unlock_kernel(func, file, line); } EXPORT_SYMBOL(_lock_kernel). -- cgit v1.2.2 From a7a9a24dcd2c57edf148ca9a14a510a5765ecf20 Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Tue, 1 Mar 2011 20:03:05 +0100 Subject: lib-average: Make config option selectable Make CONFIG_AVERAGE selectable for out-of-tree users such as compat-wireless. Signed-off-by: Michael Buesch Signed-off-by: John W. Linville
--- lib/Kconfig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 0ee67e08ad3e..c70f2a4db358 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -217,6 +217,13 @@ config LRU_CACHE tristate config AVERAGE - bool + bool "Averaging functions" + help + This option is provided for the case where no in-kernel-tree + modules require averaging functions, but a module built outside + the kernel tree does. Such modules that use library averaging + functions require Y here. + + If unsure, say N. endmenu -- cgit v1.2.2 From 4ba8216cd90560bc402f52076f64d8546e8aefcb Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 25 Jan 2011 22:52:22 +0100 Subject: BKL: That's all, folks This removes the implementation of the big kernel lock, at last. A lot of people have worked on this in the past, so the credit for this patch should be with everyone who participated in the hunt. The names on the Cc list are the people that were the most active in this, according to the recorded git history, in alphabetical order. Signed-off-by: Arnd Bergmann Acked-by: Alan Cox Cc: Alessio Igor Bogani Cc: Al Viro Cc: Andrew Hendry Cc: Andrew Morton Cc: Christoph Hellwig Cc: Eric W. Biederman Cc: Frederic Weisbecker Cc: Hans Verkuil Acked-by: Ingo Molnar Cc: Jan Blunck Cc: John Kacur Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Oliver Neukum Cc: Paul Menage Acked-by: Thomas Gleixner Cc: Trond Myklebust --- lib/Kconfig.debug | 9 ---- lib/Makefile | 1 - lib/kernel_lock.c | 136 ------------------------------------------------------ 3 files changed, 146 deletions(-) delete mode 100644 lib/kernel_lock.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2b97418c67e2..6f440d82b58d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -470,15 +470,6 @@ config DEBUG_MUTEXES This feature allows mutex semantics violations to be detected and reported. -config BKL - bool "Big Kernel Lock" if (SMP || PREEMPT) - default y - help - This is the traditional lock that is used in old code instead - of proper locking. All drivers that use the BKL should depend - on this symbol. - Say Y here unless you are working on removing the BKL. - config DEBUG_LOCK_ALLOC bool "Lock debugging: detect incorrect freeing of live locks" depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT diff --git a/lib/Makefile b/lib/Makefile index cbb774f7d41d..de6c609bb4e4 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o -obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c deleted file mode 100644 index d80e12265862..000000000000 --- a/lib/kernel_lock.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * lib/kernel_lock.c - * - * This is the traditional BKL - big kernel lock. Largely - * relegated to obsolescence, but used by various less - * important (or lazy) subsystems. - */ -#include -#include -#include -#include - -/* - * The 'big kernel lock' - * - * This spinlock is taken and released recursively by lock_kernel() - * and unlock_kernel(). It is transparently dropped and reacquired - * over schedule().
It is used to protect legacy code that hasn't - * been migrated to a proper locking design yet. - * - * Don't use in new code. - */ -static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); - - -/* - * Acquire/release the underlying lock from the scheduler. - * - * This is called with preemption disabled, and should - * return an error value if it cannot get the lock and - * TIF_NEED_RESCHED gets set. - * - * If it successfully gets the lock, it should increment - * the preemption count like any spinlock does. - * - * (This works on UP too - do_raw_spin_trylock will never - * return false in that case) - */ -int __lockfunc __reacquire_kernel_lock(void) -{ - while (!do_raw_spin_trylock(&kernel_flag)) { - if (need_resched()) - return -EAGAIN; - cpu_relax(); - } - preempt_disable(); - return 0; -} - -void __lockfunc __release_kernel_lock(void) -{ - do_raw_spin_unlock(&kernel_flag); - preempt_enable_no_resched(); -} - -/* - * These are the BKL spinlocks - we try to be polite about preemption. - * If SMP is not on (ie UP preemption), this all goes away because the - * do_raw_spin_trylock() will always succeed. - */ -#ifdef CONFIG_PREEMPT -static inline void __lock_kernel(void) -{ - preempt_disable(); - if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { - /* - * If preemption was disabled even before this - * was called, there's nothing we can be polite - * about - just spin. - */ - if (preempt_count() > 1) { - do_raw_spin_lock(&kernel_flag); - return; - } - - /* - * Otherwise, let's wait for the kernel lock - * with preemption enabled.. - */ - do { - preempt_enable(); - while (raw_spin_is_locked(&kernel_flag)) - cpu_relax(); - preempt_disable(); - } while (!do_raw_spin_trylock(&kernel_flag)); - } -} - -#else - -/* - * Non-preemption case - just get the spinlock - */ -static inline void __lock_kernel(void) -{ - do_raw_spin_lock(&kernel_flag); -} -#endif - -static inline void __unlock_kernel(void) -{ - /* - * the BKL is not covered by lockdep, so we open-code the - * unlocking sequence (and thus avoid the dep-chain ops): - */ - do_raw_spin_unlock(&kernel_flag); - preempt_enable(); -} - -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, so we only need to - * worry about other CPU's. - */ -void __lockfunc _lock_kernel(const char *func, const char *file, int line) -{ - int depth = current->lock_depth + 1; - - if (likely(!depth)) { - might_sleep(); - __lock_kernel(); - } - current->lock_depth = depth; -} - -void __lockfunc _unlock_kernel(const char *func, const char *file, int line) -{ - BUG_ON(current->lock_depth < 0); - if (likely(--current->lock_depth < 0)) - __unlock_kernel(); -} - -EXPORT_SYMBOL(_lock_kernel); -EXPORT_SYMBOL(_unlock_kernel); - -- cgit v1.2.2 From 997772884036e6e121de39322179989154437d9f Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 7 Mar 2011 09:58:33 +0100 Subject: debugobjects: Add hint for better object identification In complex subsystems like mac80211 structures can contain several timers and work structs, so identifying a specific instance from the call trace and object type output of debugobjects can be hard. Allow the subsystems which support debugobjects to provide a hint function. This function returns a pointer to a kernel address (preferrably the objects callback function) which is printed along with the debugobjects type. Add hint methods for timer_list, work_struct and hrtimer. 
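The hint methods themselves live with their subsystems rather than under lib/; each one just maps an object to its callback. The timer_list one, for instance, is essentially (sketch of the kernel/timer.c side):

    static void *timer_debug_hint(void *addr)
    {
            return ((struct timer_list *) addr)->function;
    }

hooked up via the new debug_hint member of struct debug_obj_descr that debug_print_object() consults below.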
[ tglx: Massaged changelog, made it compile ] Signed-off-by: Stanislaw Gruszka LKML-Reference: <20110307085809.GA9334@redhat.com> Signed-off-by: Thomas Gleixner --- lib/debugobjects.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/debugobjects.c b/lib/debugobjects.c index deebcc57d4e6..9d86e45086f5 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -249,14 +249,17 @@ static struct debug_bucket *get_bucket(unsigned long addr) static void debug_print_object(struct debug_obj *obj, char *msg) { + struct debug_obj_descr *descr = obj->descr; static int limit; - if (limit < 5 && obj->descr != descr_test) { + if (limit < 5 && descr != descr_test) { + void *hint = descr->debug_hint ? + descr->debug_hint(obj->object) : NULL; limit++; WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " - "object type: %s\n", + "object type: %s hint: %pS\n", msg, obj_states[obj->state], obj->astate, - obj->descr->name); + descr->name, hint); } debug_objects_warnings++; } -- cgit v1.2.2 From bf6a9b8336ba12672755c2ae898b0abe42c7a5ac Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 21 Dec 2010 17:55:14 +0800 Subject: plist: Shrink struct plist_head struct plist_head is used in struct task_struct as well as struct rtmutex. If we can make it smaller, it will also make these structures smaller as well. The field prio_list in struct plist_head is seldom used and we can get its information from the plist_nodes. Removing this field will decrease the size of plist_head by half. Signed-off-by: Lai Jiangshan LKML-Reference: <4D107982.9090700@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- lib/plist.c | 54 +++++++++++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/lib/plist.c b/lib/plist.c index 1471988d9190..8c614d0e6c35 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -59,7 +59,8 @@ static void plist_check_head(struct plist_head *head) WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); if (head->spinlock) WARN_ON_SMP(!spin_is_locked(head->spinlock)); - plist_check_list(&head->prio_list); + if (!plist_head_empty(head)) + plist_check_list(&plist_first(head)->prio_list); plist_check_list(&head->node_list); } @@ -75,25 +76,33 @@ static void plist_check_head(struct plist_head *head) */ void plist_add(struct plist_node *node, struct plist_head *head) { - struct plist_node *iter; + struct plist_node *first, *iter, *prev = NULL; + struct list_head *node_next = &head->node_list; plist_check_head(head); WARN_ON(!plist_node_empty(node)); + WARN_ON(!list_empty(&node->prio_list)); - list_for_each_entry(iter, &head->prio_list, plist.prio_list) { - if (node->prio < iter->prio) - goto lt_prio; - else if (node->prio == iter->prio) { - iter = list_entry(iter->plist.prio_list.next, - struct plist_node, plist.prio_list); - goto eq_prio; + if (plist_head_empty(head)) + goto ins_node; + + first = iter = plist_first(head); + + do { + if (node->prio < iter->prio) { + node_next = &iter->node_list; + break; } - } -lt_prio: - list_add_tail(&node->plist.prio_list, &iter->plist.prio_list); -eq_prio: - list_add_tail(&node->plist.node_list, &iter->plist.node_list); + prev = iter; + iter = list_entry(iter->prio_list.next, + struct plist_node, prio_list); + } while (iter != first); + + if (!prev || prev->prio != node->prio) + list_add_tail(&node->prio_list, &iter->prio_list); +ins_node: + list_add_tail(&node->node_list, node_next); plist_check_head(head); } @@ -108,14 +117,21 @@ void plist_del(struct plist_node *node, 
struct plist_head *head) { plist_check_head(head); - if (!list_empty(&node->plist.prio_list)) { - struct plist_node *next = plist_first(&node->plist); + if (!list_empty(&node->prio_list)) { + if (node->node_list.next != &head->node_list) { + struct plist_node *next; + + next = list_entry(node->node_list.next, + struct plist_node, node_list); - list_move_tail(&next->plist.prio_list, &node->plist.prio_list); - list_del_init(&node->plist.prio_list); + /* add the next plist_node into prio_list */ + if (list_empty(&next->prio_list)) + list_add(&next->prio_list, &node->prio_list); + } + list_del_init(&node->prio_list); } - list_del_init(&node->plist.node_list); + list_del_init(&node->node_list); plist_check_head(head); } -- cgit v1.2.2 From 6d55da53db3d9b911f69f2ce1e5fb8943eafe057 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 21 Dec 2010 17:55:18 +0800 Subject: plist: Add priority list test Add test code for checking plist when the kernel is booting. Signed-off-by: Lai Jiangshan LKML-Reference: <4D107986.1010302@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- lib/plist.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/plist.c b/lib/plist.c index 8c614d0e6c35..0ae7e6431726 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -28,6 +28,8 @@ #ifdef CONFIG_DEBUG_PI_LIST +static struct plist_head test_head; + static void plist_check_prev_next(struct list_head *t, struct list_head *p, struct list_head *n) { @@ -54,7 +56,7 @@ static void plist_check_list(struct list_head *top) static void plist_check_head(struct plist_head *head) { - WARN_ON(!head->rawlock && !head->spinlock); + WARN_ON(head != &test_head && !head->rawlock && !head->spinlock); if (head->rawlock) WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); if (head->spinlock) @@ -135,3 +137,80 @@ void plist_del(struct plist_node *node, struct plist_head *head) plist_check_head(head); } + +#ifdef CONFIG_DEBUG_PI_LIST +#include +#include +#include + +static struct plist_node __initdata test_node[241]; + +static void __init plist_test_check(int nr_expect) +{ + struct plist_node *first, *prio_pos, *node_pos; + + if (plist_head_empty(&test_head)) { + BUG_ON(nr_expect != 0); + return; + } + + prio_pos = first = plist_first(&test_head); + plist_for_each(node_pos, &test_head) { + if (nr_expect-- < 0) + break; + if (node_pos == first) + continue; + if (node_pos->prio == prio_pos->prio) { + BUG_ON(!list_empty(&node_pos->prio_list)); + continue; + } + + BUG_ON(prio_pos->prio > node_pos->prio); + BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list); + prio_pos = node_pos; + } + + BUG_ON(nr_expect != 0); + BUG_ON(prio_pos->prio_list.next != &first->prio_list); +} + +static int __init plist_test(void) +{ + int nr_expect = 0, i, loop; + unsigned int r = local_clock(); + + printk(KERN_INFO "start plist test\n"); + plist_head_init(&test_head, NULL); + for (i = 0; i < ARRAY_SIZE(test_node); i++) + plist_node_init(test_node + i, 0); + + for (loop = 0; loop < 1000; loop++) { + r = r * 193939 % 47629; + i = r % ARRAY_SIZE(test_node); + if (plist_node_empty(test_node + i)) { + r = r * 193939 % 47629; + test_node[i].prio = r % 99; + plist_add(test_node + i, &test_head); + nr_expect++; + } else { + plist_del(test_node + i, &test_head); + nr_expect--; + } + plist_test_check(nr_expect); + } + + for (i = 0; i < ARRAY_SIZE(test_node); i++) { + if (plist_node_empty(test_node + i)) + continue; + plist_del(test_node + i, &test_head); + nr_expect--; + 
plist_test_check(nr_expect); + } + + printk(KERN_INFO "end plist test\n"); + return 0; +} + +module_init(plist_test); + +#endif -- cgit v1.2.2 From f2c23f65f63fe0dd83fc94efdfae0364c74458b8 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Mon, 21 Mar 2011 10:44:30 +0100 Subject: kbuild: Make DEBUG_SECTION_MISMATCH selectable, but not on by default CONFIG_DEBUG_SECTION_MISMATCH has also runtime effects due to the -fno-inline-functions-called-once compiler flag, so forcing it on everyone is not a good idea. Signed-off-by: Michal Marek --- lib/Kconfig.debug | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8b6a4f13946f..5a24805d4321 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -102,7 +102,6 @@ config HEADERS_CHECK config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" - default y help The section mismatch analysis checks if there are illegal references from one section to another section. -- cgit v1.2.2 From ddd588b5dd55f14320379961e47683db4e4c1d90 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 22 Mar 2011 16:30:46 -0700 Subject: oom: suppress nodes that are not allowed from meminfo on oom kill The oom killer is extremely verbose for machines with a large number of cpus and/or nodes. This verbosity can often be harmful if it causes other important messages to be scrolled from the kernel log and incurs a significant time delay, specifically for kernels with CONFIG_NODES_SHIFT > 8. This patch causes only memory information to be displayed for nodes that are allowed by current's cpuset when dumping the VM state. Information for all other nodes is irrelevant to the oom condition; we don't care if there's an abundance of memory elsewhere if we can't access it. This only affects the behavior of dumping memory information when an oom is triggered. Other dumps, such as for sysrq+m, still display the unfiltered form when using the existing show_mem() interface. Additionally, the per-cpu pageset statistics are extremely verbose in oom killer output, so they are now suppressed. This removes nodes_weight(current->mems_allowed) * (1 + nr_cpus) lines from the oom killer output. Callers may use __show_mem(SHOW_MEM_FILTER_NODES) to filter disallowed nodes. Signed-off-by: David Rientjes Cc: Mel Gorman Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/show_mem.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/show_mem.c b/lib/show_mem.c index fdc77c82f922..d8d602b58c31 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -9,14 +9,14 @@ #include #include -void show_mem(void) +void __show_mem(unsigned int filter) { pg_data_t *pgdat; unsigned long total = 0, reserved = 0, shared = 0, nonshared = 0, highmem = 0; printk("Mem-Info:\n"); - show_free_areas(); + __show_free_areas(filter); for_each_online_pgdat(pgdat) { unsigned long i, flags; @@ -61,3 +61,8 @@ void show_mem(void) quicklist_total_size()); #endif } + +void show_mem(void) +{ + __show_mem(0); +} -- cgit v1.2.2 From fef2c9bc1b54c0261324a96e948c0b849796e896 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 22 Mar 2011 16:34:16 -0700 Subject: kernel/watchdog.c: allow hardlockup to panic by default When a cpu is considered stuck, instead of limping along and just printing a warning, it is sometimes preferred to just panic, let kdump capture the vmcore and reboot.
This gets the machine back into a stable state quickly while saving the info that got it into a stuck state to begin with. Add a Kconfig option to allow users to set the hardlockup to panic by default. Also add in a 'nmi_watchdog=nopanic' to override this. [akpm@linux-foundation.org: fix strncmp length] Signed-off-by: Don Zickus Acked-by: Peter Zijlstra Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 191c5c4c89fc..fb0afeff9436 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -171,6 +171,23 @@ config HARDLOCKUP_DETECTOR def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \ !ARCH_HAS_NMI_WATCHDOG +config BOOTPARAM_HARDLOCKUP_PANIC + bool "Panic (Reboot) On Hard Lockups" + depends on LOCKUP_DETECTOR + help + Say Y here to enable the kernel to panic on "hard lockups", + which are bugs that cause the kernel to loop in kernel + mode with interrupts disabled for more than 60 seconds. + + Say N if unsure. + +config BOOTPARAM_HARDLOCKUP_PANIC_VALUE + int + depends on LOCKUP_DETECTOR + range 0 1 + default 0 if !BOOTPARAM_HARDLOCKUP_PANIC + default 1 if BOOTPARAM_HARDLOCKUP_PANIC + config BOOTPARAM_SOFTLOCKUP_PANIC bool "Panic (Reboot) On Soft Lockups" depends on LOCKUP_DETECTOR -- cgit v1.2.2 From 26297607e09ca6c7e6f2a6b86a8bee2f23503bb8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 22 Mar 2011 16:34:19 -0700 Subject: vsprintf: neaten %pK kptr_restrict, save a bit of code space If kptr restrictions are on, just set the passed pointer to NULL. $ size lib/vsprintf.o.* text data bss dec hex filename 8247 4 2 8253 203d lib/vsprintf.o.new 8282 4 2 8288 2060 lib/vsprintf.o.old Signed-off-by: Joe Perches Cc: Dan Rosenberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d3023df8477f..070d134eef71 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1047,16 +1047,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, if (spec.field_width == -1) spec.field_width = 2 * sizeof(void *); return string(buf, end, "pK-error", spec); - } else if ((kptr_restrict == 0) || - (kptr_restrict == 1 && - has_capability_noaudit(current, CAP_SYSLOG))) - break; - - if (spec.field_width == -1) { - spec.field_width = 2 * sizeof(void *); - spec.flags |= ZEROPAD; } - return number(buf, end, 0, spec); + if (!((kptr_restrict == 0) || + (kptr_restrict == 1 && + has_capability_noaudit(current, CAP_SYSLOG)))) + ptr = NULL; + break; } spec.flags |= SMALL; if (spec.field_width == -1) { -- cgit v1.2.2 From 9f36e2c448007b54851e7e4fa48da97d1477a175 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 22 Mar 2011 16:34:22 -0700 Subject: printk: use %pK for /proc/kallsyms and /proc/modules In an effort to reduce kernel address leaks that might be used to help target kernel privilege escalation exploits, this patch uses %pK when displaying addresses in /proc/kallsyms, /proc/modules, and /sys/module/*/sections/*. Note that this changes %x to %p, so some legitimately 0 values in /proc/kallsyms would have changed from 00000000 to "(null)". To avoid this, "(null)" is not used when using the "K" format. Anything that was already successfully parsing "(null)" in addition to full hex digits should have no problem with this change. 
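As an illustration (symbol addresses made up), an unprivileged reader with kptr_restrict enabled would now see zeroed addresses in /proc/kallsyms:

    00000000 T _stext

while a privileged reader still gets the real value, e.g. c01001e0 T _stext.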
(Thanks to Joe Perches for the suggestion.) Due to the %x to %p change, "void *" casts are needed since these addresses are already "unsigned long" everywhere internally, due to their starting life as ELF section offsets. Signed-off-by: Kees Cook Cc: Eugene Teo Cc: Dan Rosenberg Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 070d134eef71..ac444ff01658 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -991,7 +991,7 @@ static noinline_for_stack char *pointer(const char *fmt, char *buf, char *end, void *ptr, struct printf_spec spec) { - if (!ptr) { + if (!ptr && *fmt != 'K') { /* * Print (null) with the same width as a pointer so it makes * tabular output look nice. -- cgit v1.2.2 From 5af5bcb8d37f99ba415a1adc6da71051b84f93a5 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Tue, 22 Mar 2011 16:34:23 -0700 Subject: printk: allow setting DEFAULT_MESSAGE_LEVEL via Kconfig We've been burned by regressions/bugs which we later realized could have been triaged quicker if only we'd paid closer attention to dmesg. To make it easier to audit dmesg, we'd like to make DEFAULT_MESSAGE_LEVEL Kconfig-settable. That way we can set it to KERN_NOTICE and audit any messages <= KERN_WARNING. Signed-off-by: Mandeep Singh Baines Cc: Ingo Molnar Cc: Joe Perches Cc: Olof Johansson Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fb0afeff9436..bef5faab660f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -9,6 +9,17 @@ config PRINTK_TIME operations. This is useful for identifying long delays in kernel startup. +config DEFAULT_MESSAGE_LOGLEVEL + int "Default message log level (1-7)" + range 1 7 + default "4" + help + Default log level for printk statements with no specified priority. + + This was hard-coded to KERN_WARNING since at least 2.6.10 but folks + that are auditing their logs closely may want to set it to a lower + priority. + config ENABLE_WARN_DEPRECATED bool "Enable __deprecated logic" default y -- cgit v1.2.2 From 33ee3b2e2eb9b4b6c64dcf9ed66e2ac3124e748c Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 22 Mar 2011 16:34:40 -0700 Subject: kstrto*: converting strings to integers done (hopefully) right 1. simple_strto*() do not contain overflow checks and have a crufty, libc way of indicating failure. 2. strict_strto*() also do not have overflow checks, but the name and comments pretend they do. 3. Both families have only "long long" and "long" variants, but users want strtou8(). 4. Both the "simple" and "strict" prefixes are wrong: "simple" doesn't exactly say what's so simple, and "strict" should not exist because conversion should be strict by default. The solution is to use a "k" prefix and add converters for more types. Enter kstrtoull() kstrtoll() kstrtoul() kstrtol() kstrtouint() kstrtoint() kstrtou64() kstrtos64() kstrtou32() kstrtos32() kstrtou16() kstrtos16() kstrtou8() kstrtos8() A runtime test suite (somewhat incomplete) is included as well. The strict_strto*() functions become deprecated, are stubbed to kstrto*(), and will eventually be removed altogether. Use kstrto*() in code today! Note: on some archs _kstrtoul() and _kstrtol() are left in tree, even if they'll be unused at runtime.
This is a temporary solution, because I don't want to hardcode the list of archs where these functions aren't needed. The current solution with sizeof() and __alignof__ at least always works. Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 3 + lib/Makefile | 2 + lib/kstrtox.c | 227 ++++++++++++++++ lib/test-kstrtox.c | 739 +++++++++++++++++++++++++++++++++++++++++++++++++++++ lib/vsprintf.c | 141 ---------- 5 files changed, 971 insertions(+), 141 deletions(-) create mode 100644 lib/kstrtox.c create mode 100644 lib/test-kstrtox.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index bef5faab660f..df9234c5f9d1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1250,3 +1250,6 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" source "lib/Kconfig.kmemcheck" + +config TEST_KSTRTOX + tristate "Test kstrto*() family of functions at runtime" diff --git a/lib/Makefile b/lib/Makefile index ef7ed71a6ffd..8c9de027ebb1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -22,6 +22,8 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o +obj-y += kstrtox.o +obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/kstrtox.c b/lib/kstrtox.c new file mode 100644 index 000000000000..05672e819f8c --- /dev/null +++ b/lib/kstrtox.c @@ -0,0 +1,227 @@ +/* + * Convert integer string representation to an integer. + * If an integer doesn't fit into specified type, -E is returned. + * + * Integer starts with optional sign. + * kstrtou*() functions do not accept sign "-". + * + * Radix 0 means autodetection: leading "0x" implies radix 16, + * leading "0" implies radix 8, otherwise radix is 10. + * Autodetection hints work after optional sign, but not before. + * + * If -E is returned, result is not touched.
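 * 
 * For example, under the rules above a call such as
 * 
 *	unsigned int val;
 *	int err = kstrtouint("0x10\n", 0, &val);
 * 
 * should succeed with err == 0 and val == 16: base 0 autodetects
 * hexadecimal from the leading "0x", and a single trailing newline
 * is accepted.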
+ */ +#include <linux/ctype.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/math64.h> +#include <linux/module.h> +#include <linux/types.h> + +static inline char _tolower(const char c) +{ + return c | 0x20; +} + +static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + unsigned long long acc; + int ok; + + if (base == 0) { + if (s[0] == '0') { + if (_tolower(s[1]) == 'x' && isxdigit(s[2])) + base = 16; + else + base = 8; + } else + base = 10; + } + if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') + s += 2; + + acc = 0; + ok = 0; + while (*s) { + unsigned int val; + + if ('0' <= *s && *s <= '9') + val = *s - '0'; + else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') + val = _tolower(*s) - 'a' + 10; + else if (*s == '\n') { + if (*(s + 1) == '\0') + break; + else + return -EINVAL; + } else + return -EINVAL; + + if (val >= base) + return -EINVAL; + if (acc > div_u64(ULLONG_MAX - val, base)) + return -ERANGE; + acc = acc * base + val; + ok = 1; + + s++; + } + if (!ok) + return -EINVAL; + *res = acc; + return 0; +} + +int kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + if (s[0] == '+') + s++; + return _kstrtoull(s, base, res); +} +EXPORT_SYMBOL(kstrtoull); + +int kstrtoll(const char *s, unsigned int base, long long *res) +{ + unsigned long long tmp; + int rv; + + if (s[0] == '-') { + rv = _kstrtoull(s + 1, base, &tmp); + if (rv < 0) + return rv; + if ((long long)(-tmp) >= 0) + return -ERANGE; + *res = -tmp; + } else { + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if ((long long)tmp < 0) + return -ERANGE; + *res = tmp; + } + return 0; +} +EXPORT_SYMBOL(kstrtoll); + +/* Internal, do not use. */ +int _kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(unsigned long)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(_kstrtoul); + +/* Internal, do not use.
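+ * Like _kstrtoul() above, this presumably only backs the inline
+ * kstrtol() wrapper on architectures where "long" differs from
+ * "long long" in sizeof() or __alignof__; elsewhere the wrapper
+ * calls kstrtoll() directly and this stays unused at runtime.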
*/ +int _kstrtol(const char *s, unsigned int base, long *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(long)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(_kstrtol); + +int kstrtouint(const char *s, unsigned int base, unsigned int *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(unsigned int)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtouint); + +int kstrtoint(const char *s, unsigned int base, int *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(int)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtoint); + +int kstrtou16(const char *s, unsigned int base, u16 *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(u16)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtou16); + +int kstrtos16(const char *s, unsigned int base, s16 *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(s16)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtos16); + +int kstrtou8(const char *s, unsigned int base, u8 *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(u8)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtou8); + +int kstrtos8(const char *s, unsigned int base, s8 *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(s8)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtos8); diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c new file mode 100644 index 000000000000..325c2f9ecebd --- /dev/null +++ b/lib/test-kstrtox.c @@ -0,0 +1,739 @@ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#define for_each_test(i, test) \ + for (i = 0; i < sizeof(test) / sizeof(test[0]); i++) + +struct test_fail { + const char *str; + unsigned int base; +}; + +#define DEFINE_TEST_FAIL(test) \ + const struct test_fail test[] __initdata + +#define DECLARE_TEST_OK(type, test_type) \ + test_type { \ + const char *str; \ + unsigned int base; \ + type expected_res; \ + } + +#define DEFINE_TEST_OK(type, test) \ + const type test[] __initdata + +#define TEST_FAIL(fn, type, fmt, test) \ +{ \ + unsigned int i; \ + \ + for_each_test(i, test) { \ + const struct test_fail *t = &test[i]; \ + type tmp; \ + int rv; \ + \ + tmp = 0; \ + rv = fn(t->str, t->base, &tmp); \ + if (rv >= 0) { \ + WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \ + t->str, t->base, rv, tmp); \ + continue; \ + } \ + } \ +} + +#define TEST_OK(fn, type, fmt, test) \ +{ \ + unsigned int i; \ + \ + for_each_test(i, test) { \ + const typeof(test[0]) *t = &test[i]; \ + type res; \ + int rv; \ + \ + rv = fn(t->str, t->base, &res); \ + if (rv != 0) { \ + WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \ + t->str, t->base, t->expected_res, rv); \ + continue; \ + } \ + if (res != t->expected_res) { \ + WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \ + t->str, t->base, t->expected_res, res); \ + continue; \ + } \ + } \ +} + +static void __init test_kstrtoull_ok(void) +{ + DECLARE_TEST_OK(unsigned long long, struct test_ull); + static DEFINE_TEST_OK(struct
test_ull, test_ull_ok) = { + {"0", 10, 0ULL}, + {"1", 10, 1ULL}, + {"127", 10, 127ULL}, + {"128", 10, 128ULL}, + {"129", 10, 129ULL}, + {"255", 10, 255ULL}, + {"256", 10, 256ULL}, + {"257", 10, 257ULL}, + {"32767", 10, 32767ULL}, + {"32768", 10, 32768ULL}, + {"32769", 10, 32769ULL}, + {"65535", 10, 65535ULL}, + {"65536", 10, 65536ULL}, + {"65537", 10, 65537ULL}, + {"2147483647", 10, 2147483647ULL}, + {"2147483648", 10, 2147483648ULL}, + {"2147483649", 10, 2147483649ULL}, + {"4294967295", 10, 4294967295ULL}, + {"4294967296", 10, 4294967296ULL}, + {"4294967297", 10, 4294967297ULL}, + {"9223372036854775807", 10, 9223372036854775807ULL}, + {"9223372036854775808", 10, 9223372036854775808ULL}, + {"9223372036854775809", 10, 9223372036854775809ULL}, + {"18446744073709551614", 10, 18446744073709551614ULL}, + {"18446744073709551615", 10, 18446744073709551615ULL}, + + {"00", 8, 00ULL}, + {"01", 8, 01ULL}, + {"0177", 8, 0177ULL}, + {"0200", 8, 0200ULL}, + {"0201", 8, 0201ULL}, + {"0377", 8, 0377ULL}, + {"0400", 8, 0400ULL}, + {"0401", 8, 0401ULL}, + {"077777", 8, 077777ULL}, + {"0100000", 8, 0100000ULL}, + {"0100001", 8, 0100001ULL}, + {"0177777", 8, 0177777ULL}, + {"0200000", 8, 0200000ULL}, + {"0200001", 8, 0200001ULL}, + {"017777777777", 8, 017777777777ULL}, + {"020000000000", 8, 020000000000ULL}, + {"020000000001", 8, 020000000001ULL}, + {"037777777777", 8, 037777777777ULL}, + {"040000000000", 8, 040000000000ULL}, + {"040000000001", 8, 040000000001ULL}, + {"0777777777777777777777", 8, 0777777777777777777777ULL}, + {"01000000000000000000000", 8, 01000000000000000000000ULL}, + {"01000000000000000000001", 8, 01000000000000000000001ULL}, + {"01777777777777777777776", 8, 01777777777777777777776ULL}, + {"01777777777777777777777", 8, 01777777777777777777777ULL}, + + {"0x0", 16, 0x0ULL}, + {"0x1", 16, 0x1ULL}, + {"0x7f", 16, 0x7fULL}, + {"0x80", 16, 0x80ULL}, + {"0x81", 16, 0x81ULL}, + {"0xff", 16, 0xffULL}, + {"0x100", 16, 0x100ULL}, + {"0x101", 16, 0x101ULL}, + {"0x7fff", 16, 0x7fffULL}, + {"0x8000", 16, 0x8000ULL}, + {"0x8001", 16, 0x8001ULL}, + {"0xffff", 16, 0xffffULL}, + {"0x10000", 16, 0x10000ULL}, + {"0x10001", 16, 0x10001ULL}, + {"0x7fffffff", 16, 0x7fffffffULL}, + {"0x80000000", 16, 0x80000000ULL}, + {"0x80000001", 16, 0x80000001ULL}, + {"0xffffffff", 16, 0xffffffffULL}, + {"0x100000000", 16, 0x100000000ULL}, + {"0x100000001", 16, 0x100000001ULL}, + {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL}, + {"0x8000000000000000", 16, 0x8000000000000000ULL}, + {"0x8000000000000001", 16, 0x8000000000000001ULL}, + {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL}, + {"0xffffffffffffffff", 16, 0xffffffffffffffffULL}, + + {"0\n", 0, 0ULL}, + }; + TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok); +} + +static void __init test_kstrtoull_fail(void) +{ + static DEFINE_TEST_FAIL(test_ull_fail) = { + {"", 0}, + {"", 8}, + {"", 10}, + {"", 16}, + {"\n", 0}, + {"\n", 8}, + {"\n", 10}, + {"\n", 16}, + {"\n0", 0}, + {"\n0", 8}, + {"\n0", 10}, + {"\n0", 16}, + {"+", 0}, + {"+", 8}, + {"+", 10}, + {"+", 16}, + {"-", 0}, + {"-", 8}, + {"-", 10}, + {"-", 16}, + {"0x", 0}, + {"0x", 16}, + {"0X", 0}, + {"0X", 16}, + {"0 ", 0}, + {"1+", 0}, + {"1-", 0}, + {" 2", 0}, + /* base autodetection */ + {"0x0z", 0}, + {"0z", 0}, + {"a", 0}, + /* digit >= base */ + {"2", 2}, + {"8", 8}, + {"a", 10}, + {"A", 10}, + {"g", 16}, + {"G", 16}, + /* overflow */ + {"10000000000000000000000000000000000000000000000000000000000000000", 2}, + {"2000000000000000000000", 8}, + {"18446744073709551616", 10}, + {"10000000000000000", 16}, 
+ /* negative */ + {"-0", 0}, + {"-0", 8}, + {"-0", 10}, + {"-0", 16}, + {"-1", 0}, + {"-1", 8}, + {"-1", 10}, + {"-1", 16}, + /* sign is first character if any */ + {"-+1", 0}, + {"-+1", 8}, + {"-+1", 10}, + {"-+1", 16}, + /* nothing after \n */ + {"0\n0", 0}, + {"0\n0", 8}, + {"0\n0", 10}, + {"0\n0", 16}, + {"0\n+", 0}, + {"0\n+", 8}, + {"0\n+", 10}, + {"0\n+", 16}, + {"0\n-", 0}, + {"0\n-", 8}, + {"0\n-", 10}, + {"0\n-", 16}, + {"0\n ", 0}, + {"0\n ", 8}, + {"0\n ", 10}, + {"0\n ", 16}, + }; + TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail); +} + +static void __init test_kstrtoll_ok(void) +{ + DECLARE_TEST_OK(long long, struct test_ll); + static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = { + {"0", 10, 0LL}, + {"1", 10, 1LL}, + {"127", 10, 127LL}, + {"128", 10, 128LL}, + {"129", 10, 129LL}, + {"255", 10, 255LL}, + {"256", 10, 256LL}, + {"257", 10, 257LL}, + {"32767", 10, 32767LL}, + {"32768", 10, 32768LL}, + {"32769", 10, 32769LL}, + {"65535", 10, 65535LL}, + {"65536", 10, 65536LL}, + {"65537", 10, 65537LL}, + {"2147483647", 10, 2147483647LL}, + {"2147483648", 10, 2147483648LL}, + {"2147483649", 10, 2147483649LL}, + {"4294967295", 10, 4294967295LL}, + {"4294967296", 10, 4294967296LL}, + {"4294967297", 10, 4294967297LL}, + {"9223372036854775807", 10, 9223372036854775807LL}, + + {"-1", 10, -1LL}, + {"-2", 10, -2LL}, + {"-9223372036854775808", 10, LLONG_MIN}, + }; + TEST_OK(kstrtoll, long long, "%lld", test_ll_ok); +} + +static void __init test_kstrtoll_fail(void) +{ + static DEFINE_TEST_FAIL(test_ll_fail) = { + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"-9223372036854775809", 10}, + {"-18446744073709551614", 10}, + {"-18446744073709551615", 10}, + /* negative zero isn't an integer in Linux */ + {"-0", 0}, + {"-0", 8}, + {"-0", 10}, + {"-0", 16}, + /* sign is first character if any */ + {"-+1", 0}, + {"-+1", 8}, + {"-+1", 10}, + {"-+1", 16}, + }; + TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail); +} + +static void __init test_kstrtou64_ok(void) +{ + DECLARE_TEST_OK(u64, struct test_u64); + static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + {"4294967296", 10, 4294967296}, + {"4294967297", 10, 4294967297}, + {"9223372036854775806", 10, 9223372036854775806ULL}, + {"9223372036854775807", 10, 9223372036854775807ULL}, + {"9223372036854775808", 10, 9223372036854775808ULL}, + {"9223372036854775809", 10, 9223372036854775809ULL}, + {"18446744073709551614", 10, 18446744073709551614ULL}, + {"18446744073709551615", 10, 18446744073709551615ULL}, + }; + TEST_OK(kstrtou64, u64, "%llu", test_u64_ok); +} + +static void __init test_kstrtou64_fail(void) +{ + static DEFINE_TEST_FAIL(test_u64_fail) = { + {"-2", 10}, + {"-1", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail); +} + +static void __init test_kstrtos64_ok(void) +{ + DECLARE_TEST_OK(s64, 
struct test_s64); + static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + {"4294967296", 10, 4294967296}, + {"4294967297", 10, 4294967297}, + {"9223372036854775806", 10, 9223372036854775806LL}, + {"9223372036854775807", 10, 9223372036854775807LL}, + }; + TEST_OK(kstrtos64, s64, "%lld", test_s64_ok); +} + +static void __init test_kstrtos64_fail(void) +{ + static DEFINE_TEST_FAIL(test_s64_fail) = { + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail); +} + +static void __init test_kstrtou32_ok(void) +{ + DECLARE_TEST_OK(u32, struct test_u32); + static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + }; + TEST_OK(kstrtou32, u32, "%u", test_u32_ok); +} + +static void __init test_kstrtou32_fail(void) +{ + static DEFINE_TEST_FAIL(test_u32_fail) = { + {"-2", 10}, + {"-1", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail); +} + +static void __init test_kstrtos32_ok(void) +{ + DECLARE_TEST_OK(s32, struct test_s32); + static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + }; + TEST_OK(kstrtos32, s32, "%d", test_s32_ok); +} + +static void __init test_kstrtos32_fail(void) +{ + static DEFINE_TEST_FAIL(test_s32_fail) = { + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + 
{"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail); +} + +static void __init test_kstrtou16_ok(void) +{ + DECLARE_TEST_OK(u16, struct test_u16); + static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + }; + TEST_OK(kstrtou16, u16, "%hu", test_u16_ok); +} + +static void __init test_kstrtou16_fail(void) +{ + static DEFINE_TEST_FAIL(test_u16_fail) = { + {"-2", 10}, + {"-1", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail); +} + +static void __init test_kstrtos16_ok(void) +{ + DECLARE_TEST_OK(s16, struct test_s16); + static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = { + {"-130", 10, -130}, + {"-129", 10, -129}, + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + }; + TEST_OK(kstrtos16, s16, "%hd", test_s16_ok); +} + +static void __init test_kstrtos16_fail(void) +{ + static DEFINE_TEST_FAIL(test_s16_fail) = { + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail); +} + +static void __init test_kstrtou8_ok(void) +{ + DECLARE_TEST_OK(u8, struct test_u8); + static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + }; + TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok); +} + +static void __init test_kstrtou8_fail(void) +{ + static DEFINE_TEST_FAIL(test_u8_fail) = { + {"-2", 10}, + {"-1", 10}, + {"256", 10}, + {"257", 10}, + {"32766", 10}, + {"32767", 10}, + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + 
{"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail); +} + +static void __init test_kstrtos8_ok(void) +{ + DECLARE_TEST_OK(s8, struct test_s8); + static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + }; + TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok); +} + +static void __init test_kstrtos8_fail(void) +{ + static DEFINE_TEST_FAIL(test_s8_fail) = { + {"-130", 10}, + {"-129", 10}, + {"128", 10}, + {"129", 10}, + {"254", 10}, + {"255", 10}, + {"256", 10}, + {"257", 10}, + {"32766", 10}, + {"32767", 10}, + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail); +} + +static int __init test_kstrtox_init(void) +{ + test_kstrtoull_ok(); + test_kstrtoull_fail(); + test_kstrtoll_ok(); + test_kstrtoll_fail(); + + test_kstrtou64_ok(); + test_kstrtou64_fail(); + test_kstrtos64_ok(); + test_kstrtos64_fail(); + + test_kstrtou32_ok(); + test_kstrtou32_fail(); + test_kstrtos32_ok(); + test_kstrtos32_fail(); + + test_kstrtou16_ok(); + test_kstrtou16_fail(); + test_kstrtos16_ok(); + test_kstrtos16_fail(); + + test_kstrtou8_ok(); + test_kstrtou8_fail(); + test_kstrtos8_ok(); + test_kstrtos8_fail(); + return -EINVAL; +} +module_init(test_kstrtox_init); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index ac444ff01658..02bcdd5feac4 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -120,147 +120,6 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base) } EXPORT_SYMBOL(simple_strtoll); -/** - * strict_strtoul - convert a string to an unsigned long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoul converts a string to an unsigned long only if the - * string is really an unsigned long string, any string containing - * any invalid char at the tail will be rejected and -EINVAL is returned, - * only a newline char at the tail is acceptible because people generally - * change a module parameter in the following way: - * - * echo 1024 > /sys/module/e1000/parameters/copybreak - * - * echo will append a newline to the tail. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - * - * simple_strtoul just ignores the successive invalid characters and - * return the converted value of prefix part of the string. 
- */ -int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) -{ - char *tail; - unsigned long val; - - *res = 0; - if (!*cp) - return -EINVAL; - - val = simple_strtoul(cp, &tail, base); - if (tail == cp) - return -EINVAL; - - if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { - *res = val; - return 0; - } - - return -EINVAL; -} -EXPORT_SYMBOL(strict_strtoul); - -/** - * strict_strtol - convert a string to a long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtol is similiar to strict_strtoul, but it allows the first - * character of a string is '-'. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - */ -int strict_strtol(const char *cp, unsigned int base, long *res) -{ - int ret; - if (*cp == '-') { - ret = strict_strtoul(cp + 1, base, (unsigned long *)res); - if (!ret) - *res = -(*res); - } else { - ret = strict_strtoul(cp, base, (unsigned long *)res); - } - - return ret; -} -EXPORT_SYMBOL(strict_strtol); - -/** - * strict_strtoull - convert a string to an unsigned long long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoull converts a string to an unsigned long long only if the - * string is really an unsigned long long string, any string containing - * any invalid char at the tail will be rejected and -EINVAL is returned, - * only a newline char at the tail is acceptible because people generally - * change a module parameter in the following way: - * - * echo 1024 > /sys/module/e1000/parameters/copybreak - * - * echo will append a newline to the tail of the string. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - * - * simple_strtoull just ignores the successive invalid characters and - * return the converted value of prefix part of the string. - */ -int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res) -{ - char *tail; - unsigned long long val; - - *res = 0; - if (!*cp) - return -EINVAL; - - val = simple_strtoull(cp, &tail, base); - if (tail == cp) - return -EINVAL; - if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { - *res = val; - return 0; - } - - return -EINVAL; -} -EXPORT_SYMBOL(strict_strtoull); - -/** - * strict_strtoll - convert a string to a long long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoll is similiar to strict_strtoull, but it allows the first - * character of a string is '-'. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. 
- */ -int strict_strtoll(const char *cp, unsigned int base, long long *res) -{ - int ret; - if (*cp == '-') { - ret = strict_strtoull(cp + 1, base, (unsigned long long *)res); - if (!ret) - *res = -(*res); - } else { - ret = strict_strtoull(cp, base, (unsigned long long *)res); - } - - return ret; -} -EXPORT_SYMBOL(strict_strtoll); - static noinline_for_stack int skip_atoi(const char **s) { -- cgit v1.2.2 From 565d76cb7d5fd7cb010fd690602280a69ab116ef Mon Sep 17 00:00:00 2001 From: Jim Keniston Date: Tue, 22 Mar 2011 16:35:12 -0700 Subject: zlib: slim down zlib_deflate() workspace when possible Instead of always creating a huge (268K) deflate_workspace with the maximum compression parameters (windowBits=15, memLevel=8), allow the caller to obtain a smaller workspace by specifying smaller parameter values. For example, when capturing oops and panic reports to a medium with limited capacity, such as NVRAM, compression may be the only way to capture the whole report. In this case, a small workspace (24K works fine) is a win, whether you allocate the workspace when you need it (i.e., during an oops or panic) or at boot time. I've verified that this patch works with all accepted values of windowBits (positive and negative), memLevel, and compression level. Signed-off-by: Jim Keniston Cc: Herbert Xu Cc: David Miller Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/zlib_deflate/deflate.c | 31 +++++++++++++++++++++++++++++-- lib/zlib_deflate/defutil.h | 17 +++++++++++++---- 2 files changed, 42 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c index 46a31e5f49c3..d63381e8e333 100644 --- a/lib/zlib_deflate/deflate.c +++ b/lib/zlib_deflate/deflate.c @@ -176,6 +176,7 @@ int zlib_deflateInit2( deflate_state *s; int noheader = 0; deflate_workspace *mem; + char *next; ush *overlay; /* We overlay pending_buf and d_buf+l_buf. This works since the average @@ -199,6 +200,21 @@ int zlib_deflateInit2( strategy < 0 || strategy > Z_HUFFMAN_ONLY) { return Z_STREAM_ERROR; } + + /* + * Direct the workspace's pointers to the chunks that were allocated + * along with the deflate_workspace struct. + */ + next = (char *) mem; + next += sizeof(*mem); + mem->window_memory = (Byte *) next; + next += zlib_deflate_window_memsize(windowBits); + mem->prev_memory = (Pos *) next; + next += zlib_deflate_prev_memsize(windowBits); + mem->head_memory = (Pos *) next; + next += zlib_deflate_head_memsize(memLevel); + mem->overlay_memory = next; + s = (deflate_state *) &(mem->deflate_memory); strm->state = (struct internal_state *)s; s->strm = strm; @@ -1247,7 +1263,18 @@ static block_state deflate_slow( return flush == Z_FINISH ? finish_done : block_done; } -int zlib_deflate_workspacesize(void) +int zlib_deflate_workspacesize(int windowBits, int memLevel) { - return sizeof(deflate_workspace); + if (windowBits < 0) /* undocumented feature: suppress zlib header */ + windowBits = -windowBits; + + /* Since the return value is typically passed to vmalloc() unchecked... 
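	 * A sketch of the small-workspace usage described in the changelog
	 * (windowBits/memLevel values illustrative):
	 *
	 *	stream.workspace = vmalloc(zlib_deflate_workspacesize(12, 5));
	 *	zlib_deflateInit2(&stream, Z_BEST_COMPRESSION, Z_DEFLATED,
	 *			  12, 5, Z_DEFAULT_STRATEGY);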
*/ + BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 || + windowBits > 15); + + return sizeof(deflate_workspace) + + zlib_deflate_window_memsize(windowBits) + + zlib_deflate_prev_memsize(windowBits) + + zlib_deflate_head_memsize(memLevel) + + zlib_deflate_overlay_memsize(memLevel); } diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h index 6b15a909ca3f..b640b6402e99 100644 --- a/lib/zlib_deflate/defutil.h +++ b/lib/zlib_deflate/defutil.h @@ -241,12 +241,21 @@ typedef struct deflate_state { typedef struct deflate_workspace { /* State memory for the deflator */ deflate_state deflate_memory; - Byte window_memory[2 * (1 << MAX_WBITS)]; - Pos prev_memory[1 << MAX_WBITS]; - Pos head_memory[1 << (MAX_MEM_LEVEL + 7)]; - char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)]; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; } deflate_workspace; +#define zlib_deflate_window_memsize(windowBits) \ + (2 * (1 << (windowBits)) * sizeof(Byte)) +#define zlib_deflate_prev_memsize(windowBits) \ + ((1 << (windowBits)) * sizeof(Pos)) +#define zlib_deflate_head_memsize(memLevel) \ + ((1 << ((memLevel)+7)) * sizeof(Pos)) +#define zlib_deflate_overlay_memsize(memLevel) \ + ((1 << ((memLevel)+6)) * (sizeof(ush)+2)) + /* Output a byte on the stream. * IN assertion: there is enough room in pending_buf. */ -- cgit v1.2.2 From c4945b9ed472e8796e352f10df9dbc2841ba7b61 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 23 Mar 2011 16:41:47 -0700 Subject: asm-generic: rename generic little-endian bitops functions As a preparation for providing little-endian bitops for all architectures, this renames the generic implementation of little-endian bitops (removing the "generic_" prefix and making "_le" a suffix): s/generic_find_next_le_bit/find_next_bit_le/ s/generic_find_next_zero_le_bit/find_next_zero_bit_le/ s/generic_find_first_zero_le_bit/find_first_zero_bit_le/ s/generic___test_and_set_le_bit/__test_and_set_bit_le/ s/generic___test_and_clear_le_bit/__test_and_clear_bit_le/ s/generic_test_le_bit/test_bit_le/ s/generic___set_le_bit/__set_bit_le/ s/generic___clear_le_bit/__clear_bit_le/ s/generic_test_and_set_le_bit/test_and_set_bit_le/ s/generic_test_and_clear_le_bit/test_and_clear_bit_le/ Signed-off-by: Akinobu Mita Acked-by: Arnd Bergmann Acked-by: Hans-Christian Egtvedt Cc: Geert Uytterhoeven Cc: Roman Zippel Cc: Andreas Schwab Cc: Greg Ungerer Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/find_next_bit.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 24c59ded47a0..c093ba988003 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -185,7 +185,7 @@ static inline unsigned long ext2_swab(const unsigned long y) #endif } -unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned +unsigned long find_next_zero_bit_le(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); @@ -226,10 +226,9 @@ found_middle: found_middle_swap: return result + ffz(ext2_swab(tmp)); } +EXPORT_SYMBOL(find_next_zero_bit_le); -EXPORT_SYMBOL(generic_find_next_zero_le_bit); - -unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned +unsigned long find_next_bit_le(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); @@ -271,5 +270,5 @@
found_middle: found_middle_swap: return result + __ffs(ext2_swab(tmp)); } -EXPORT_SYMBOL(generic_find_next_le_bit); +EXPORT_SYMBOL(find_next_bit_le); #endif /* __BIG_ENDIAN */ -- cgit v1.2.2 From a56560b3b233238e85205d4e8d7bded904ac2306 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 23 Mar 2011 16:41:50 -0700 Subject: asm-generic: change little-endian bitops to take any pointer types This makes the little-endian bitops take any pointer types by changing the prototypes and adding casts in the preprocessor macros. That would seem to at least make all the filesystem code happier, and they can continue to do just something like #define ext2_set_bit __test_and_set_bit_le (or whatever the exact sequence ends up being). Signed-off-by: Akinobu Mita Cc: Arnd Bergmann Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Mikael Starvik Cc: David Howells Cc: Yoshinori Sato Cc: "Luck, Tony" Cc: Ralf Baechle Cc: Kyle McMartin Cc: Matthew Wilcox Cc: Grant Grundler Cc: Paul Mundt Cc: Kazumoto Kojima Cc: Hirokazu Takata Cc: "David S. Miller" Cc: Chris Zankel Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Hans-Christian Egtvedt Cc: "H. Peter Anvin" Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/find_next_bit.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index c093ba988003..7667c3d907d3 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -185,15 +185,16 @@ static inline unsigned long ext2_swab(const unsigned long y) #endif } -unsigned long find_next_zero_bit_le(const unsigned long *addr, unsigned +unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); + const unsigned long *p = addr; unsigned long result = offset & ~(BITS_PER_LONG - 1); unsigned long tmp; if (offset >= size) return size; + p += BITOP_WORD(offset); size -= result; offset &= (BITS_PER_LONG - 1UL); if (offset) { @@ -228,15 +229,16 @@ found_middle_swap: } EXPORT_SYMBOL(find_next_zero_bit_le); -unsigned long find_next_bit_le(const unsigned long *addr, unsigned +unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); + const unsigned long *p = addr; unsigned long result = offset & ~(BITS_PER_LONG - 1); unsigned long tmp; if (offset >= size) return size; + p += BITOP_WORD(offset); size -= result; offset &= (BITS_PER_LONG - 1UL); if (offset) { -- cgit v1.2.2 From 0664996b7c2fdb1b7f90954469cc242274abd7db Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Wed, 23 Mar 2011 16:41:59 -0700 Subject: bitops: introduce CONFIG_GENERIC_FIND_BIT_LE This introduces CONFIG_GENERIC_FIND_BIT_LE to tell whether to use the generic implementation of find_*_bit_le() in lib/find_next_bit.c or not. For now we select CONFIG_GENERIC_FIND_BIT_LE for all architectures which enable CONFIG_GENERIC_FIND_NEXT_BIT. But m68knommu wants to define its own faster find_next_zero_bit_le() and continue using the generic find_next_{,zero_}bit().
(CONFIG_GENERIC_FIND_NEXT_BIT and !CONFIG_GENERIC_FIND_BIT_LE) Signed-off-by: Akinobu Mita Cc: Greg Ungerer Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig | 3 +++ lib/Makefile | 1 + lib/find_next_bit.c | 3 +++ 3 files changed, 7 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 3a55a43c43eb..23fa7a359db7 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -22,6 +22,9 @@ config GENERIC_FIND_FIRST_BIT config GENERIC_FIND_NEXT_BIT bool +config GENERIC_FIND_BIT_LE + bool + config GENERIC_FIND_LAST_BIT bool default y diff --git a/lib/Makefile b/lib/Makefile index 8c9de027ebb1..d7872b5c4c1c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -40,6 +40,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o +lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 7667c3d907d3..b0a8767282bf 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -160,6 +160,7 @@ EXPORT_SYMBOL(find_first_zero_bit); #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef __BIG_ENDIAN +#ifdef CONFIG_GENERIC_FIND_BIT_LE /* include/linux/byteorder does not support "unsigned long" type */ static inline unsigned long ext2_swabp(const unsigned long * x) @@ -273,4 +274,6 @@ found_middle_swap: return result + __ffs(ext2_swab(tmp)); } EXPORT_SYMBOL(find_next_bit_le); + +#endif /* CONFIG_GENERIC_FIND_BIT_LE */ #endif /* __BIG_ENDIAN */ -- cgit v1.2.2 From b2b755b5f10eb32fbdc73a9907c07006b17f714b Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 24 Mar 2011 15:18:15 -0700 Subject: lib, arch: add filter argument to show_mem and fix private implementations Commit ddd588b5dd55 ("oom: suppress nodes that are not allowed from meminfo on oom kill") moved lib/show_mem.o out of lib/lib.a, which resulted in build warnings on all architectures that implement their own versions of show_mem(): lib/lib.a(show_mem.o): In function `show_mem': show_mem.c:(.text+0x1f4): multiple definition of `show_mem' arch/sparc/mm/built-in.o:(.text+0xd70): first defined here The fix is to remove __show_mem() and add its argument to show_mem() in all implementations to prevent this breakage. Architectures that implement their own show_mem() actually don't do anything with the argument yet, but they could be made to filter nodes that aren't allowed in the current context in the future just like the generic implementation. Reported-by: Stephen Rothwell Reported-by: James Bottomley Suggested-by: Andrew Morton Signed-off-by: David Rientjes Signed-off-by: Linus Torvalds --- lib/show_mem.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/show_mem.c b/lib/show_mem.c index d8d602b58c31..90cbe4bb5960 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -9,7 +9,7 @@ #include #include -void __show_mem(unsigned int filter) +void show_mem(unsigned int filter) { pg_data_t *pgdat; unsigned long total = 0, reserved = 0, shared = 0, @@ -61,8 +61,3 @@ void __show_mem(unsigned int filter) quicklist_total_size()); #endif } - -void show_mem(void) -{ - __show_mem(0); -} -- cgit v1.2.2
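Taken together with ddd588b5dd55 above, this leaves a single show_mem() entry point that takes the filter directly. A minimal sketch of the two resulting call styles (SHOW_MEM_FILTER_NODES is the flag the earlier oom patch added to the mm headers):

	#include <linux/mm.h>

	/* oom path: dump only the nodes allowed by current's cpuset */
	show_mem(SHOW_MEM_FILTER_NODES);

	/* sysrq+m and other dumps: unfiltered, as before */
	show_mem(0);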