Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig            |  6
-rw-r--r--   lib/Kconfig.debug      | 22
-rw-r--r--   lib/Makefile           |  4
-rw-r--r--   lib/idr.c              | 14
-rw-r--r--   lib/smp_processor_id.c |  2
5 files changed, 16 insertions, 32 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..fc8ea1ca59d8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -136,12 +136,6 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
-#
-# plist support is select#ed if needed
-#
-config PLIST
-	boolean
-
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c9ae6085c75..29044f500269 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -570,6 +570,15 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+#
+# Select this config option from the architecture Kconfig, if it
+# it is preferred to always offer frame pointers as a config
+# option on the architecture (regardless of KERNEL_DEBUG):
+#
+config ARCH_WANT_FRAME_POINTERS
+	bool
+	help
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
@@ -633,19 +642,6 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
-	depends on CLASSIC_RCU
-	default n
-	help
-	  This option causes RCU to printk information on which
-	  CPUs are delaying the current grace period, but only when
-	  the grace period extends for excessive time periods.
-
-	  Say Y if you want RCU to perform such checks.
-
-	  Say N if you are unsure.
-
-config RCU_CPU_STALL_DETECTOR
-	bool "Check for stalled CPUs delaying RCU grace periods"
 	depends on CLASSIC_RCU || TREE_RCU
 	default n
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..902d73851044 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o \
+	 is_single_threaded.o plist.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
-obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -121,7 +121,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < IDR_FREE_MAX) {
 		struct idr_layer *new;
-		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
+		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
 		move_to_free_list(idp, new);
@@ -292,7 +292,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 * and go back to the idr_pre_get() call. If the idr is full, it will
 * return -ENOSPC.
 *
-* @id returns a value in the range 0 ... 0x7fffffff
+* @id returns a value in the range @starting_id ... 0x7fffffff
 */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
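
The kerneldoc fix above makes the comment match what idr_get_new_above() actually does: the returned id is never below @starting_id. For context, a minimal caller sketch of that interface, assuming the usual idr_pre_get()/-EAGAIN retry pattern of this API generation; the idr name, GFP flag, and starting id below are illustrative only, not part of the patch:

#include <linux/idr.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);		/* illustrative idr, not from the patch */

static int example_alloc_id(void *ptr)
{
	int id, ret;

retry:
	/* idr_pre_get() preloads free idr_layers; it returns 0 on allocation failure */
	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;

	/* ask for an id >= 100; on success id lies in 100 ... 0x7fffffff */
	ret = idr_get_new_above(&example_idr, ptr, 100, &id);
	if (ret == -EAGAIN)
		goto retry;	/* lost a race for the preloaded layers, preload again */
	if (ret)
		return ret;	/* -ENOSPC when the idr is full */

	return id;
}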
@@ -623,16 +623,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void *idr_layer)
-{
-	memset(idr_layer, 0, sizeof(struct idr_layer));
-}
-
 void __init idr_init_cache(void)
 {
 	idr_layer_cache = kmem_cache_create("idr_layer_cache",
-				sizeof(struct idr_layer), 0, SLAB_PANIC,
-				idr_cache_ctor);
+				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
 }
 
 /**
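
Together with the kmem_cache_zalloc() change in idr_pre_get() above, dropping idr_cache_ctor() moves the zeroing from the slab constructor to the allocation site: a constructor runs only when an object is first set up in a slab, so relying on it for zeroing requires objects to be freed back in a zeroed state, whereas the _zalloc variant zeroes on every allocation. A rough sketch of the equivalence; the helper name is made up for illustration and is not in the patch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/idr.h>

/* Hypothetical helper: what kmem_cache_zalloc() gives the idr code here. */
static struct idr_layer *alloc_zeroed_layer(struct kmem_cache *cache, gfp_t gfp)
{
	struct idr_layer *new;

	new = kmem_cache_alloc(cache, gfp);
	if (new)
		memset(new, 0, sizeof(*new));	/* done on every allocation, */
	return new;				/* not once per slab object   */
}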
@@ -723,7 +717,7 @@ EXPORT_SYMBOL(ida_pre_get);
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return -ENOSPC.
 *
-* @p_id returns a value in the range 0 ... 0x7fffffff.
+* @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 0f8fc22ed103..4689cb073da4 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
 		goto out;
 
 	/*
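
The conversion above switches from the old by-value cpumask helpers to the pointer-based API: cpumask_of(cpu) returns a const struct cpumask * containing only that CPU, and cpumask_equal() compares two masks through pointers instead of copying whole cpumask_t values on the stack. A small sketch of the same check, assuming a task_struct whose cpus_allowed is still an embedded mask as in this tree; the helper name is illustrative:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Illustrative helper: is task @p allowed to run only on @cpu? */
static bool task_bound_to_cpu(struct task_struct *p, int cpu)
{
	/* cpumask_of(cpu) is a const struct cpumask * with just @cpu set */
	return cpumask_equal(&p->cpus_allowed, cpumask_of(cpu));
}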