Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug        2
-rw-r--r--   lib/hexdump.c            2
-rw-r--r--   lib/kobject.c            7
-rw-r--r--   lib/lockref.c           23
-rw-r--r--   lib/percpu-refcount.c    3
-rw-r--r--   lib/scatterlist.c        3
6 files changed, 30 insertions, 10 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d986eb9..094f3152ec2b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -983,7 +983,7 @@ config DEBUG_KOBJECT
 
 config DEBUG_KOBJECT_RELEASE
 	bool "kobject release debugging"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_OBJECTS_TIMERS
 	help
 	  kobjects are reference counted objects.  This means that their
 	  last reference count put is not predictable, and the kobject can
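
The help text above describes the bug class this option exposes: a structure that embeds a kobject must be freed from the kobject's release() callback, never directly, because the timing of the final reference drop is unpredictable. A minimal sketch of the correct pattern (struct foo, foo_release, and foo_ktype are hypothetical names for illustration, not part of this diff):

#include <linux/kobject.h>
#include <linux/slab.h>

/* With DEBUG_KOBJECT_RELEASE, the final kobject_put() defers this
 * callback, so a driver that kfree()s 'struct foo' directly instead
 * of from here is likely to trip a detectable use-after-free. */
struct foo {
	struct kobject kobj;
};

static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};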
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
  * hex_to_bin - convert a hex digit to its real value
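
hex_asc_upper mirrors the long-standing hex_asc lookup table for callers that need uppercase output. A sketch of how a caller might use it (byte_pack_upper is a hypothetical helper, not part of this diff):

/* Hypothetical helper: emit one byte as two uppercase hex digits. */
static inline char *byte_pack_upper(char *buf, u8 byte)
{
	*buf++ = hex_asc_upper[(byte >> 4) & 0x0f];
	*buf++ = hex_asc_upper[byte & 0x0f];
	return buf;
}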
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..084f7b18d0c0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
 {
 	struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-	pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+	pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
 		 kobject_name(kobj), kobj, __func__, kobj->parent);
 	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
 	schedule_delayed_work(&kobj->release, HZ);
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-	bool may_mount = false;
-
-	if (type == KOBJ_NS_TYPE_NONE)
-		return true;
+	bool may_mount = true;
 
 	spin_lock(&kobj_ns_type_lock);
 	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
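
The second hunk simplifies kobj_ns_current_may_mount(): with may_mount defaulting to true, namespace types that have no registered operations (including KOBJ_NS_TYPE_NONE, which the table lookup already excludes) are permitted to mount without a special-case early return. A caller-side sketch, loosely modeled on how a sysfs-style filesystem could gate its mount path (illustrative, not taken from this diff):

/* Refuse the mount if the current namespace may not see the tree. */
if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
	return ERR_PTR(-EPERM);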
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
 /*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
@@ -14,12 +30,13 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
 
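
For context, CMPXCHG_LOOP is the lock-free fast path shared by the lockref operations in this file; lockref_get(), for example, uses it roughly as below, falling back to the spinlock when the cmpxchg loop cannot make progress (a sketch of the surrounding code, not part of this diff):

void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump the count via the (now relaxed) cmpxchg loop. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: the lock was held, so take it and update under it. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}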
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 7deeb6297a48..1a53d497a8c5 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 	ref->release = release;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
  * percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
 		free_percpu(ref->pcpu_count);
 	}
 }
+EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
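
The three EXPORT_SYMBOL_GPL() additions make the percpu_ref API available to modules. A minimal sketch of module-side usage (my_ref, my_release, and the init/exit pair are hypothetical names):

#include <linux/module.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;

/* Runs once the last reference is dropped after percpu_ref_kill(). */
static void my_release(struct percpu_ref *ref)
{
	pr_info("my_ref: all references gone\n");
}

static int __init my_init(void)
{
	return percpu_ref_init(&my_ref, my_release);
}

static void __exit my_exit(void)
{
	percpu_ref_kill(&my_ref);	/* switch to atomic mode, drop base ref */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");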
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a685c8a79578..d16fa295ae1d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 		miter->__offset += miter->consumed;
 		miter->__remaining -= miter->consumed;
 
-		if (miter->__flags & SG_MITER_TO_SG)
+		if ((miter->__flags & SG_MITER_TO_SG) &&
+		    !PageSlab(miter->page))
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
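
The added !PageSlab() test avoids calling flush_kernel_dcache_page() on slab-backed pages, which is not safe on every architecture. SG_MITER_TO_SG marks iterations that write into the scatterlist, so the flush in sg_miter_stop() only matters on that path; a typical write loop, loosely modeled on this file's sg_copy_buffer() (names in the sketch are illustrative):

struct sg_mapping_iter miter;

sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

while (sg_miter_next(&miter)) {
	/* Write into the mapped chunk; sg_miter_stop() flushes it,
	 * now only when the page is not slab-allocated. */
	memset(miter.addr, 0, miter.length);
	miter.consumed = miter.length;
}

sg_miter_stop(&miter);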