Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug        29
-rw-r--r--   lib/Makefile              1
-rw-r--r--   lib/cpumask.c             2
-rw-r--r--   lib/devres.c             28
-rw-r--r--   lib/lockref.c             3
-rw-r--r--   lib/percpu-refcount.c    86
-rw-r--r--   lib/test_firmware.c     117
7 files changed, 188 insertions(+), 78 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa3545b..f8f45ec0ed46 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -835,7 +835,7 @@ config DEBUG_RT_MUTEXES
 
 config RT_MUTEX_TESTER
 	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES
+	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
 	help
 	  This option enables a rt-mutex tester.
 
@@ -1131,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
 
 	  Say N if you are unsure.
 
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	  There is a class of races that involve an unlikely preemption
-	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	  been set to INT_MIN.  This feature inserts a delay at that
-	  point to increase the probability of these races.
-
-	  Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	  Say N if you are unsure.
-
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
@@ -1649,6 +1635,19 @@ config TEST_BPF
 
 	  If unsure, say N.
 
+config TEST_FIRMWARE
+	tristate "Test firmware loading via userspace interface"
+	default n
+	depends on FW_LOADER
+	help
+	  This builds the "test_firmware" module that creates a userspace
+	  interface for testing firmware loading. This can be used to
+	  control the triggering of firmware loading without needing an
+	  actual firmware-using device. The contents can be rechecked by
+	  userspace.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index ba967a19edba..230b4b1456d6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_MODULE) += test_module.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658eb..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 
 	i %= num_online_cpus();
 
-	if (!cpumask_of_node(numa_node)) {
+	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
 		/* Use all online cpu's for non numa aware system */
 		cpumask_copy(mask, cpu_online_mask);
 	} else {
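
Callers typically pass a device's NUMA node here, which is -1 (NUMA_NO_NODE) on non-NUMA systems, so the added check makes that case fall back to the online mask instead of relying on cpumask_of_node(-1). A hedged caller sketch follows; the device/queue wiring is illustrative and not taken from this patch:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/slab.h>

/* Illustrative caller: pick a CPU for queue 'qidx', preferring CPUs local
 * to the device's NUMA node.  dev_to_node() may legitimately return -1. */
static int example_pick_queue_cpu(struct device *dev, int qidx)
{
	cpumask_var_t mask;
	int cpu = 0;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	/* with this patch, numa_node == -1 behaves like "no NUMA info" */
	if (!cpumask_set_cpu_local_first(qidx, dev_to_node(dev), mask))
		cpu = cpumask_first(mask);

	free_cpumask_var(mask);
	return cpu;
}
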
diff --git a/lib/devres.c b/lib/devres.c
index f562bf6ff71d..6a4aee8a3a7e 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -142,34 +142,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 }
 EXPORT_SYMBOL(devm_ioremap_resource);
 
-/**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
- * @res: resource to be handled
- *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
- *
- *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- *	base = devm_request_and_ioremap(&pdev->dev, res);
- *	if (!base)
- *		return -EADDRNOTAVAIL;
- */
-void __iomem *devm_request_and_ioremap(struct device *dev,
-				       struct resource *res)
-{
-	void __iomem *dest_ptr;
-
-	dest_ptr = devm_ioremap_resource(dev, res);
-	if (IS_ERR(dest_ptr))
-		return NULL;
-
-	return dest_ptr;
-}
-EXPORT_SYMBOL(devm_request_and_ioremap);
-
 #ifdef CONFIG_HAS_IOPORT_MAP
 /*
  * Generic iomap devres
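
The removed wrapper only called devm_ioremap_resource() and flattened its ERR_PTR() return into NULL; callers are expected to use devm_ioremap_resource() directly and propagate the error code. A minimal probe-path sketch of that replacement pattern (the driver and probe function names are illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* propagate the error, don't return NULL */

	/* ... use 'base'; the mapping is managed and released on detach ... */
	return 0;
}
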
diff --git a/lib/lockref.c b/lib/lockref.c
index f07a40d33871..d2233de9a86e 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,5 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
-#include <linux/mutex.h>
 
 #if USE_CMPXCHG_LOCKREF
 
@@ -29,7 +28,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		arch_mutex_cpu_relax();						\
+		cpu_relax_lowlatency();						\
 	}									\
 } while (0)
 
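
For reference, the loop this macro implements is the usual optimistic cmpxchg retry: snapshot the value, attempt the update, and back off before retrying; arch_mutex_cpu_relax() is simply renamed to cpu_relax_lowlatency() here. A standalone sketch of the same pattern on a plain atomic_long_t (not part of lockref.c, purely illustrative):

#include <linux/atomic.h>
#include <asm/processor.h>

/* Optimistic increment: retry on contention, backing off with
 * cpu_relax_lowlatency() just like CMPXCHG_LOOP above. */
static inline void example_opt_inc(atomic_long_t *v)
{
	long old = atomic_long_read(v);
	long prev;

	while ((prev = atomic_long_cmpxchg(v, old, old + 1)) != old) {
		old = prev;		/* reuse the value cmpxchg observed */
		cpu_relax_lowlatency();
	}
}
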
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b7034a51b..fe5a3342e960 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS		(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 {
 	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count = alloc_percpu(unsigned);
-	if (!ref->pcpu_count)
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: perpcu_ref to re-initialize
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init().  @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
  *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return.  To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+	BUG_ON(!pcpu_count);
+	WARN_ON(!percpu_ref_is_zero(ref));
+
+	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+	/*
+	 * Restore per-cpu operation.  smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following PCPU_REF_DEAD clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(pcpu_count, cpu) = 0;
+
+	smp_store_release(&ref->pcpu_count_ptr,
+			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref.  The caller is responsible for ensuring that
+ * @ref is no longer in active use.  The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*
@@ -152,11 +175,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
-	ref->pcpu_count = (unsigned __percpu *)
-		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
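
Taken together, the docstrings above describe the new lifecycle: percpu_ref_init() when constructing an object, percpu_ref_exit() on an init failure or from the release callback, percpu_ref_kill() to begin teardown, and percpu_ref_reinit() to revive a ref that was killed and drained. A hedged embedding-object sketch, with struct foo and its helpers purely illustrative:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref ref;
	/* ... other members ... */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	percpu_ref_exit(&foo->ref);	/* release callback is a valid exit site */
	kfree(foo);
}

static int foo_setup(struct foo *foo)
{
	return 0;	/* stand-in for the rest of the object's initialization */
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	if (percpu_ref_init(&foo->ref, foo_release)) {
		kfree(foo);		/* init itself failed: nothing to exit */
		return NULL;
	}

	if (foo_setup(foo)) {
		percpu_ref_exit(&foo->ref);	/* documented init-failure exit site */
		kfree(foo);
		return NULL;
	}
	return foo;
}

static void foo_destroy(struct foo *foo)
{
	/* switch to atomic mode and drop the initial reference;
	 * foo_release() runs once all other references are gone */
	percpu_ref_kill(&foo->ref);
}
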
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000000..86374c1c49a4
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+				 size_t size, loff_t *offset)
+{
+	ssize_t rc = 0;
+
+	mutex_lock(&test_fw_mutex);
+	if (test_firmware)
+		rc = simple_read_from_buffer(buf, size, offset,
+					     test_firmware->data,
+					     test_firmware->size);
+	mutex_unlock(&test_fw_mutex);
+	return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+	.owner          = THIS_MODULE,
+	.read           = test_fw_misc_read,
+};
+
+static struct miscdevice test_fw_misc_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "test_firmware",
+	.fops           = &test_fw_fops,
+};
+
+static ssize_t trigger_request_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	int rc;
+	char *name;
+
+	name = kzalloc(count + 1, GFP_KERNEL);
+	if (!name)
+		return -ENOSPC;
+	memcpy(name, buf, count);
+
+	pr_info("loading '%s'\n", name);
+
+	mutex_lock(&test_fw_mutex);
+	release_firmware(test_firmware);
+	test_firmware = NULL;
+	rc = request_firmware(&test_firmware, name, dev);
+	if (rc)
+		pr_info("load of '%s' failed: %d\n", name, rc);
+	pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+	mutex_unlock(&test_fw_mutex);
+
+	kfree(name);
+
+	return count;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+static int __init test_firmware_init(void)
+{
+	int rc;
+
+	rc = misc_register(&test_fw_misc_device);
+	if (rc) {
+		pr_err("could not register misc device: %d\n", rc);
+		return rc;
+	}
+	rc = device_create_file(test_fw_misc_device.this_device,
+				&dev_attr_trigger_request);
+	if (rc) {
+		pr_err("could not create sysfs interface: %d\n", rc);
+		goto dereg;
+	}
+
+	pr_warn("interface ready\n");
+
+	return 0;
+dereg:
+	misc_deregister(&test_fw_misc_device);
+	return rc;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+	release_firmware(test_firmware);
+	device_remove_file(test_fw_misc_device.this_device,
+			   &dev_attr_trigger_request);
+	misc_deregister(&test_fw_misc_device);
+	pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
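
As a usage illustration for the module above, one way to exercise the interface from userspace is sketched below. The sysfs path is inferred from the misc device name and the trigger_request attribute, and the firmware file name "test-fw.bin" is made up; it must exist in the normal firmware search path:

/* Hypothetical userspace test: trigger a firmware load via sysfs, then
 * read the loaded blob back through the misc device. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *fw_name = "test-fw.bin";	/* illustrative name */
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/class/misc/test_firmware/trigger_request", O_WRONLY);
	if (fd < 0 || write(fd, fw_name, strlen(fw_name)) < 0) {
		perror("trigger_request");
		return 1;
	}
	close(fd);

	fd = open("/dev/test_firmware", O_RDONLY);
	if (fd < 0) {
		perror("/dev/test_firmware");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* dump the loaded contents */
	close(fd);
	return 0;
}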