Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                                            |   7
-rw-r--r--  lib/Kconfig.debug                                      |   3
-rw-r--r--  lib/Kconfig.ubsan                                      |  29
-rw-r--r--  lib/Makefile                                           |   8
-rw-r--r--  lib/div64.c                                            |   6
-rw-r--r--  lib/iomap_copy.c                                       |  21
-rw-r--r--  lib/irq_poll.c                                         | 222
-rw-r--r--  lib/libcrc32c.c                                        |   2
-rw-r--r--  lib/lru_cache.c                                        |   4
-rw-r--r--  lib/ratelimit.c                                        |   2
-rw-r--r--  lib/string_helpers.c                                   |  63
-rw-r--r--  lib/strncpy_from_user.c                                |  11
-rw-r--r--  lib/strnlen_user.c                                     |  18
-rw-r--r--  lib/test_hexdump.c (renamed from lib/test-hexdump.c)   | 146
-rw-r--r--  lib/ubsan.c                                            | 456
-rw-r--r--  lib/ubsan.h                                            |  84
16 files changed, 1002 insertions, 80 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5a0c1c83cdf0..133ebc0c1773 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
210# compression support is select'ed if needed 210# compression support is select'ed if needed
211# 211#
212config 842_COMPRESS 212config 842_COMPRESS
213 select CRC32
213 tristate 214 tristate
214 215
215config 842_DECOMPRESS 216config 842_DECOMPRESS
217 select CRC32
216 tristate 218 tristate
217 219
218config ZLIB_INFLATE 220config ZLIB_INFLATE
@@ -475,6 +477,11 @@ config DDR
475 information. This data is useful for drivers handling 477 information. This data is useful for drivers handling
476 DDR SDRAM controllers. 478 DDR SDRAM controllers.
477 479
480config IRQ_POLL
481 bool "IRQ polling library"
482 help
483	  Helper library for interrupt mitigation using polling.
484
478config MPILIB 485config MPILIB
479 tristate 486 tristate
480 select CLZ_TAB 487 select CLZ_TAB
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f75a33f29f6e..ecb9e75614bf 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1893,6 +1893,8 @@ source "samples/Kconfig"
1893 1893
1894source "lib/Kconfig.kgdb" 1894source "lib/Kconfig.kgdb"
1895 1895
1896source "lib/Kconfig.ubsan"
1897
1896config ARCH_HAS_DEVMEM_IS_ALLOWED 1898config ARCH_HAS_DEVMEM_IS_ALLOWED
1897 bool 1899 bool
1898 1900
@@ -1919,7 +1921,6 @@ config STRICT_DEVMEM
1919config IO_STRICT_DEVMEM 1921config IO_STRICT_DEVMEM
1920 bool "Filter I/O access to /dev/mem" 1922 bool "Filter I/O access to /dev/mem"
1921 depends on STRICT_DEVMEM 1923 depends on STRICT_DEVMEM
1922 default STRICT_DEVMEM
1923 ---help--- 1924 ---help---
1924 If this option is disabled, you allow userspace (root) access to all 1925 If this option is disabled, you allow userspace (root) access to all
1925 io-memory regardless of whether a driver is actively using that 1926 io-memory regardless of whether a driver is actively using that
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
new file mode 100644
index 000000000000..49518fb48cab
--- /dev/null
+++ b/lib/Kconfig.ubsan
@@ -0,0 +1,29 @@
1config ARCH_HAS_UBSAN_SANITIZE_ALL
2 bool
3
4config UBSAN
5 bool "Undefined behaviour sanity checker"
6 help
7	  This option enables the undefined behaviour sanity checker.
8	  Compile-time instrumentation is used to detect various undefined
9	  behaviours at runtime. Various types of checks may be enabled
10 via boot parameter ubsan_handle (see: Documentation/ubsan.txt).
11
12config UBSAN_SANITIZE_ALL
13 bool "Enable instrumentation for the entire kernel"
14 depends on UBSAN
15 depends on ARCH_HAS_UBSAN_SANITIZE_ALL
16 default y
17 help
18 This option activates instrumentation for the entire kernel.
19 If you don't enable this option, you have to explicitly specify
20 UBSAN_SANITIZE := y for the files/directories you want to check for UB.
21
22config UBSAN_ALIGNMENT
23	bool "Enable checking of pointer alignment"
24 depends on UBSAN
25 default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
26 help
27 This option enables detection of unaligned memory accesses.
28	  Enabling this option on architectures that support unaligned
29 accesses may produce a lot of false positives.
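
For illustration only (not part of this patch): once a file is built with instrumentation (CONFIG_UBSAN_SANITIZE_ALL=y, or UBSAN_SANITIZE := y per object), constructs like the ones in the standalone sketch below are what the __ubsan_handle_*() runtime functions added in lib/ubsan.c further down report. The function name and values are made up for the example, and <limits.h> stands in for the equivalent kernel headers.

#include <limits.h>

int ubsan_demo(int x, int shift, int idx)
{
	int arr[4] = { 0, 1, 2, 3 };
	int r = 0;

	if (x == INT_MAX)
		r += x + 1;		/* signed integer overflow      */
	if (shift < 0 || shift >= 32)
		r += 1 << shift;	/* shift exponent out of bounds */
	if (idx >= 4)
		r += arr[idx];		/* array index out of range     */

	return r;
}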
diff --git a/lib/Makefile b/lib/Makefile
index 180dd4d0dd41..a7c26a41a738 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,7 +31,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
31obj-y += string_helpers.o 31obj-y += string_helpers.o
32obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 32obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
33obj-y += hexdump.o 33obj-y += hexdump.o
34obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o 34obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
35obj-y += kstrtox.o 35obj-y += kstrtox.o
36obj-$(CONFIG_TEST_BPF) += test_bpf.o 36obj-$(CONFIG_TEST_BPF) += test_bpf.o
37obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o 37obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
@@ -154,7 +154,7 @@ obj-$(CONFIG_GLOB) += glob.o
154obj-$(CONFIG_MPILIB) += mpi/ 154obj-$(CONFIG_MPILIB) += mpi/
155obj-$(CONFIG_SIGNATURE) += digsig.o 155obj-$(CONFIG_SIGNATURE) += digsig.o
156 156
157obj-$(CONFIG_CLZ_TAB) += clz_tab.o 157lib-$(CONFIG_CLZ_TAB) += clz_tab.o
158 158
159obj-$(CONFIG_DDR) += jedec_ddr_data.o 159obj-$(CONFIG_DDR) += jedec_ddr_data.o
160 160
@@ -165,6 +165,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
165 165
166obj-$(CONFIG_SG_SPLIT) += sg_split.o 166obj-$(CONFIG_SG_SPLIT) += sg_split.o
167obj-$(CONFIG_STMP_DEVICE) += stmp_device.o 167obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
168obj-$(CONFIG_IRQ_POLL) += irq_poll.o
168 169
169libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ 170libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
170 fdt_empty_tree.o 171 fdt_empty_tree.o
@@ -209,3 +210,6 @@ quiet_cmd_build_OID_registry = GEN $@
209clean-files += oid_registry_data.c 210clean-files += oid_registry_data.c
210 211
211obj-$(CONFIG_UCS2_STRING) += ucs2_string.o 212obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
213obj-$(CONFIG_UBSAN) += ubsan.o
214
215UBSAN_SANITIZE_ubsan.o := n
diff --git a/lib/div64.c b/lib/div64.c
index 62a698a432bc..7f345259c32f 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -13,7 +13,8 @@
13 * 13 *
14 * Code generated for this function might be very inefficient 14 * Code generated for this function might be very inefficient
15 * for some CPUs. __div64_32() can be overridden by linking arch-specific 15 * for some CPUs. __div64_32() can be overridden by linking arch-specific
16 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. 16 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
17 * or by defining a preprocessor macro in arch/include/asm/div64.h.
17 */ 18 */
18 19
19#include <linux/export.h> 20#include <linux/export.h>
@@ -23,6 +24,7 @@
23/* Not needed on 64bit architectures */ 24/* Not needed on 64bit architectures */
24#if BITS_PER_LONG == 32 25#if BITS_PER_LONG == 32
25 26
27#ifndef __div64_32
26uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) 28uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
27{ 29{
28 uint64_t rem = *n; 30 uint64_t rem = *n;
@@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
55 *n = res; 57 *n = res;
56 return rem; 58 return rem;
57} 59}
58
59EXPORT_SYMBOL(__div64_32); 60EXPORT_SYMBOL(__div64_32);
61#endif
60 62
61#ifndef div_s64_rem 63#ifndef div_s64_rem
62s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) 64s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
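
A sketch of what the new #ifndef/#endif guard enables (hypothetical fragment, not taken from any real architecture): an arch's asm/div64.h can supply its own __div64_32() and then define the symbol as a macro, which compiles out the generic copy in lib/div64.c.

/* hypothetical arch/<arch>/include/asm/div64.h fragment */
#include <linux/types.h>

static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	/* a real architecture would use an optimised instruction
	 * sequence here; plain C division is only a placeholder */
	uint32_t rem = *n % base;

	*n /= base;
	return rem;
}
#define __div64_32 __div64_32	/* suppress the version in lib/div64.c */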
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 4527e751b5e0..b8f1d6cbb200 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -42,6 +42,27 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
42EXPORT_SYMBOL_GPL(__iowrite32_copy); 42EXPORT_SYMBOL_GPL(__iowrite32_copy);
43 43
44/** 44/**
45 * __ioread32_copy - copy data from MMIO space, in 32-bit units
46 * @to: destination (must be 32-bit aligned)
47 * @from: source, in MMIO space (must be 32-bit aligned)
48 * @count: number of 32-bit quantities to copy
49 *
50 * Copy data from MMIO space to kernel space, in units of 32 bits at a
51 * time. Order of access is not guaranteed, nor is a memory barrier
52 * performed afterwards.
53 */
54void __ioread32_copy(void *to, const void __iomem *from, size_t count)
55{
56 u32 *dst = to;
57 const u32 __iomem *src = from;
58 const u32 __iomem *end = src + count;
59
60 while (src < end)
61 *dst++ = __raw_readl(src++);
62}
63EXPORT_SYMBOL_GPL(__ioread32_copy);
64
65/**
45 * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units 66 * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
46 * @to: destination, in MMIO space (must be 64-bit aligned) 67 * @to: destination, in MMIO space (must be 64-bit aligned)
47 * @from: source (must be 64-bit aligned) 68 * @from: source (must be 64-bit aligned)
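
A hedged usage sketch for the new helper (hypothetical driver code, not part of the patch, assuming the declaration is made available alongside __iowrite32_copy in linux/io.h): copy a 32-bit-aligned block out of device memory mapped with ioremap(). As the kernel-doc above says, no ordering or barrier is implied, so the caller adds one where it matters.

#include <linux/io.h>
#include <linux/kernel.h>

struct demo_msg {
	u32 words[16];
};

static void demo_read_msg(void __iomem *sram, struct demo_msg *msg)
{
	/* count is in 32-bit units, matching the helper's contract */
	__ioread32_copy(msg->words, sram, ARRAY_SIZE(msg->words));
	rmb();	/* order the MMIO reads before msg is consumed */
}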
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
new file mode 100644
index 000000000000..836f7db4e548
--- /dev/null
+++ b/lib/irq_poll.c
@@ -0,0 +1,222 @@
1/*
2 * Functions related to interrupt-poll handling. This
3 * is similar to NAPI for network devices.
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/bio.h>
9#include <linux/interrupt.h>
10#include <linux/cpu.h>
11#include <linux/irq_poll.h>
12#include <linux/delay.h>
13
14static unsigned int irq_poll_budget __read_mostly = 256;
15
16static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
17
18/**
19 * irq_poll_sched - Schedule a run of the iopoll handler
20 * @iop: The parent iopoll structure
21 *
22 * Description:
23 * Add this irq_poll structure to the pending poll list and trigger the
24 * raise of the blk iopoll softirq.
25 **/
26void irq_poll_sched(struct irq_poll *iop)
27{
28 unsigned long flags;
29
30 if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
31 return;
32 if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
33 return;
34
35 local_irq_save(flags);
36 list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
37 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
38 local_irq_restore(flags);
39}
40EXPORT_SYMBOL(irq_poll_sched);
41
42/**
43 * __irq_poll_complete - Mark this @iop as un-polled again
44 * @iop: The parent iopoll structure
45 *
46 * Description:
47 * See irq_poll_complete(). This function must be called with interrupts
48 * disabled.
49 **/
50static void __irq_poll_complete(struct irq_poll *iop)
51{
52 list_del(&iop->list);
53 smp_mb__before_atomic();
54 clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
55}
56
57/**
58 * irq_poll_complete - Mark this @iop as un-polled again
59 * @iop: The parent iopoll structure
60 *
61 * Description:
62 * If a driver consumes less than the assigned budget in its run of the
63 * iopoll handler, it'll end the polled mode by calling this function. The
64 * iopoll handler will not be invoked again before irq_poll_sched()
65 * is called.
66 **/
67void irq_poll_complete(struct irq_poll *iop)
68{
69 unsigned long flags;
70
71 local_irq_save(flags);
72 __irq_poll_complete(iop);
73 local_irq_restore(flags);
74}
75EXPORT_SYMBOL(irq_poll_complete);
76
77static void irq_poll_softirq(struct softirq_action *h)
78{
79 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
80 int rearm = 0, budget = irq_poll_budget;
81 unsigned long start_time = jiffies;
82
83 local_irq_disable();
84
85 while (!list_empty(list)) {
86 struct irq_poll *iop;
87 int work, weight;
88
89 /*
90 * If softirq window is exhausted then punt.
91 */
92 if (budget <= 0 || time_after(jiffies, start_time)) {
93 rearm = 1;
94 break;
95 }
96
97 local_irq_enable();
98
99 /* Even though interrupts have been re-enabled, this
100 * access is safe because interrupts can only add new
101 * entries to the tail of this list, and only ->poll()
102 * calls can remove this head entry from the list.
103 */
104 iop = list_entry(list->next, struct irq_poll, list);
105
106 weight = iop->weight;
107 work = 0;
108 if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
109 work = iop->poll(iop, weight);
110
111 budget -= work;
112
113 local_irq_disable();
114
115 /*
116		 * Drivers must not modify the iopoll state if they
117 * consume their assigned weight (or more, some drivers can't
118 * easily just stop processing, they have to complete an
119		 * entire mask of commands). In such cases this code
120 * still "owns" the iopoll instance and therefore can
121 * move the instance around on the list at-will.
122 */
123 if (work >= weight) {
124 if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
125 __irq_poll_complete(iop);
126 else
127 list_move_tail(&iop->list, list);
128 }
129 }
130
131 if (rearm)
132 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
133
134 local_irq_enable();
135}
136
137/**
138 * irq_poll_disable - Disable iopoll on this @iop
139 * @iop: The parent iopoll structure
140 *
141 * Description:
142 * Disable io polling and wait for any pending callbacks to have completed.
143 **/
144void irq_poll_disable(struct irq_poll *iop)
145{
146 set_bit(IRQ_POLL_F_DISABLE, &iop->state);
147 while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
148 msleep(1);
149 clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
150}
151EXPORT_SYMBOL(irq_poll_disable);
152
153/**
154 * irq_poll_enable - Enable iopoll on this @iop
155 * @iop: The parent iopoll structure
156 *
157 * Description:
158 * Enable iopoll on this @iop. Note that the handler run will not be
159 *     scheduled; this only marks the instance as active.
160 **/
161void irq_poll_enable(struct irq_poll *iop)
162{
163 BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
164 smp_mb__before_atomic();
165 clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
166}
167EXPORT_SYMBOL(irq_poll_enable);
168
169/**
170 * irq_poll_init - Initialize this @iop
171 * @iop: The parent iopoll structure
172 * @weight: The default weight (or command completion budget)
173 * @poll_fn: The handler to invoke
174 *
175 * Description:
176 * Initialize and enable this irq_poll structure.
177 **/
178void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
179{
180 memset(iop, 0, sizeof(*iop));
181 INIT_LIST_HEAD(&iop->list);
182 iop->weight = weight;
183 iop->poll = poll_fn;
184}
185EXPORT_SYMBOL(irq_poll_init);
186
187static int irq_poll_cpu_notify(struct notifier_block *self,
188 unsigned long action, void *hcpu)
189{
190 /*
191 * If a CPU goes away, splice its entries to the current CPU
192 * and trigger a run of the softirq
193 */
194 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
195 int cpu = (unsigned long) hcpu;
196
197 local_irq_disable();
198 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
199 this_cpu_ptr(&blk_cpu_iopoll));
200 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
201 local_irq_enable();
202 }
203
204 return NOTIFY_OK;
205}
206
207static struct notifier_block irq_poll_cpu_notifier = {
208 .notifier_call = irq_poll_cpu_notify,
209};
210
211static __init int irq_poll_setup(void)
212{
213 int i;
214
215 for_each_possible_cpu(i)
216 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
217
218 open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
219 register_hotcpu_notifier(&irq_poll_cpu_notifier);
220 return 0;
221}
222subsys_initcall(irq_poll_setup);
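
A usage sketch for the API documented above (hypothetical driver, names made up, not part of the patch): the hard IRQ handler only schedules the instance, completions are then reaped from IRQ_POLL_SOFTIRQ context, and the poll callback leaves polled mode when it consumes less than its budget.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct demo_queue {
	struct irq_poll iop;
	/* ... completion ring state ... */
};

static int demo_poll(struct irq_poll *iop, int budget)
{
	struct demo_queue *q = container_of(iop, struct demo_queue, iop);
	int done = 0;

	(void)q;	/* a real driver reaps up to 'budget' completions here */

	if (done < budget)
		irq_poll_complete(iop);	/* leave polled mode */
	return done;
}

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_queue *q = data;

	/* mask the device's completion interrupt here, then: */
	irq_poll_sched(&q->iop);
	return IRQ_HANDLED;
}

/* at probe time: irq_poll_init(&q->iop, 64, demo_poll); */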
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 6a08ce7d6adc..74a54b7f2562 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/crc32c.h>
39 40
40static struct crypto_shash *tfm; 41static struct crypto_shash *tfm;
41 42
@@ -74,3 +75,4 @@ module_exit(libcrc32c_mod_fini);
74MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); 75MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
75MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); 76MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
76MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
78MODULE_SOFTDEP("pre: crc32c");
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 028f5d996eef..28ba40b99337 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -238,7 +238,7 @@ void lc_reset(struct lru_cache *lc)
238 * @seq: the seq_file to print into 238 * @seq: the seq_file to print into
239 * @lc: the lru cache to print statistics of 239 * @lc: the lru cache to print statistics of
240 */ 240 */
241size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) 241void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
242{ 242{
243 /* NOTE: 243 /* NOTE:
244 * total calls to lc_get are 244 * total calls to lc_get are
@@ -250,8 +250,6 @@ size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
250 seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n", 250 seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
251 lc->name, lc->used, lc->nr_elements, 251 lc->name, lc->used, lc->nr_elements,
252 lc->hits, lc->misses, lc->starving, lc->locked, lc->changed); 252 lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
253
254 return 0;
255} 253}
256 254
257static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) 255static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 40e03ea2a967..2c5de86460c5 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
49 if (rs->missed) 49 if (rs->missed)
50 printk(KERN_WARNING "%s: %d callbacks suppressed\n", 50 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
51 func, rs->missed); 51 func, rs->missed);
52 rs->begin = 0; 52 rs->begin = jiffies;
53 rs->printed = 0; 53 rs->printed = 0;
54 rs->missed = 0; 54 rs->missed = 0;
55 } 55 }
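
For context, a typical consumer of this state (illustrative only, names made up, not part of the fix); the one-line change above re-arms rs->begin with the current jiffies when the interval rolls over instead of zeroing it.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* allow at most 10 messages per 5-second interval */
static DEFINE_RATELIMIT_STATE(demo_rs, 5 * HZ, 10);

static void demo_warn(int err)
{
	if (__ratelimit(&demo_rs))
		pr_warn("demo: transient error %d\n", err);
}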
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5939f63d90cd..5c88204b6f1f 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
43 [STRING_UNITS_10] = 1000, 43 [STRING_UNITS_10] = 1000,
44 [STRING_UNITS_2] = 1024, 44 [STRING_UNITS_2] = 1024,
45 }; 45 };
46 int i, j; 46 static const unsigned int rounding[] = { 500, 50, 5 };
47 u32 remainder = 0, sf_cap, exp; 47 int i = 0, j;
48 u32 remainder = 0, sf_cap;
48 char tmp[8]; 49 char tmp[8];
49 const char *unit; 50 const char *unit;
50 51
51 tmp[0] = '\0'; 52 tmp[0] = '\0';
52 i = 0; 53
53 if (!size) 54 if (blk_size == 0)
55 size = 0;
56 if (size == 0)
54 goto out; 57 goto out;
55 58
56 while (blk_size >= divisor[units]) { 59 /* This is Napier's algorithm. Reduce the original block size to
57 remainder = do_div(blk_size, divisor[units]); 60 *
61 * coefficient * divisor[units]^i
62 *
63 * we do the reduction so both coefficients are just under 32 bits so
64 * that multiplying them together won't overflow 64 bits and we keep
65 * as much precision as possible in the numbers.
66 *
67 * Note: it's safe to throw away the remainders here because all the
68 * precision is in the coefficients.
69 */
70 while (blk_size >> 32) {
71 do_div(blk_size, divisor[units]);
58 i++; 72 i++;
59 } 73 }
60 74
61 exp = divisor[units] / (u32)blk_size; 75 while (size >> 32) {
62 /* 76 do_div(size, divisor[units]);
63 * size must be strictly greater than exp here to ensure that remainder
64 * is greater than divisor[units] coming out of the if below.
65 */
66 if (size > exp) {
67 remainder = do_div(size, divisor[units]);
68 remainder *= blk_size;
69 i++; 77 i++;
70 } else {
71 remainder *= size;
72 } 78 }
73 79
80 /* now perform the actual multiplication keeping i as the sum of the
81 * two logarithms */
74 size *= blk_size; 82 size *= blk_size;
75 size += remainder / divisor[units];
76 remainder %= divisor[units];
77 83
84 /* and logarithmically reduce it until it's just under the divisor */
78 while (size >= divisor[units]) { 85 while (size >= divisor[units]) {
79 remainder = do_div(size, divisor[units]); 86 remainder = do_div(size, divisor[units]);
80 i++; 87 i++;
81 } 88 }
82 89
90 /* work out in j how many digits of precision we need from the
91 * remainder */
83 sf_cap = size; 92 sf_cap = size;
84 for (j = 0; sf_cap*10 < 1000; j++) 93 for (j = 0; sf_cap*10 < 1000; j++)
85 sf_cap *= 10; 94 sf_cap *= 10;
86 95
87 if (j) { 96 if (units == STRING_UNITS_2) {
97 /* express the remainder as a decimal. It's currently the
98 * numerator of a fraction whose denominator is
99 * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
88 remainder *= 1000; 100 remainder *= 1000;
89 remainder /= divisor[units]; 101 remainder >>= 10;
102 }
103
104 /* add a 5 to the digit below what will be printed to ensure
105 * an arithmetical round up and carry it through to size */
106 remainder += rounding[j];
107 if (remainder >= 1000) {
108 remainder -= 1000;
109 size += 1;
110 }
111
112 if (j) {
90 snprintf(tmp, sizeof(tmp), ".%03u", remainder); 113 snprintf(tmp, sizeof(tmp), ".%03u", remainder);
91 tmp[j+1] = '\0'; 114 tmp[j+1] = '\0';
92 } 115 }
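
For reference, a caller-side sketch (not part of the patch; the interface is unchanged, only the rounding and overflow handling differ): capacities are passed as a block count plus a block size, and the helper reduces both logarithmically before multiplying so the product cannot overflow 64 bits. The function and example values below are made up.

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void demo_report_capacity(u64 nr_sectors)
{
	char buf[16];

	/* e.g. nr_sectors = 3907029168 with 512-byte blocks prints
	 * roughly "2.00 TB" (decimal) or "1.82 TiB" (binary) */
	string_get_size(nr_sectors, 512, STRING_UNITS_10, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);

	string_get_size(nr_sectors, 512, STRING_UNITS_2, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);
}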
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index e0af6ff73d14..33840324138c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
39 unsigned long c, data; 39 unsigned long c, data;
40 40
41 /* Fall back to byte-at-a-time if we get a page fault */ 41 /* Fall back to byte-at-a-time if we get a page fault */
42 if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) 42 if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
43 break; 43 break;
44 *(unsigned long *)(dst+res) = c; 44 *(unsigned long *)(dst+res) = c;
45 if (has_zero(c, &data, &constants)) { 45 if (has_zero(c, &data, &constants)) {
@@ -55,7 +55,7 @@ byte_at_a_time:
55 while (max) { 55 while (max) {
56 char c; 56 char c;
57 57
58 if (unlikely(__get_user(c,src+res))) 58 if (unlikely(unsafe_get_user(c,src+res)))
59 return -EFAULT; 59 return -EFAULT;
60 dst[res] = c; 60 dst[res] = c;
61 if (!c) 61 if (!c)
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
107 src_addr = (unsigned long)src; 107 src_addr = (unsigned long)src;
108 if (likely(src_addr < max_addr)) { 108 if (likely(src_addr < max_addr)) {
109 unsigned long max = max_addr - src_addr; 109 unsigned long max = max_addr - src_addr;
110 return do_strncpy_from_user(dst, src, count, max); 110 long retval;
111
112 user_access_begin();
113 retval = do_strncpy_from_user(dst, src, count, max);
114 user_access_end();
115 return retval;
111 } 116 }
112 return -EFAULT; 117 return -EFAULT;
113} 118}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 3a5f2b366d84..2625943625d7 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
45 src -= align; 45 src -= align;
46 max += align; 46 max += align;
47 47
48 if (unlikely(__get_user(c,(unsigned long __user *)src))) 48 if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
49 return 0; 49 return 0;
50 c |= aligned_byte_mask(align); 50 c |= aligned_byte_mask(align);
51 51
@@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
61 if (unlikely(max <= sizeof(unsigned long))) 61 if (unlikely(max <= sizeof(unsigned long)))
62 break; 62 break;
63 max -= sizeof(unsigned long); 63 max -= sizeof(unsigned long);
64 if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) 64 if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
65 return 0; 65 return 0;
66 } 66 }
67 res -= align; 67 res -= align;
@@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count)
112 src_addr = (unsigned long)str; 112 src_addr = (unsigned long)str;
113 if (likely(src_addr < max_addr)) { 113 if (likely(src_addr < max_addr)) {
114 unsigned long max = max_addr - src_addr; 114 unsigned long max = max_addr - src_addr;
115 return do_strnlen_user(str, count, max); 115 long retval;
116
117 user_access_begin();
118 retval = do_strnlen_user(str, count, max);
119 user_access_end();
120 return retval;
116 } 121 }
117 return 0; 122 return 0;
118} 123}
@@ -141,7 +146,12 @@ long strlen_user(const char __user *str)
141 src_addr = (unsigned long)str; 146 src_addr = (unsigned long)str;
142 if (likely(src_addr < max_addr)) { 147 if (likely(src_addr < max_addr)) {
143 unsigned long max = max_addr - src_addr; 148 unsigned long max = max_addr - src_addr;
144 return do_strnlen_user(str, ~0ul, max); 149 long retval;
150
151 user_access_begin();
152 retval = do_strnlen_user(str, ~0ul, max);
153 user_access_end();
154 return retval;
145 } 155 }
146 return 0; 156 return 0;
147} 157}
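
A pattern sketch mirroring the change above (hypothetical helper; user_access_begin(), user_access_end() and the error-returning unsafe_get_user() are used exactly in the form this series shows): validate the range, open a single user-access window, do the raw accesses, then close it.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* copy two words from userspace inside a single access window */
static long demo_get_two_words(const unsigned long __user *src,
			       unsigned long *out)
{
	long ret = -EFAULT;

	if (!access_ok(VERIFY_READ, src, 2 * sizeof(*src)))
		return -EFAULT;

	user_access_begin();
	if (unlikely(unsafe_get_user(out[0], src)))
		goto done;
	if (unlikely(unsafe_get_user(out[1], src + 1)))
		goto done;
	ret = 0;
done:
	user_access_end();
	return ret;
}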
diff --git a/lib/test-hexdump.c b/lib/test_hexdump.c
index 5241df36eedf..3f415d8101f3 100644
--- a/lib/test-hexdump.c
+++ b/lib/test_hexdump.c
@@ -42,19 +42,21 @@ static const char * const test_data_8_le[] __initconst = {
42 "e9ac0f9cad319ca6", "0cafb1439919d14c", 42 "e9ac0f9cad319ca6", "0cafb1439919d14c",
43}; 43};
44 44
45static void __init test_hexdump(size_t len, int rowsize, int groupsize, 45#define FILL_CHAR '#'
46 bool ascii) 46
47static unsigned total_tests __initdata;
48static unsigned failed_tests __initdata;
49
50static void __init test_hexdump_prepare_test(size_t len, int rowsize,
51 int groupsize, char *test,
52 size_t testlen, bool ascii)
47{ 53{
48 char test[32 * 3 + 2 + 32 + 1];
49 char real[32 * 3 + 2 + 32 + 1];
50 char *p; 54 char *p;
51 const char * const *result; 55 const char * const *result;
52 size_t l = len; 56 size_t l = len;
53 int gs = groupsize, rs = rowsize; 57 int gs = groupsize, rs = rowsize;
54 unsigned int i; 58 unsigned int i;
55 59
56 hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii);
57
58 if (rs != 16 && rs != 32) 60 if (rs != 16 && rs != 32)
59 rs = 16; 61 rs = 16;
60 62
@@ -73,8 +75,6 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
73 else 75 else
74 result = test_data_1_le; 76 result = test_data_1_le;
75 77
76 memset(test, ' ', sizeof(test));
77
78 /* hex dump */ 78 /* hex dump */
79 p = test; 79 p = test;
80 for (i = 0; i < l / gs; i++) { 80 for (i = 0; i < l / gs; i++) {
@@ -82,24 +82,49 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
82 size_t amount = strlen(q); 82 size_t amount = strlen(q);
83 83
84 strncpy(p, q, amount); 84 strncpy(p, q, amount);
85 p += amount + 1; 85 p += amount;
86
87 *p++ = ' ';
86 } 88 }
87 if (i) 89 if (i)
88 p--; 90 p--;
89 91
90 /* ASCII part */ 92 /* ASCII part */
91 if (ascii) { 93 if (ascii) {
92 p = test + rs * 2 + rs / gs + 1; 94 do {
95 *p++ = ' ';
96 } while (p < test + rs * 2 + rs / gs + 1);
97
93 strncpy(p, data_a, l); 98 strncpy(p, data_a, l);
94 p += l; 99 p += l;
95 } 100 }
96 101
97 *p = '\0'; 102 *p = '\0';
103}
98 104
99 if (strcmp(test, real)) { 105#define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1)
106
107static void __init test_hexdump(size_t len, int rowsize, int groupsize,
108 bool ascii)
109{
110 char test[TEST_HEXDUMP_BUF_SIZE];
111 char real[TEST_HEXDUMP_BUF_SIZE];
112
113 total_tests++;
114
115 memset(real, FILL_CHAR, sizeof(real));
116 hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real),
117 ascii);
118
119 memset(test, FILL_CHAR, sizeof(test));
120 test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test),
121 ascii);
122
123 if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) {
100 pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); 124 pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize);
101 pr_err("Result: '%s'\n", real); 125 pr_err("Result: '%s'\n", real);
102 pr_err("Expect: '%s'\n", test); 126 pr_err("Expect: '%s'\n", test);
127 failed_tests++;
103 } 128 }
104} 129}
105 130
@@ -114,52 +139,72 @@ static void __init test_hexdump_set(int rowsize, bool ascii)
114 test_hexdump(len, rowsize, 1, ascii); 139 test_hexdump(len, rowsize, 1, ascii);
115} 140}
116 141
117static void __init test_hexdump_overflow(bool ascii) 142static void __init test_hexdump_overflow(size_t buflen, size_t len,
143 int rowsize, int groupsize,
144 bool ascii)
118{ 145{
119 char buf[56]; 146 char test[TEST_HEXDUMP_BUF_SIZE];
120 const char *t = test_data_1_le[0]; 147 char buf[TEST_HEXDUMP_BUF_SIZE];
121 size_t l = get_random_int() % sizeof(buf); 148 int rs = rowsize, gs = groupsize;
149 int ae, he, e, f, r;
122 bool a; 150 bool a;
123 int e, r;
124 151
125 memset(buf, ' ', sizeof(buf)); 152 total_tests++;
153
154 memset(buf, FILL_CHAR, sizeof(buf));
126 155
127 r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii); 156 r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii);
157
158 /*
159 * Caller must provide the data length multiple of groupsize. The
160 * calculations below are made with that assumption in mind.
161 */
162 ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */;
163 he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */;
128 164
129 if (ascii) 165 if (ascii)
130 e = 50; 166 e = ae;
131 else 167 else
132 e = 2; 168 e = he;
133 buf[e + 2] = '\0'; 169
134 170 f = min_t(int, e + 1, buflen);
135 if (!l) { 171 if (buflen) {
136 a = r == e && buf[0] == ' '; 172 test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii);
137 } else if (l < 3) { 173 test[f - 1] = '\0';
138 a = r == e && buf[0] == '\0';
139 } else if (l < 4) {
140 a = r == e && !strcmp(buf, t);
141 } else if (ascii) {
142 if (l < 51)
143 a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' ';
144 else
145 a = r == e && buf[50] == '\0' && buf[49] == '.';
146 } else {
147 a = r == e && buf[e] == '\0';
148 } 174 }
175 memset(test + f, FILL_CHAR, sizeof(test) - f);
176
177 a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE);
178
179 buf[sizeof(buf) - 1] = '\0';
149 180
150 if (!a) { 181 if (!a) {
151 pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf)); 182 pr_err("Len: %zu buflen: %zu strlen: %zu\n",
152 pr_err("Result: '%s'\n", buf); 183 len, buflen, strnlen(buf, sizeof(buf)));
184 pr_err("Result: %d '%s'\n", r, buf);
185 pr_err("Expect: %d '%s'\n", e, test);
186 failed_tests++;
153 } 187 }
154} 188}
155 189
190static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
191{
192 unsigned int i = 0;
193 int rs = (get_random_int() % 2 + 1) * 16;
194
195 do {
196 int gs = 1 << i;
197 size_t len = get_random_int() % rs + gs;
198
199 test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
200 } while (i++ < 3);
201}
202
156static int __init test_hexdump_init(void) 203static int __init test_hexdump_init(void)
157{ 204{
158 unsigned int i; 205 unsigned int i;
159 int rowsize; 206 int rowsize;
160 207
161 pr_info("Running tests...\n");
162
163 rowsize = (get_random_int() % 2 + 1) * 16; 208 rowsize = (get_random_int() % 2 + 1) * 16;
164 for (i = 0; i < 16; i++) 209 for (i = 0; i < 16; i++)
165 test_hexdump_set(rowsize, false); 210 test_hexdump_set(rowsize, false);
@@ -168,13 +213,26 @@ static int __init test_hexdump_init(void)
168 for (i = 0; i < 16; i++) 213 for (i = 0; i < 16; i++)
169 test_hexdump_set(rowsize, true); 214 test_hexdump_set(rowsize, true);
170 215
171 for (i = 0; i < 16; i++) 216 for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
172 test_hexdump_overflow(false); 217 test_hexdump_overflow_set(i, false);
173 218
174 for (i = 0; i < 16; i++) 219 for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
175 test_hexdump_overflow(true); 220 test_hexdump_overflow_set(i, true);
221
222 if (failed_tests == 0)
223 pr_info("all %u tests passed\n", total_tests);
224 else
225 pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
176 226
177 return -EINVAL; 227 return failed_tests ? -EINVAL : 0;
178} 228}
179module_init(test_hexdump_init); 229module_init(test_hexdump_init);
230
231static void __exit test_hexdump_exit(void)
232{
233 /* do nothing */
234}
235module_exit(test_hexdump_exit);
236
237MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
180MODULE_LICENSE("Dual BSD/GPL"); 238MODULE_LICENSE("Dual BSD/GPL");
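
For reference, the interface the test exercises (illustrative snippet, function name made up, not part of the test): hex_dump_to_buffer() formats a single row, and its return value is the length the full row would need, which is what the overflow tests above compare against even when buflen is too small to hold it.

#include <linux/kernel.h>
#include <linux/printk.h>

static void demo_dump_row(const u8 *data, size_t len)
{
	char line[16 * 3 + 2 + 16 + 1];	/* worst case for rowsize 16, ascii */
	int need;

	need = hex_dump_to_buffer(data, min_t(size_t, len, 16), 16, 1,
				  line, sizeof(line), true);
	pr_debug("row needs %d chars: %s\n", need, line);
}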
diff --git a/lib/ubsan.c b/lib/ubsan.c
new file mode 100644
index 000000000000..8799ae5e2e42
--- /dev/null
+++ b/lib/ubsan.c
@@ -0,0 +1,456 @@
1/*
2 * UBSAN error reporting functions
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/bitops.h>
14#include <linux/bug.h>
15#include <linux/ctype.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20
21#include "ubsan.h"
22
23const char *type_check_kinds[] = {
24 "load of",
25 "store to",
26 "reference binding to",
27 "member access within",
28 "member call on",
29 "constructor call on",
30 "downcast of",
31 "downcast of"
32};
33
34#define REPORTED_BIT 31
35
36#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN)
37#define COLUMN_MASK (~(1U << REPORTED_BIT))
38#define LINE_MASK (~0U)
39#else
40#define COLUMN_MASK (~0U)
41#define LINE_MASK (~(1U << REPORTED_BIT))
42#endif
43
44#define VALUE_LENGTH 40
45
46static bool was_reported(struct source_location *location)
47{
48 return test_and_set_bit(REPORTED_BIT, &location->reported);
49}
50
51static void print_source_location(const char *prefix,
52 struct source_location *loc)
53{
54 pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
55 loc->line & LINE_MASK, loc->column & COLUMN_MASK);
56}
57
58static bool suppress_report(struct source_location *loc)
59{
60 return current->in_ubsan || was_reported(loc);
61}
62
63static bool type_is_int(struct type_descriptor *type)
64{
65 return type->type_kind == type_kind_int;
66}
67
68static bool type_is_signed(struct type_descriptor *type)
69{
70 WARN_ON(!type_is_int(type));
71 return type->type_info & 1;
72}
73
74static unsigned type_bit_width(struct type_descriptor *type)
75{
76 return 1 << (type->type_info >> 1);
77}
78
79static bool is_inline_int(struct type_descriptor *type)
80{
81 unsigned inline_bits = sizeof(unsigned long)*8;
82 unsigned bits = type_bit_width(type);
83
84 WARN_ON(!type_is_int(type));
85
86 return bits <= inline_bits;
87}
88
89static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
90{
91 if (is_inline_int(type)) {
92 unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
93 return ((s_max)val) << extra_bits >> extra_bits;
94 }
95
96 if (type_bit_width(type) == 64)
97 return *(s64 *)val;
98
99 return *(s_max *)val;
100}
101
102static bool val_is_negative(struct type_descriptor *type, unsigned long val)
103{
104 return type_is_signed(type) && get_signed_val(type, val) < 0;
105}
106
107static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
108{
109 if (is_inline_int(type))
110 return val;
111
112 if (type_bit_width(type) == 64)
113 return *(u64 *)val;
114
115 return *(u_max *)val;
116}
117
118static void val_to_string(char *str, size_t size, struct type_descriptor *type,
119 unsigned long value)
120{
121 if (type_is_int(type)) {
122 if (type_bit_width(type) == 128) {
123#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
124 u_max val = get_unsigned_val(type, value);
125
126 scnprintf(str, size, "0x%08x%08x%08x%08x",
127 (u32)(val >> 96),
128 (u32)(val >> 64),
129 (u32)(val >> 32),
130 (u32)(val));
131#else
132 WARN_ON(1);
133#endif
134 } else if (type_is_signed(type)) {
135 scnprintf(str, size, "%lld",
136 (s64)get_signed_val(type, value));
137 } else {
138 scnprintf(str, size, "%llu",
139 (u64)get_unsigned_val(type, value));
140 }
141 }
142}
143
144static bool location_is_valid(struct source_location *loc)
145{
146 return loc->file_name != NULL;
147}
148
149static DEFINE_SPINLOCK(report_lock);
150
151static void ubsan_prologue(struct source_location *location,
152 unsigned long *flags)
153{
154 current->in_ubsan++;
155 spin_lock_irqsave(&report_lock, *flags);
156
157 pr_err("========================================"
158 "========================================\n");
159 print_source_location("UBSAN: Undefined behaviour in", location);
160}
161
162static void ubsan_epilogue(unsigned long *flags)
163{
164 dump_stack();
165 pr_err("========================================"
166 "========================================\n");
167 spin_unlock_irqrestore(&report_lock, *flags);
168 current->in_ubsan--;
169}
170
171static void handle_overflow(struct overflow_data *data, unsigned long lhs,
172 unsigned long rhs, char op)
173{
174
175 struct type_descriptor *type = data->type;
176 unsigned long flags;
177 char lhs_val_str[VALUE_LENGTH];
178 char rhs_val_str[VALUE_LENGTH];
179
180 if (suppress_report(&data->location))
181 return;
182
183 ubsan_prologue(&data->location, &flags);
184
185 val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
186 val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
187 pr_err("%s integer overflow:\n",
188 type_is_signed(type) ? "signed" : "unsigned");
189 pr_err("%s %c %s cannot be represented in type %s\n",
190 lhs_val_str,
191 op,
192 rhs_val_str,
193 type->type_name);
194
195 ubsan_epilogue(&flags);
196}
197
198void __ubsan_handle_add_overflow(struct overflow_data *data,
199 unsigned long lhs,
200 unsigned long rhs)
201{
202
203 handle_overflow(data, lhs, rhs, '+');
204}
205EXPORT_SYMBOL(__ubsan_handle_add_overflow);
206
207void __ubsan_handle_sub_overflow(struct overflow_data *data,
208 unsigned long lhs,
209 unsigned long rhs)
210{
211 handle_overflow(data, lhs, rhs, '-');
212}
213EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
214
215void __ubsan_handle_mul_overflow(struct overflow_data *data,
216 unsigned long lhs,
217 unsigned long rhs)
218{
219 handle_overflow(data, lhs, rhs, '*');
220}
221EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
222
223void __ubsan_handle_negate_overflow(struct overflow_data *data,
224 unsigned long old_val)
225{
226 unsigned long flags;
227 char old_val_str[VALUE_LENGTH];
228
229 if (suppress_report(&data->location))
230 return;
231
232 ubsan_prologue(&data->location, &flags);
233
234 val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
235
236 pr_err("negation of %s cannot be represented in type %s:\n",
237 old_val_str, data->type->type_name);
238
239 ubsan_epilogue(&flags);
240}
241EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
242
243
244void __ubsan_handle_divrem_overflow(struct overflow_data *data,
245 unsigned long lhs,
246 unsigned long rhs)
247{
248 unsigned long flags;
249 char rhs_val_str[VALUE_LENGTH];
250
251 if (suppress_report(&data->location))
252 return;
253
254 ubsan_prologue(&data->location, &flags);
255
256 val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
257
258 if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1)
259 pr_err("division of %s by -1 cannot be represented in type %s\n",
260 rhs_val_str, data->type->type_name);
261 else
262 pr_err("division by zero\n");
263
264 ubsan_epilogue(&flags);
265}
266EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
267
268static void handle_null_ptr_deref(struct type_mismatch_data *data)
269{
270 unsigned long flags;
271
272 if (suppress_report(&data->location))
273 return;
274
275 ubsan_prologue(&data->location, &flags);
276
277 pr_err("%s null pointer of type %s\n",
278 type_check_kinds[data->type_check_kind],
279 data->type->type_name);
280
281 ubsan_epilogue(&flags);
282}
283
284static void handle_misaligned_access(struct type_mismatch_data *data,
285 unsigned long ptr)
286{
287 unsigned long flags;
288
289 if (suppress_report(&data->location))
290 return;
291
292 ubsan_prologue(&data->location, &flags);
293
294 pr_err("%s misaligned address %p for type %s\n",
295 type_check_kinds[data->type_check_kind],
296 (void *)ptr, data->type->type_name);
297 pr_err("which requires %ld byte alignment\n", data->alignment);
298
299 ubsan_epilogue(&flags);
300}
301
302static void handle_object_size_mismatch(struct type_mismatch_data *data,
303 unsigned long ptr)
304{
305 unsigned long flags;
306
307 if (suppress_report(&data->location))
308 return;
309
310 ubsan_prologue(&data->location, &flags);
311	pr_err("%s address %pK with insufficient space\n",
312 type_check_kinds[data->type_check_kind],
313 (void *) ptr);
314 pr_err("for an object of type %s\n", data->type->type_name);
315 ubsan_epilogue(&flags);
316}
317
318void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
319 unsigned long ptr)
320{
321
322 if (!ptr)
323 handle_null_ptr_deref(data);
324 else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
325		handle_misaligned_access(data, ptr);
326 else
327 handle_object_size_mismatch(data, ptr);
328}
329EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
330
331void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
332{
333 unsigned long flags;
334
335 if (suppress_report(&data->location))
336 return;
337
338 ubsan_prologue(&data->location, &flags);
339
340 pr_err("null pointer returned from function declared to never return null\n");
341
342 if (location_is_valid(&data->attr_location))
343 print_source_location("returns_nonnull attribute specified in",
344 &data->attr_location);
345
346 ubsan_epilogue(&flags);
347}
348EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
349
350void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
351 unsigned long bound)
352{
353 unsigned long flags;
354 char bound_str[VALUE_LENGTH];
355
356 if (suppress_report(&data->location))
357 return;
358
359 ubsan_prologue(&data->location, &flags);
360
361 val_to_string(bound_str, sizeof(bound_str), data->type, bound);
362 pr_err("variable length array bound value %s <= 0\n", bound_str);
363
364 ubsan_epilogue(&flags);
365}
366EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
367
368void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
369 unsigned long index)
370{
371 unsigned long flags;
372 char index_str[VALUE_LENGTH];
373
374 if (suppress_report(&data->location))
375 return;
376
377 ubsan_prologue(&data->location, &flags);
378
379 val_to_string(index_str, sizeof(index_str), data->index_type, index);
380 pr_err("index %s is out of range for type %s\n", index_str,
381 data->array_type->type_name);
382 ubsan_epilogue(&flags);
383}
384EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
385
386void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
387 unsigned long lhs, unsigned long rhs)
388{
389 unsigned long flags;
390 struct type_descriptor *rhs_type = data->rhs_type;
391 struct type_descriptor *lhs_type = data->lhs_type;
392 char rhs_str[VALUE_LENGTH];
393 char lhs_str[VALUE_LENGTH];
394
395 if (suppress_report(&data->location))
396 return;
397
398 ubsan_prologue(&data->location, &flags);
399
400 val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
401 val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
402
403 if (val_is_negative(rhs_type, rhs))
404 pr_err("shift exponent %s is negative\n", rhs_str);
405
406 else if (get_unsigned_val(rhs_type, rhs) >=
407 type_bit_width(lhs_type))
408 pr_err("shift exponent %s is too large for %u-bit type %s\n",
409 rhs_str,
410 type_bit_width(lhs_type),
411 lhs_type->type_name);
412 else if (val_is_negative(lhs_type, lhs))
413 pr_err("left shift of negative value %s\n",
414 lhs_str);
415 else
416 pr_err("left shift of %s by %s places cannot be"
417 " represented in type %s\n",
418 lhs_str, rhs_str,
419 lhs_type->type_name);
420
421 ubsan_epilogue(&flags);
422}
423EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
424
425
426void __noreturn
427__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
428{
429 unsigned long flags;
430
431 ubsan_prologue(&data->location, &flags);
432 pr_err("calling __builtin_unreachable()\n");
433 ubsan_epilogue(&flags);
434 panic("can't return from __builtin_unreachable()");
435}
436EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
437
438void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
439 unsigned long val)
440{
441 unsigned long flags;
442 char val_str[VALUE_LENGTH];
443
444 if (suppress_report(&data->location))
445 return;
446
447 ubsan_prologue(&data->location, &flags);
448
449 val_to_string(val_str, sizeof(val_str), data->type, val);
450
451 pr_err("load of value %s is not a valid value for type %s\n",
452 val_str, data->type->type_name);
453
454 ubsan_epilogue(&flags);
455}
456EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
diff --git a/lib/ubsan.h b/lib/ubsan.h
new file mode 100644
index 000000000000..b2d18d4a53f5
--- /dev/null
+++ b/lib/ubsan.h
@@ -0,0 +1,84 @@
1#ifndef _LIB_UBSAN_H
2#define _LIB_UBSAN_H
3
4enum {
5 type_kind_int = 0,
6 type_kind_float = 1,
7 type_unknown = 0xffff
8};
9
10struct type_descriptor {
11 u16 type_kind;
12 u16 type_info;
13 char type_name[1];
14};
15
16struct source_location {
17 const char *file_name;
18 union {
19 unsigned long reported;
20 struct {
21 u32 line;
22 u32 column;
23 };
24 };
25};
26
27struct overflow_data {
28 struct source_location location;
29 struct type_descriptor *type;
30};
31
32struct type_mismatch_data {
33 struct source_location location;
34 struct type_descriptor *type;
35 unsigned long alignment;
36 unsigned char type_check_kind;
37};
38
39struct nonnull_arg_data {
40 struct source_location location;
41 struct source_location attr_location;
42 int arg_index;
43};
44
45struct nonnull_return_data {
46 struct source_location location;
47 struct source_location attr_location;
48};
49
50struct vla_bound_data {
51 struct source_location location;
52 struct type_descriptor *type;
53};
54
55struct out_of_bounds_data {
56 struct source_location location;
57 struct type_descriptor *array_type;
58 struct type_descriptor *index_type;
59};
60
61struct shift_out_of_bounds_data {
62 struct source_location location;
63 struct type_descriptor *lhs_type;
64 struct type_descriptor *rhs_type;
65};
66
67struct unreachable_data {
68 struct source_location location;
69};
70
71struct invalid_value_data {
72 struct source_location location;
73 struct type_descriptor *type;
74};
75
76#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
77typedef __int128 s_max;
78typedef unsigned __int128 u_max;
79#else
80typedef s64 s_max;
81typedef u64 u_max;
82#endif
83
84#endif