Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           34
-rw-r--r--  lib/Makefile                 4
-rw-r--r--  lib/atomic64.c              11
-rw-r--r--  lib/bitmap.c                12
-rw-r--r--  lib/checksum.c              10
-rw-r--r--  lib/decompress_bunzip2.c    24
-rw-r--r--  lib/decompress_inflate.c    10
-rw-r--r--  lib/decompress_unlzma.c     23
-rw-r--r--  lib/dma-debug.c            203
-rw-r--r--  lib/dynamic_debug.c          2
-rw-r--r--  lib/flex_array.c           268
-rw-r--r--  lib/gcd.c                   18
-rw-r--r--  lib/is_single_threaded.c    61
-rw-r--r--  lib/lmb.c                    2
-rw-r--r--  lib/scatterlist.c           16
15 files changed, 564 insertions, 134 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a2129..fbb87cf138c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -340,8 +340,6 @@ config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
 		!MEMORY_HOTPLUG
-	select DEBUG_SLAB if SLAB
-	select SLUB_DEBUG if SLUB
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -355,9 +353,24 @@ config DEBUG_KMEMLEAK
 	  allocations. See Documentation/kmemleak.txt for more
 	  details.
 
+	  Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
+	  of finding leaks due to the slab objects poisoning.
+
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+	int "Maximum kmemleak early log entries"
+	depends on DEBUG_KMEMLEAK
+	range 200 2000
+	default 400
+	help
+	  Kmemleak must track all the memory allocations to avoid
+	  reporting false positives. Since memory may be allocated or
+	  freed before kmemleak is initialised, an early log buffer is
+	  used to store these actions. If kmemleak reports "early log
+	  buffer exceeded", please increase this value.
+
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
 	depends on DEBUG_KMEMLEAK
@@ -472,7 +485,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -640,6 +653,21 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+config DEBUG_CREDENTIALS
+	bool "Debug credential management"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on some debug checking for credential
+	  management. The additional code keeps track of the number of
+	  pointers from task_structs to any given cred struct, and checks to
+	  see that this number never exceeds the usage count of the cred
+	  struct.
+
+	  Furthermore, if SELinux is enabled, this also checks that the
+	  security pointer in the cred struct is never seen to be invalid.
+
+	  If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
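
For quick reference, a hedged sketch of a .config fragment that exercises the options added above; the 800 is illustrative (any value in the 200-2000 range is accepted):

	CONFIG_DEBUG_KMEMLEAK=y
	CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=800
	CONFIG_DEBUG_CREDENTIALS=y
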
diff --git a/lib/Makefile b/lib/Makefile
index 8e9bcf9d3261..2e78277eff9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o flex_array.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o
+	 string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/atomic64.c b/lib/atomic64.c
index c5e725562416..8bee16ec7524 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,6 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/atomic.h>
 
 /*
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_read);
 
 void atomic64_set(atomic64_t *v, long long i)
 {
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i)
 	v->counter = i;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v)
 	v->counter += a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_add_return);
 
 void atomic64_sub(long long a, atomic64_t *v)
 {
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v)
 	v->counter -= a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_sub_return);
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_cmpxchg);
 
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_xchg);
 
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(atomic64_add_unless);
 
 static int init_atomic64_lock(void)
 {
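
With the accessors now exported, modules can use the generic spinlock-backed 64-bit atomics on 32-bit machines. A minimal sketch, assuming the generic <asm/atomic.h> implementation above (total_bytes, len and limit are illustrative names):

	static atomic64_t total_bytes = ATOMIC64_INIT(0);

	/* in some hot path */
	atomic64_add(len, &total_bytes);
	if (atomic64_read(&total_bytes) > limit)
		atomic64_set(&total_bytes, 0);
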
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_shift_left);
 
-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & ~bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
 
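
The switch from void to int lets a caller learn whether the destination ended up non-empty without a second pass over it. A hedged sketch (src1 and src2 are illustrative):

	DECLARE_BITMAP(dst, 128);

	if (!__bitmap_and(dst, src1, src2, 128))
		pr_debug("masks do not overlap\n");
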
diff --git a/lib/checksum.c b/lib/checksum.c
index 12e5a1c91cda..b2e2fd468461 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -55,7 +55,11 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		goto out;
 	odd = 1 & (unsigned long) buff;
 	if (odd) {
+#ifdef __LITTLE_ENDIAN
 		result = *buff;
+#else
+		result += (*buff << 8);
+#endif
 		len--;
 		buff++;
 	}
@@ -71,7 +75,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		if (count) {
 			unsigned long carry = 0;
 			do {
-				unsigned long w = *(unsigned long *) buff;
+				unsigned long w = *(unsigned int *) buff;
 				count--;
 				buff += 4;
 				result += carry;
@@ -87,7 +91,11 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 		}
 	}
 	if (len & 1)
+#ifdef __LITTLE_ENDIAN
+		result += *buff;
+#else
 		result += (*buff << 8);
+#endif
 	result = from32to16(result);
 	if (odd)
 		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
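
The new #ifdef __LITTLE_ENDIAN branches keep do_csum() consistent with the network-order view of the 16-bit one's complement sum, in which a byte at an even offset is the high-order half of its word. A byte-at-a-time reference of that sum, offered only as a hedged illustration (csum_ref is a hypothetical name, not kernel code):

	static unsigned int csum_ref(const unsigned char *buf, int len)
	{
		unsigned int sum = 0;
		int i;

		/* even offsets land in the high byte, odd ones in the low byte */
		for (i = 0; i < len; i++)
			sum += (i & 1) ? buf[i] : buf[i] << 8;
		/* fold the carries back in (end-around carry) */
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}
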
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 708e2a86d87b..600f473a5610 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -45,12 +45,14 @@
 */
 
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/bunzip2.h>
-#endif /* !STATIC */
+#include <linux/slab.h>
+#endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
-	else
-		len -= 4; /* Uncompressed size hack active in pre-boot
-			     environment */
+
 	if (!outbuf) {
 		error("Could not allocate output bufer");
 		return -1;
@@ -733,4 +733,14 @@ exit_0:
 	return i;
 }
 
-#define decompress bunzip2
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *outbuf,
+			int *pos,
+			void(*error_fn)(char *x))
+{
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+}
+#endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index e36b296fc9f8..68dfce59c1b8 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,13 +19,13 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
+#include <linux/slab.h>
 
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
-#define INBUF_LEN (16*1024)
+#define GZIP_IOBUF_SIZE (16*1024)
 
 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
@@ -55,7 +55,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	if (buf)
 		zbuf = buf;
 	else {
-		zbuf = malloc(INBUF_LEN);
+		zbuf = malloc(GZIP_IOBUF_SIZE);
 		len = 0;
 	}
 	if (!zbuf) {
@@ -77,7 +77,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	}
 
 	if (len == 0)
-		len = fill(zbuf, INBUF_LEN);
+		len = fill(zbuf, GZIP_IOBUF_SIZE);
 
 	/* verify the gzip header */
 	if (len < 10 ||
@@ -113,7 +113,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	while (rc == Z_OK) {
 		if (strm->avail_in == 0) {
 			/* TODO: handle case where both pos and fill are set */
-			len = fill(zbuf, INBUF_LEN);
+			len = fill(zbuf, GZIP_IOBUF_SIZE);
 			if (len < 0) {
 				rc = -1;
 				error("read error");
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32123a1340e6..0b954e04bd30 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -29,12 +29,14 @@
  *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/unlzma.h>
+#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
 
@@ -543,9 +545,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
 	int ret = -1;
 
 	set_error_fn(error_fn);
-	if (!flush)
-		in_len -= 4; /* Uncompressed size hack active in pre-boot
-				environment */
+
 	if (buf)
 		inbuf = buf;
 	else
@@ -645,4 +645,15 @@ exit_0:
 	return ret;
 }
 
-#define decompress unlzma
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int in_len,
+			      int(*fill)(void*, unsigned int),
+			      int(*flush)(void*, unsigned int),
+			      unsigned char *output,
+			      int *posp,
+			      void(*error_fn)(char *x)
+	)
+{
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+}
+#endif
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d9..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -156,9 +156,13 @@ static bool driver_filter(struct device *dev)
 		return true;
 
 	/* driver filter on and initialized */
-	if (current_driver && dev->driver == current_driver)
+	if (current_driver && dev && dev->driver == current_driver)
 		return true;
 
+	/* driver filter on, but we can't filter on a NULL device... */
+	if (!dev)
+		return false;
+
 	if (current_driver || !current_driver_name[0])
 		return false;
 
@@ -183,17 +187,17 @@ static bool driver_filter(struct device *dev)
 	return ret;
 }
 
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
 		if (driver_filter(dev) &&			\
 		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
-			     dev_driver_string(dev),		\
-			     dev_name(dev) , ## arg);		\
+			     dev ? dev_driver_string(dev) : "NULL", \
+			     dev ? dev_name(dev) : "NULL", ## arg); \
 			dump_entry_trace(entry);		\
 		}						\
 		if (!show_all_errors && show_num_errors > 0)	\
 			show_num_errors -= 1;			\
 	} while (0);
 
 /*
@@ -262,11 +266,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		 */
 		matches += 1;
 		match_lvl = 0;
-		entry->size == ref->size ? ++match_lvl : match_lvl;
-		entry->type == ref->type ? ++match_lvl : match_lvl;
-		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+		entry->size == ref->size ? ++match_lvl : 0;
+		entry->type == ref->type ? ++match_lvl : 0;
+		entry->direction == ref->direction ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-		if (match_lvl == 3) {
+		if (match_lvl == 4) {
 			/* perfect-fit - return the result */
 			return entry;
 		} else if (match_lvl > last_lvl) {
@@ -715,7 +720,7 @@ void dma_debug_init(u32 num_entries)
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
 	if (dma_debug_fs_init() != 0) {
@@ -855,90 +860,85 @@ static void check_for_stack(struct device *dev, void *addr)
 			"stack [addr=%p]\n", addr);
 }
 
-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-	void *addr2 = (char *)addr + size;
+	unsigned long a1 = (unsigned long)addr;
+	unsigned long b1 = a1 + len;
+	unsigned long a2 = (unsigned long)start;
+	unsigned long b2 = (unsigned long)end;
 
-	return ((addr >= start && addr < end) ||
-		(addr2 >= start && addr2 < end) ||
-		((addr < start) && (addr2 >= end)));
+	return !(b1 <= a2 || a1 >= b2);
 }
 
-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, size, _text, _etext) ||
-	    overlap(addr, size, __start_rodata, __end_rodata))
-		err_printk(dev, NULL, "DMA-API: device driver maps "
-				"memory from kernel text or rodata "
-				"[addr=%p] [size=%llu]\n", addr, size);
+	if (overlap(addr, len, _text, _etext) ||
+	    overlap(addr, len, __start_rodata, __end_rodata))
+		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev            = dev,
-		.dev_addr       = addr,
-		.size           = size,
-		.direction      = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
 			   "to sync DMA memory it has not allocated "
 			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   (unsigned long long)addr, size);
+			   (unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
 				" DMA memory outside allocated range "
 				"[device address=0x%016llx] "
-				"[allocation size=%llu bytes] [sync offset=%llu] "
-				"[sync size=%llu]\n", entry->dev_addr, entry->size,
-				offset, size);
+				"[allocation size=%llu bytes] "
+				"[sync offset+size=%llu]\n",
				entry->dev_addr, entry->size,
+				ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"DMA memory with different direction "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
+				(unsigned long long)ref->dev_addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-		      !(direction == DMA_TO_DEVICE))
+		      !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"device read-only DMA memory for cpu "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				(unsigned long long)ref->dev_addr, entry->size,
+				dir2name[entry->direction],
+				dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-		       !(direction == DMA_FROM_DEVICE))
+		       !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 				"device write-only DMA memory to device "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				(unsigned long long)addr, entry->size,
+				(unsigned long long)ref->dev_addr, entry->size,
 				dir2name[entry->direction],
-				dir2name[direction]);
+				dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
@@ -972,7 +972,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->type = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}
@@ -1036,19 +1037,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev      = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size     = sg_dma_len(s),
-
-	bucket       = get_hash_bucket(&ref, &flags);
-	entry        = hash_bucket_find(bucket, &ref);
+	bucket       = get_hash_bucket(ref, &flags);
+	entry        = hash_bucket_find(bucket, ref);
 	mapped_ents  = 0;
 
 	if (entry)
@@ -1076,16 +1074,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.dev_addr = sg_dma_address(s),
 			.size = sg_dma_len(s),
 			.direction = dir,
-			.sg_call_ents = 0,
+			.sg_call_ents = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (!i) {
-			ref.sg_call_ents = nelems;
-			mapped_ents = get_nr_mapped_entries(dev, s);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1140,10 +1136,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1151,10 +1156,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
 				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1163,10 +1177,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					 unsigned long offset, size_t size,
 					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1175,10 +1198,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					    unsigned long offset,
 					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
@@ -1192,14 +1224,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type = dma_debug_sg,
+			.dev = dev,
+			.paddr = sg_phys(s),
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
+			.direction = direction,
+			.sg_call_ents = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1256,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type = dma_debug_sg,
+			.dev = dev,
+			.paddr = sg_phys(s),
+			.dev_addr = sg_dma_address(s),
+			.size = sg_dma_len(s),
+			.direction = direction,
+			.sg_call_ents = nelems,
+		};
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
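
For orientation, a hedged sketch of the driver-side sequence these checks police (dev, buf and len are illustrative); syncing with a direction other than the one used at map time is exactly what check_sync() now reports:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... device DMAs into buf ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... CPU may now read buf ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
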
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 833139ce1e22..e22c148e4b7f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,
 
 		if (!newflags)
 			dt->num_enabled--;
-		else if (!dp-flags)
+		else if (!dp->flags)
 			dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags) {
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 000000000000..7baed2fc3bc8
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,268 @@
+/*
+ * Flexible array managed in PAGE_SIZE parts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Dave Hansen <dave@linux.vnet.ibm.com>
+ */
+
+#include <linux/flex_array.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+struct flex_array_part {
+	char elements[FLEX_ARRAY_PART_SIZE];
+};
+
+static inline int __elements_per_part(int element_size)
+{
+	return FLEX_ARRAY_PART_SIZE / element_size;
+}
+
+static inline int bytes_left_in_base(void)
+{
+	int element_offset = offsetof(struct flex_array, parts);
+	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
+	return bytes_left;
+}
+
+static inline int nr_base_part_ptrs(void)
+{
+	return bytes_left_in_base() / sizeof(struct flex_array_part *);
+}
+
+/*
+ * If a user requests an allocation which is small
+ * enough, we may simply use the space in the
+ * flex_array->parts[] array to store the user
+ * data.
+ */
+static inline int elements_fit_in_base(struct flex_array *fa)
+{
+	int data_size = fa->element_size * fa->total_nr_elements;
+	if (data_size <= bytes_left_in_base())
+		return 1;
+	return 0;
+}
+
+/**
+ * flex_array_alloc - allocate a new flexible array
+ * @element_size: the size of individual elements in the array
+ * @total: total number of elements that this should hold
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * @total is used to size internal structures. If the user ever
+ * accesses any array indexes >=@total, it will produce errors.
+ *
+ * The maximum number of elements is defined as: the number of
+ * elements that can be stored in a page times the number of
+ * page pointers that we can fit in the base structure or (using
+ * integer math):
+ *
+ * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ *
+ * Here's a table showing example capacities. Note that the maximum
+ * index that the get/put() functions is just nr_objects-1. This
+ * basically means that you get 4MB of storage on 32-bit and 2MB on
+ * 64-bit.
+ *
+ *
+ * Element size | Objects | Objects |
+ * PAGE_SIZE=4k |  32-bit |  64-bit |
+ * ---------------------------------|
+ *      1 bytes | 4186112 | 2093056 |
+ *      2 bytes | 2093056 | 1046528 |
+ *      3 bytes | 1395030 |  697515 |
+ *      4 bytes | 1046528 |  523264 |
+ *     32 bytes |  130816 |   65408 |
+ *     33 bytes |  126728 |   63364 |
+ *   2048 bytes |    2044 |    1022 |
+ *   2049 bytes |    1022 |     511 |
+ *       void * | 1046528 |  261632 |
+ *
+ * Since 64-bit pointers are twice the size, we lose half the
+ * capacity in the base structure. Also note that no effort is made
+ * to efficiently pack objects across page boundaries.
+ */
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
+{
+	struct flex_array *ret;
+	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+
+	/* max_size will end up 0 if element_size > PAGE_SIZE */
+	if (total > max_size)
+		return NULL;
+	ret = kzalloc(sizeof(struct flex_array), flags);
+	if (!ret)
+		return NULL;
+	ret->element_size = element_size;
+	ret->total_nr_elements = total;
+	return ret;
+}
+
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
+{
+	return element_nr / __elements_per_part(fa->element_size);
+}
+
+/**
+ * flex_array_free_parts - just free the second-level pages
+ *
+ * This is to be used in cases where the base 'struct flex_array'
+ * has been statically allocated and should not be free.
+ */
+void flex_array_free_parts(struct flex_array *fa)
+{
+	int part_nr;
+	int max_part = nr_base_part_ptrs();
+
+	if (elements_fit_in_base(fa))
+		return;
+	for (part_nr = 0; part_nr < max_part; part_nr++)
+		kfree(fa->parts[part_nr]);
+}
+
+void flex_array_free(struct flex_array *fa)
+{
+	flex_array_free_parts(fa);
+	kfree(fa);
+}
+
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
+{
+	unsigned int part_offset;
+
+	part_offset = element_nr % __elements_per_part(fa->element_size);
+	return part_offset * fa->element_size;
+}
+
+static struct flex_array_part *
+__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
+{
+	struct flex_array_part *part = fa->parts[part_nr];
+	if (!part) {
+		/*
+		 * This leaves the part pages uninitialized
+		 * and with potentially random data, just
+		 * as if the user had kmalloc()'d the whole.
+		 * __GFP_ZERO can be used to zero it.
+		 */
+		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		if (!part)
+			return NULL;
+		fa->parts[part_nr] = part;
+	}
+	return part;
+}
+
+/**
+ * flex_array_put - copy data into the array at @element_nr
+ * @src: address of data to copy into the array
+ * @element_nr: index of the position in which to insert
+ * 		the new element.
+ *
+ * Note that this *copies* the contents of @src into
+ * the array. If you are trying to store an array of
+ * pointers, make sure to pass in &ptr instead of ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memcpy(dst, src, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_prealloc - guarantee that array space exists
+ * @start: index of first array element for which space is allocated
+ * @end: index of last (inclusive) element for which space is allocated
+ *
+ * This will guarantee that no future calls to flex_array_put()
+ * will allocate memory. It can be used if you are expecting to
+ * be holding a lock or in some atomic context while writing
+ * data into the array.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
+{
+	int start_part;
+	int end_part;
+	int part_nr;
+	struct flex_array_part *part;
+
+	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		return 0;
+	start_part = fa_element_to_part_nr(fa, start);
+	end_part = fa_element_to_part_nr(fa, end);
+	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * flex_array_get - pull data back out of the array
+ * @element_nr: index of the element to fetch from the array
+ *
+ * Returns a pointer to the data at index @element_nr. Note
+ * that this is a copy of the data that was passed in. If you
+ * are using this to store pointers, you'll get back &ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+
+	if (element_nr >= fa->total_nr_elements)
+		return NULL;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
+	return &part->elements[index_inside_part(fa, element_nr)];
+}
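
A minimal usage sketch of the API added above, under its stated locking rules; the struct tag, index and values are illustrative:

	static int example(void)
	{
		struct foo { int a, b; };
		struct foo item = { .a = 1, .b = 2 };
		struct flex_array *fa;
		struct foo *p;
		int err;

		fa = flex_array_alloc(sizeof(struct foo), 1000, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;
		err = flex_array_put(fa, 13, &item, GFP_KERNEL);
		if (err) {	/* -ENOSPC (bad index) or -ENOMEM */
			flex_array_free(fa);
			return err;
		}
		p = flex_array_get(fa, 13);	/* pointer to the stored copy */
		err = (p && p->a == 1) ? 0 : -EINVAL;
		flex_array_free(fa);
		return err;
	}
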
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 000000000000..f879033d9822
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/module.h>
+
+/* Greatest common divisor */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+	unsigned long r;
+
+	if (a < b)
+		swap(a, b);
+	while ((r = a % b) != 0) {
+		a = b;
+		b = r;
+	}
+	return b;
+}
+EXPORT_SYMBOL_GPL(gcd);
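
A usage sketch for the new helper, reducing a ratio to lowest terms (the values are illustrative). Note the loop assumes the smaller argument is non-zero, since a % b is evaluated with b as the smaller of the two:

	unsigned long num = 48, den = 18;
	unsigned long g = gcd(num, den);	/* 6 */

	num /= g;	/* 8 */
	den /= g;	/* 3 */
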
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
+	struct task_struct *task = current;
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
 
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
 
-	return true;
+	ret = false;
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+			/*
+			 * t->mm == NULL. Make sure next_thread/next_task
+			 * will see other CLONE_VM tasks which might be
+			 * forked before exiting.
+			 */
+			smp_rmb();
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	return ret;
 }
diff --git a/lib/lmb.c b/lib/lmb.c
index e4a6482d8b26..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e404e908..0d475d8167bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 	miter->__sg = sgl;
 	miter->__nents = nents;
 	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
 	miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 	if (miter->addr) {
 		miter->__offset += miter->consumed;
 
+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
 		if (miter->__flags & SG_MITER_ATOMIC) {
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	unsigned int offset = 0;
 	struct sg_mapping_iter miter;
 	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, sg_flags);
 
 	local_irq_save(flags);
 
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 		if (to_buffer)
 			memcpy(buf + offset, miter.addr, len);
-		else {
+		else
 			memcpy(miter.addr, buf + offset, len);
-			flush_kernel_dcache_page(miter.page);
-		}
 
 		offset += len;
 	}
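
A hedged sketch of a read loop built on the new direction flags; sg_read is a hypothetical helper, and sg_copy_buffer() above additionally sets SG_MITER_ATOMIC and brackets the walk with local_irq_save/restore:

	static size_t sg_read(struct scatterlist *sgl, unsigned int nents,
			      void *buf, size_t buflen)
	{
		struct sg_mapping_iter miter;
		size_t offset = 0;

		sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter) && offset < buflen) {
			size_t len = min(miter.length, buflen - offset);

			memcpy(buf + offset, miter.addr, len);
			offset += len;
		}
		sg_miter_stop(&miter);
		return offset;
	}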