author     Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:54 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:54 -0400
commit     9134d02bc0af4a8747d448d1f811ec5f8eb96df6 (patch)
tree       704c3e5dcc10f360815c4868a74711f82fb62e27 /lib
parent     bbb20089a3275a19e475dbc21320c3742e3ca423 (diff)
parent     80ffb3cceaefa405f2ecd46d66500ed8d53efe74 (diff)

Merge commit 'md/for-linus' into async-tx-next

Conflicts:
	drivers/md/raid5.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug         |  12
-rw-r--r--  lib/Makefile              |   2
-rw-r--r--  lib/atomic64.c            |  11
-rw-r--r--  lib/decompress_bunzip2.c  |  24
-rw-r--r--  lib/decompress_inflate.c  |  10
-rw-r--r--  lib/decompress_unlzma.c   |  23
-rw-r--r--  lib/dma-debug.c           |  26
-rw-r--r--  lib/dynamic_debug.c       |   2
-rw-r--r--  lib/flex_array.c          | 267
-rw-r--r--  lib/scatterlist.c         |  16
10 files changed, 356 insertions, 37 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a1a06e..12327b2bb785 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -359,6 +359,18 @@ config DEBUG_KMEMLEAK
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+	int "Maximum kmemleak early log entries"
+	depends on DEBUG_KMEMLEAK
+	range 200 2000
+	default 400
+	help
+	  Kmemleak must track all the memory allocations to avoid
+	  reporting false positives. Since memory may be allocated or
+	  freed before kmemleak is initialised, an early log buffer is
+	  used to store these actions. If kmemleak reports "early log
+	  buffer exceeded", please increase this value.
+
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
 	depends on DEBUG_KMEMLEAK
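
As a point of reference, a kernel that enables the new option ends up with a .config fragment like the one below; the 1000 is purely an illustrative value inside the permitted 200..2000 range:

	CONFIG_DEBUG_KMEMLEAK=y
	CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=1000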
diff --git a/lib/Makefile b/lib/Makefile
index b6d1857bbf08..2e78277eff9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o flex_array.o
 
 lib-$(CONFIG_MMU)	+= ioremap.o
 lib-$(CONFIG_SMP)	+= cpumask.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index c5e725562416..8bee16ec7524 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,6 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/atomic.h>
 
 /*
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_read);
 
 void atomic64_set(atomic64_t *v, long long i)
 {
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i)
 	v->counter = i;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v)
 	v->counter += a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_add_return);
 
 void atomic64_sub(long long a, atomic64_t *v)
 {
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v)
 	v->counter -= a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_sub_return);
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_cmpxchg);
 
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_xchg);
 
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(atomic64_add_unless);
 
 static int init_atomic64_lock(void)
 {
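
These EXPORT_SYMBOL() additions (and the new linux/module.h include that declares the macro) let loadable modules link against the generic spinlock-backed atomic64_t implementation on 32-bit architectures. A minimal sketch of module code that can now resolve these symbols at load time (the counter and function names are hypothetical):

	#include <linux/module.h>
	#include <asm/atomic.h>

	/* A 64-bit counter backed by lib/atomic64.c on 32-bit machines. */
	static atomic64_t demo_counter = ATOMIC64_INIT(0);

	static int __init demo_init(void)
	{
		atomic64_add(1, &demo_counter);	/* now links cleanly */
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");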
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 708e2a86d87b..600f473a5610 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -45,12 +45,14 @@
 */
 
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/bunzip2.h>
-#endif /* !STATIC */
+#include <linux/slab.h>
+#endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
-	else
-		len -= 4; /* Uncompressed size hack active in pre-boot
-			     environment */
+
 	if (!outbuf) {
 		error("Could not allocate output bufer");
 		return -1;
@@ -733,4 +733,14 @@ exit_0:
 	return i;
 }
 
-#define decompress bunzip2
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *outbuf,
+			int *pos,
+			void(*error_fn)(char *x))
+{
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+}
+#endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index e36b296fc9f8..68dfce59c1b8 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,13 +19,13 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
+#include <linux/slab.h>
 
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
-#define INBUF_LEN (16*1024)
+#define GZIP_IOBUF_SIZE (16*1024)
 
 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
@@ -55,7 +55,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	if (buf)
 		zbuf = buf;
 	else {
-		zbuf = malloc(INBUF_LEN);
+		zbuf = malloc(GZIP_IOBUF_SIZE);
 		len = 0;
 	}
 	if (!zbuf) {
@@ -77,7 +77,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	}
 
 	if (len == 0)
-		len = fill(zbuf, INBUF_LEN);
+		len = fill(zbuf, GZIP_IOBUF_SIZE);
 
 	/* verify the gzip header */
 	if (len < 10 ||
@@ -113,7 +113,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	while (rc == Z_OK) {
 		if (strm->avail_in == 0) {
 			/* TODO: handle case where both pos and fill are set */
-			len = fill(zbuf, INBUF_LEN);
+			len = fill(zbuf, GZIP_IOBUF_SIZE);
 			if (len < 0) {
 				rc = -1;
 				error("read error");
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32123a1340e6..0b954e04bd30 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -29,12 +29,14 @@
  *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/unlzma.h>
+#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
 
@@ -543,9 +545,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
 	int ret = -1;
 
 	set_error_fn(error_fn);
-	if (!flush)
-		in_len -= 4; /* Uncompressed size hack active in pre-boot
-			       environment */
+
 	if (buf)
 		inbuf = buf;
 	else
@@ -645,4 +645,15 @@ exit_0:
 	return ret;
 }
 
-#define decompress unlzma
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int in_len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *output,
+			int *posp,
+			void(*error_fn)(char *x)
+	)
+{
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+}
+#endif
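
The 'in_len - 4' here (and the 'len - 4' in the bunzip2 wrapper above) replaces the removed in-function "uncompressed size hack": pre-boot images carry the uncompressed size appended to the compressed payload as a trailing 4-byte word, which must not be handed to the decompressor itself. A standalone sketch of reading such a trailer, assuming the usual little-endian encoding (this helper is ours, not part of the diff):

	#include <stdint.h>

	/* Read the 32-bit little-endian size word stored in the last
	 * four bytes of a pre-boot payload of 'len' bytes. */
	static uint32_t trailing_size_word(const unsigned char *buf, int len)
	{
		const unsigned char *p = buf + len - 4;

		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}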
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 3b93129a968c..65b0d99b6d0a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -716,7 +716,7 @@ void dma_debug_init(u32 num_entries)
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
 	if (dma_debug_fs_init() != 0) {
@@ -856,22 +856,21 @@ static void check_for_stack(struct device *dev, void *addr)
 			"stack [addr=%p]\n", addr);
 }
 
-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-	void *addr2 = (char *)addr + size;
+	unsigned long a1 = (unsigned long)addr;
+	unsigned long b1 = a1 + len;
+	unsigned long a2 = (unsigned long)start;
+	unsigned long b2 = (unsigned long)end;
 
-	return ((addr >= start && addr < end) ||
-		(addr2 >= start && addr2 < end) ||
-		((addr < start) && (addr2 >= end)));
+	return !(b1 <= a2 || a1 >= b2);
 }
 
-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, size, _text, _etext) ||
-	    overlap(addr, size, __start_rodata, __end_rodata))
-		err_printk(dev, NULL, "DMA-API: device driver maps "
-				"memory from kernel text or rodata "
-				"[addr=%p] [size=%llu]\n", addr, size);
+	if (overlap(addr, len, _text, _etext) ||
+	    overlap(addr, len, __start_rodata, __end_rodata))
+		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
 static void check_sync(struct device *dev,
@@ -969,7 +968,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->type      = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}
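
The rewritten overlap() reduces the old three-clause pointer comparison to the standard half-open interval test: [a1, b1) and [a2, b2) intersect exactly when neither range ends before the other begins. A standalone sketch of the same predicate (the function name and test values are ours, for illustration):

	#include <assert.h>

	/* Half-open interval test matching the new overlap():
	 * [a1, b1) and [a2, b2) intersect unless one ends first. */
	static int ranges_overlap(unsigned long a1, unsigned long b1,
				  unsigned long a2, unsigned long b2)
	{
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		assert(ranges_overlap(0, 10, 5, 15));	/* partial overlap */
		assert(!ranges_overlap(0, 10, 10, 20));	/* merely adjacent */
		assert(ranges_overlap(0, 100, 20, 30));	/* containment */
		return 0;
	}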
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 833139ce1e22..e22c148e4b7f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,
 
 		if (!newflags)
 			dt->num_enabled--;
-		else if (!dp-flags)
+		else if (!dp->flags)
 			dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags) {
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 000000000000..08f1636d296a
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,267 @@
+/*
+ * Flexible array managed in PAGE_SIZE parts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Dave Hansen <dave@linux.vnet.ibm.com>
+ */
+
+#include <linux/flex_array.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+struct flex_array_part {
+	char elements[FLEX_ARRAY_PART_SIZE];
+};
+
+static inline int __elements_per_part(int element_size)
+{
+	return FLEX_ARRAY_PART_SIZE / element_size;
+}
+
+static inline int bytes_left_in_base(void)
+{
+	int element_offset = offsetof(struct flex_array, parts);
+	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
+	return bytes_left;
+}
+
+static inline int nr_base_part_ptrs(void)
+{
+	return bytes_left_in_base() / sizeof(struct flex_array_part *);
+}
+
+/*
+ * If a user requests an allocation which is small
+ * enough, we may simply use the space in the
+ * flex_array->parts[] array to store the user
+ * data.
+ */
+static inline int elements_fit_in_base(struct flex_array *fa)
+{
+	int data_size = fa->element_size * fa->total_nr_elements;
+	if (data_size <= bytes_left_in_base())
+		return 1;
+	return 0;
+}
+
+/**
+ * flex_array_alloc - allocate a new flexible array
+ * @element_size: the size of individual elements in the array
+ * @total: total number of elements that this should hold
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * @total is used to size internal structures.  If the user ever
+ * accesses any array indexes >=@total, it will produce errors.
+ *
+ * The maximum number of elements is defined as: the number of
+ * elements that can be stored in a page times the number of
+ * page pointers that we can fit in the base structure or (using
+ * integer math):
+ *
+ *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ *
+ * Here's a table showing example capacities.  Note that the maximum
+ * index that the get/put() functions can handle is just nr_objects-1.
+ * This basically means that you get 4MB of storage on 32-bit and 2MB
+ * on 64-bit.
+ *
+ *
+ * Element size | Objects | Objects |
+ * PAGE_SIZE=4k |  32-bit |  64-bit |
+ * ---------------------------------|
+ *      1 bytes | 4186112 | 2093056 |
+ *      2 bytes | 2093056 | 1046528 |
+ *      3 bytes | 1395030 |  697515 |
+ *      4 bytes | 1046528 |  523264 |
+ *     32 bytes |  130816 |   65408 |
+ *     33 bytes |  126728 |   63364 |
+ *   2048 bytes |    2044 |    1022 |
+ *   2049 bytes |    1022 |     511 |
+ *       void * | 1046528 |  261632 |
+ *
+ * Since 64-bit pointers are twice the size, we lose half the
+ * capacity in the base structure.  Also note that no effort is made
+ * to efficiently pack objects across page boundaries.
+ */
+struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+{
+	struct flex_array *ret;
+	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+
+	/* max_size will end up 0 if element_size > PAGE_SIZE */
+	if (total > max_size)
+		return NULL;
+	ret = kzalloc(sizeof(struct flex_array), flags);
+	if (!ret)
+		return NULL;
+	ret->element_size = element_size;
+	ret->total_nr_elements = total;
+	return ret;
+}
+
+static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+{
+	return element_nr / __elements_per_part(fa->element_size);
+}
+
+/**
+ * flex_array_free_parts - just free the second-level pages
+ * @fa: the flex array whose second-level pages
+ *	(parts) should be freed, while the base
+ *	structure itself is left intact.
+ *
+ * This is to be used in cases where the base 'struct flex_array'
+ * has been statically allocated and should not be freed.
+ */
+void flex_array_free_parts(struct flex_array *fa)
+{
+	int part_nr;
+	int max_part = nr_base_part_ptrs();
+
+	if (elements_fit_in_base(fa))
+		return;
+	for (part_nr = 0; part_nr < max_part; part_nr++)
+		kfree(fa->parts[part_nr]);
+}
+
+void flex_array_free(struct flex_array *fa)
+{
+	flex_array_free_parts(fa);
+	kfree(fa);
+}
+
+static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+{
+	return element_nr % __elements_per_part(fa->element_size);
+}
+
+static int index_inside_part(struct flex_array *fa, int element_nr)
+{
+	int part_offset = fa_index_inside_part(fa, element_nr);
+	return part_offset * fa->element_size;
+}
+
+static struct flex_array_part *
+__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
+{
+	struct flex_array_part *part = fa->parts[part_nr];
+	if (!part) {
+		/*
+		 * This leaves the part pages uninitialized
+		 * and with potentially random data, just
+		 * as if the user had kmalloc()'d the whole.
+		 * __GFP_ZERO can be used to zero it.
+		 */
+		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		if (!part)
+			return NULL;
+		fa->parts[part_nr] = part;
+	}
+	return part;
+}
+
+/**
+ * flex_array_put - copy data into the array at @element_nr
+ * @src: address of data to copy into the array
+ * @element_nr: index of the position in which to insert
+ *		the new element.
+ *
+ * Note that this *copies* the contents of @src into
+ * the array.  If you are trying to store an array of
+ * pointers, make sure to pass in &ptr instead of ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = __fa_get_part(fa, part_nr, flags);
+	if (!part)
+		return -ENOMEM;
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memcpy(dst, src, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_prealloc - guarantee that array space exists
+ * @start: index of first array element for which space is allocated
+ * @end: index of last (inclusive) element for which space is allocated
+ *
+ * This will guarantee that no future calls to flex_array_put()
+ * will allocate memory.  It can be used if you are expecting to
+ * be holding a lock or in some atomic context while writing
+ * data into the array.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+{
+	int start_part;
+	int end_part;
+	int part_nr;
+	struct flex_array_part *part;
+
+	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		return 0;
+	start_part = fa_element_to_part_nr(fa, start);
+	end_part = fa_element_to_part_nr(fa, end);
+	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * flex_array_get - pull data back out of the array
+ * @element_nr: index of the element to fetch from the array
+ *
+ * Returns a pointer to the data at index @element_nr.  Note
+ * that this is a copy of the data that was passed in.  If you
+ * are using this to store pointers, you'll get back &ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+void *flex_array_get(struct flex_array *fa, int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+
+	if (element_nr >= fa->total_nr_elements)
+		return NULL;
+	if (!fa->parts[part_nr])
+		return NULL;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = fa->parts[part_nr];
+	return &part->elements[index_inside_part(fa, element_nr)];
+}
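
Taken together, the new file gives the kernel an array that avoids large, physically contiguous allocations by splitting storage into PAGE_SIZE parts allocated on demand. A minimal usage sketch built only from the functions added above (the element type and indexes are illustrative):

	#include <linux/flex_array.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static int flex_array_demo(void)
	{
		struct flex_array *fa;
		int value = 42;
		int *stored;

		/* Base structure sized for 1000 ints; parts come later. */
		fa = flex_array_alloc(sizeof(int), 1000, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		/* Copies 'value' in, allocating the covering part page. */
		if (flex_array_put(fa, 123, &value, GFP_KERNEL)) {
			flex_array_free(fa);
			return -ENOMEM;
		}

		/* Points into the part page, not at 'value' itself. */
		stored = flex_array_get(fa, 123);
		if (!stored || *stored != value) {
			flex_array_free(fa);
			return -EINVAL;
		}

		flex_array_free(fa);
		return 0;
	}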
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e404e908..0d475d8167bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 	miter->__sg = sgl;
 	miter->__nents = nents;
 	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
 	miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 	if (miter->addr) {
 		miter->__offset += miter->consumed;
 
+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
 		if (miter->__flags & SG_MITER_ATOMIC) {
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	unsigned int offset = 0;
 	struct sg_mapping_iter miter;
 	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, sg_flags);
 
 	local_irq_save(flags);
 
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 		if (to_buffer)
 			memcpy(buf + offset, miter.addr, len);
-		else {
+		else
 			memcpy(miter.addr, buf + offset, len);
-			flush_kernel_dcache_page(miter.page);
-		}
 
 		offset += len;
 	}
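
With the explicit direction flags, sg_copy_buffer() no longer flushes inline; sg_miter_stop() performs the D-cache flush only when SG_MITER_TO_SG marks the mapping as written to. Open-coded users of the iterator are expected to pass one of the two flags themselves, as in this sketch (the helper function is ours, not part of the diff):

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Zero every byte covered by a scatterlist.  We write into the
	 * pages, so SG_MITER_TO_SG makes sg_miter_stop() flush them. */
	static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;

		sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
		while (sg_miter_next(&miter))
			memset(miter.addr, 0, miter.length);
		sg_miter_stop(&miter);
	}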