 fs/btrfs/Makefile |   2 +-
 fs/btrfs/ulist.c  | 220 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/ulist.h  |  68 ++++++++++++++++++++
 3 files changed, 289 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index c0ddfd29c5e5..70798407b9a2 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,6 +8,6 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	reada.o backref.o
+	reada.o backref.o ulist.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
new file mode 100644
index 000000000000..12f5147bd2b1
--- /dev/null
+++ b/fs/btrfs/ulist.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "ulist.h"
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ *
+ * A sample usage for ulists is the enumeration of directed graphs without
+ * visiting a node twice. The pseudo-code could look like this:
+ *
+ * ulist = ulist_alloc();
+ * ulist_add(ulist, root);
+ * elem = NULL;
+ *
+ * while ((elem = ulist_next(ulist, elem))) {
+ * 	for (all child nodes n in elem)
+ *		ulist_add(ulist, n);
+ *	do something useful with the node;
+ * }
+ * ulist_free(ulist);
+ *
+ * This assumes the graph nodes are addressable by u64. This stems from the
+ * usage for tree enumeration in btrfs, where the logical addresses are
+ * 64 bit.
+ *
+ * It is also useful for tree enumeration which could be done elegantly
+ * recursively, but is not possible due to kernel stack limitations. The
+ * loop would be similar to the above.
+ */
+
+/**
+ * ulist_init - freshly initialize a ulist
+ * @ulist: the ulist to initialize
+ *
+ * Note: don't use this function to init an already used ulist, use
+ * ulist_reinit instead.
+ */
+void ulist_init(struct ulist *ulist)
+{
+	ulist->nnodes = 0;
+	ulist->nodes = ulist->int_nodes;
+	ulist->nodes_alloced = ULIST_SIZE;
+}
+EXPORT_SYMBOL(ulist_init);
+
+/**
+ * ulist_fini - free up additionally allocated memory for the ulist
+ * @ulist: the ulist from which to free the additional memory
+ *
+ * This is useful in cases where the base 'struct ulist' has been statically
+ * allocated.
+ */
+void ulist_fini(struct ulist *ulist)
+{
+	/*
+	 * The first ULIST_SIZE elements are stored inline in struct ulist.
+	 * Only if more elements are allocated do they need to be freed.
+	 */
+	if (ulist->nodes_alloced > ULIST_SIZE)
+		kfree(ulist->nodes);
+	ulist->nodes_alloced = 0;	/* in case ulist_fini is called twice */
+}
+EXPORT_SYMBOL(ulist_fini);
+
+/**
+ * ulist_reinit - prepare a ulist for reuse
+ * @ulist: ulist to be reused
+ *
+ * Free up all additional memory allocated for the list elements and reinit
+ * the ulist.
+ */
+void ulist_reinit(struct ulist *ulist)
+{
+	ulist_fini(ulist);
+	ulist_init(ulist);
+}
+EXPORT_SYMBOL(ulist_reinit);
+
+/**
+ * ulist_alloc - dynamically allocate a ulist
+ * @gfp_mask: allocation flags to use for the base allocation
+ *
+ * The allocated ulist will be returned in an initialized state.
+ */
+struct ulist *ulist_alloc(unsigned long gfp_mask)
+{
+	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
+
+	if (!ulist)
+		return NULL;
+
+	ulist_init(ulist);
+
+	return ulist;
+}
+EXPORT_SYMBOL(ulist_alloc);
+
+/**
+ * ulist_free - free dynamically allocated ulist
+ * @ulist: ulist to free
+ *
+ * It is not necessary to call ulist_fini beforehand.
+ */
+void ulist_free(struct ulist *ulist)
+{
+	if (!ulist)
+		return;
+	ulist_fini(ulist);
+	kfree(ulist);
+}
+EXPORT_SYMBOL(ulist_free);
+
+/**
+ * ulist_add - add an element to the ulist
+ * @ulist: ulist to add the element to
+ * @val: value to add to ulist
+ * @aux: auxiliary value to store along with val
+ * @gfp_mask: flags to use for allocation
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks, write
+ * locking is needed.
+ *
+ * Add an element to a ulist. The @val will only be added if it doesn't
+ * already exist. If it is added, the auxiliary value @aux is stored along with
+ * it. In case @val already exists in the ulist, @aux is ignored, even if
+ * it differs from the already stored value.
+ *
+ * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
+ * inserted.
+ * In case of allocation failure -ENOMEM is returned and the ulist stays
+ * unaltered.
+ */
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+	      unsigned long gfp_mask)
+{
+	int i;
+
+	for (i = 0; i < ulist->nnodes; ++i) {
+		if (ulist->nodes[i].val == val)
+			return 0;
+	}
+
+	if (ulist->nnodes >= ulist->nodes_alloced) {
+		u64 new_alloced = ulist->nodes_alloced + 128;
+		struct ulist_node *new_nodes;
+		void *old = NULL;
+
+		/*
+		 * if nodes_alloced == ULIST_SIZE no memory has been allocated
+		 * yet, so pass NULL to krealloc
+		 */
+		if (ulist->nodes_alloced > ULIST_SIZE)
+			old = ulist->nodes;
+
+		new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
+				     gfp_mask);
+		if (!new_nodes)
+			return -ENOMEM;
+
+		if (!old)
+			memcpy(new_nodes, ulist->int_nodes,
+			       sizeof(ulist->int_nodes));
+
+		ulist->nodes = new_nodes;
+		ulist->nodes_alloced = new_alloced;
+	}
+	ulist->nodes[ulist->nnodes].val = val;
+	ulist->nodes[ulist->nnodes].aux = aux;
+	++ulist->nnodes;
+
+	return 1;
+}
+EXPORT_SYMBOL(ulist_add);
+
+/**
+ * ulist_next - iterate ulist
+ * @ulist: ulist to iterate
+ * @prev: previously returned element or %NULL to start iteration
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks, only read
+ * locking is needed.
+ *
+ * This function is used to iterate a ulist. The iteration is started with
+ * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * end is reached. No guarantee is made with respect to the order in which
+ * the elements are returned. They might neither be returned in order of
+ * addition nor in ascending order.
+ * It is allowed to call ulist_add during an enumeration. Newly added items
+ * are guaranteed to show up in the running enumeration.
+ */
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+{
+	int next;
+
+	if (ulist->nnodes == 0)
+		return NULL;
+
+	if (!prev)
+		return &ulist->nodes[0];
+
+	next = (prev - ulist->nodes) + 1;
+	if (next < 0 || next >= ulist->nnodes)
+		return NULL;
+
+	return &ulist->nodes[next];
+}
+EXPORT_SYMBOL(ulist_next);
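
A minimal caller of the API above might look like the following sketch. It mirrors the pseudo-code from the file header comment and the documented rule that entries added during an enumeration show up in the running loop. walk_graph(), resolve_children(), the child-array size, and the GFP_NOFS flag are illustrative assumptions, not part of this patch; the usual kernel headers are taken as given.

/*
 * Hedged sketch: breadth-first enumeration of u64-addressable nodes built
 * on the ulist API added above. resolve_children() is a hypothetical helper
 * that fills 'child' with up to 'max' child addresses and returns how many
 * it found (or a negative errno).
 */
static int walk_graph(u64 root)
{
	struct ulist *seen;
	struct ulist_node *node = NULL;
	u64 child[8];
	int nr;
	int i;
	int ret;

	seen = ulist_alloc(GFP_NOFS);
	if (!seen)
		return -ENOMEM;

	ret = ulist_add(seen, root, 0, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* entries added during the enumeration are visited by the same loop */
	while ((node = ulist_next(seen, node))) {
		nr = resolve_children(node->val, child, ARRAY_SIZE(child));
		if (nr < 0) {
			ret = nr;
			goto out;
		}
		for (i = 0; i < nr; ++i) {
			ret = ulist_add(seen, child[i], 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	ulist_free(seen);
	return ret;
}

The duplicate check inside ulist_add() is what keeps the walk from revisiting a node, and ulist_free() releases both the base structure and any nodes array that grew beyond ULIST_SIZE.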
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
new file mode 100644
index 000000000000..2e25dec58ec0
--- /dev/null
+++ b/fs/btrfs/ulist.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ *
+ */
+
+#ifndef __ULIST__
+#define __ULIST__
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ */
+
+/*
+ * number of elements statically allocated inside struct ulist
+ */
+#define ULIST_SIZE 16
+
+/*
+ * element of the list
+ */
+struct ulist_node {
+	u64 val;		/* value to store */
+	unsigned long aux;	/* auxiliary value saved along with the val */
+};
+
+struct ulist {
+	/*
+	 * number of elements stored in list
+	 */
+	unsigned long nnodes;
+
+	/*
+	 * number of nodes we already have room for
+	 */
+	unsigned long nodes_alloced;
+
+	/*
+	 * pointer to the array storing the elements. The first ULIST_SIZE
+	 * elements are stored inline. In this case it points to int_nodes.
+	 * After exceeding ULIST_SIZE, dynamic memory is allocated.
+	 */
+	struct ulist_node *nodes;
+
+	/*
+	 * inline storage space for the first ULIST_SIZE entries
+	 */
+	struct ulist_node int_nodes[ULIST_SIZE];
+};
+
+void ulist_init(struct ulist *ulist);
+void ulist_fini(struct ulist *ulist);
+void ulist_reinit(struct ulist *ulist);
+struct ulist *ulist_alloc(unsigned long gfp_mask);
+void ulist_free(struct ulist *ulist);
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+	      unsigned long gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+
+#endif
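
Since ulist_init()/ulist_fini() exist precisely so the base struct can be embedded or placed on the stack, a second hedged sketch shows that lifecycle, with the first ULIST_SIZE entries living in the inline int_nodes[] storage. The count_distinct() name, GFP_KERNEL, and the pr_debug() reporting are assumptions made for illustration only.

/*
 * Hedged sketch: a stack-allocated ulist that counts the distinct values
 * in an input array. No heap allocation happens unless ulist_add() has to
 * grow past ULIST_SIZE; ulist_fini() frees that growth if it occurred.
 */
static int count_distinct(const u64 *vals, int count)
{
	struct ulist seen;
	struct ulist_node *node = NULL;
	int distinct = 0;
	int i;
	int ret;

	ulist_init(&seen);

	for (i = 0; i < count; ++i) {
		ret = ulist_add(&seen, vals[i], 0, GFP_KERNEL);
		if (ret < 0) {
			ulist_fini(&seen);
			return ret;
		}
		distinct += ret;	/* 1 if newly inserted, 0 if duplicate */
	}

	while ((node = ulist_next(&seen, node)))
		pr_debug("val %llu aux %lu\n",
			 (unsigned long long)node->val, node->aux);

	ulist_fini(&seen);
	return distinct;
}

If the same on-stack ulist were to be filled again, ulist_reinit() would reset it in a single call instead of the fini/init pair.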