author		Herbert Xu <herbert@gondor.apana.org.au>	2006-05-23 23:02:26 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2006-06-26 03:34:40 -0400
commit		c7fc05992afcf1d63d6d5fb6142c8d39094dbca9 (patch)
tree		201d72844c0b27269e34bf3172d579b9e556e10c /include
parent		110bf1c0e932615cbe43a8af8a07bc3750ae4295 (diff)
[CRYPTO] api: Added cra_init/cra_exit
This patch adds the hooks cra_init/cra_exit which are called during a tfm's construction and destruction respectively.  This will be used by the instances to allocate child tfm's.

For now this lets us get rid of the coa_init/coa_exit functions which are used for exactly that purpose (unlike the dia_init function which is called for each transaction).

In fact the coa_exit path is currently buggy as it may get called twice when an error is encountered during initialisation.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/crypto.h	5
1 file changed, 3 insertions, 2 deletions
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ef918803ec30..6c013c88080f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -109,8 +109,6 @@ struct digest_alg {
 };
 
 struct compress_alg {
-	int (*coa_init)(struct crypto_tfm *tfm);
-	void (*coa_exit)(struct crypto_tfm *tfm);
 	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
 			    unsigned int slen, u8 *dst, unsigned int *dlen);
 	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
@@ -138,6 +136,9 @@ struct crypto_alg {
 		struct digest_alg digest;
 		struct compress_alg compress;
 	} cra_u;
+
+	int (*cra_init)(struct crypto_tfm *tfm);
+	void (*cra_exit)(struct crypto_tfm *tfm);
 
 	struct module *cra_module;
 };
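
As an illustration of the new hooks (not part of this patch), an algorithm
that previously allocated per-transform state in coa_init/coa_exit can now
attach that work to the crypto_alg itself.  The example_* names and the
trailing fields below are hypothetical:

static int example_init(struct crypto_tfm *tfm)
{
	/* allocate per-tfm state; called once while the tfm is constructed */
	return 0;
}

static void example_exit(struct crypto_tfm *tfm)
{
	/* release whatever example_init() allocated; called at destruction */
}

static struct crypto_alg example_alg = {
	.cra_name	= "example",
	.cra_module	= THIS_MODULE,
	.cra_init	= example_init,
	.cra_exit	= example_exit,
	/* ... cra_flags, cra_blocksize, cra_u, etc. ... */
};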

fs/btrfs/extent_map.c (litmus-rt.git, v2.6.33.4; created in commit a52d9a8033c4, "Btrfs: Extent based page cache code")

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 * @mask:		flags for memory allocations during tree operations
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
	tree->map.rb_node = NULL;
	rwlock_init(&tree->lock);
}
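
/*
 * Illustrative sketch, not part of this file: a user embedding an
 * extent_map_tree in its per-inode state initializes it once when the
 * inode is set up, e.g.
 *
 *	extent_map_tree_init(&my_inode->extent_tree, GFP_NOFS);
 *
 * (my_inode->extent_tree is a hypothetical field name.)
 */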

/**
 * alloc_extent_map - allocate new extent map structure
 * @mask:	memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	em->flags = 0;
	atomic_set(&em->refs, 1);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if it hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
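
/*
 * Illustrative sketch, not part of this file: the typical reference
 * lifecycle.  alloc_extent_map() hands back a map holding one
 * reference, and free_extent_map() drops it:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	em->block_start = block_start;
 *	... use em, or insert it with add_extent_mapping() ...
 *	free_extent_map(em);
 */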

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
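
/*
 * Worked example (illustrative): with identical flags and bdev, a map
 * covering [0, 4k) with block_start at 8k (so extent_map_block_end()
 * is 12k) merges with a map covering [4k, 8k) whose block_start is
 * 12k: the ranges are logically adjacent, the blocks are physically
 * adjacent, and neither map is pinned or compressed.
 */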

int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *merge = NULL;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
out:
	return ret;
}
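
/*
 * Illustrative sketch, not part of this file: callers serialize
 * insertion with the tree lock; -EEXIST means some mapping already
 * intersects the range:
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	if (ret == -EEXIST) {
 *		free_extent_map(em);
 *		em = NULL;
 *	}
 */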

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
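
/*
 * e.g. range_end(0xfffffffffffffff0ULL, 0x20) saturates to (u64)-1
 * instead of wrapping around to a small offset.
 */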

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		if (end > em->start && start < extent_map_end(em))
			goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (end > em->start && start < extent_map_end(em))
		goto found;

	em = NULL;
	goto out;

found:
	atomic_inc(&em->refs);
out:
	return em;
}
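
/*
 * Illustrative sketch, not part of this file: readers take the tree
 * lock for reading and drop the reference the lookup took:
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... read fields such as em->block_start ...
 *		free_extent_map(em);
 *	}
 */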

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node && prev) {
		em = rb_entry(prev, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node && next) {
		em = rb_entry(next, struct extent_map, rb_node);
		goto found;
	}
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	goto found;

found:
	atomic_inc(&em->refs);
out:
	return em;
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}
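
/*
 * Illustrative sketch, not part of this file: removal drops no
 * references, so a caller discarding a mapping entirely does both
 * steps itself:
 *
 *	write_lock(&tree->lock);
 *	remove_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);
 */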