Diffstat (limited to 'fs/fscache')
-rw-r--r--   fs/fscache/Kconfig       56
-rw-r--r--   fs/fscache/Makefile      19
-rw-r--r--   fs/fscache/cache.c      415
-rw-r--r--   fs/fscache/cookie.c     500
-rw-r--r--   fs/fscache/fsdef.c      144
-rw-r--r--   fs/fscache/histogram.c  109
-rw-r--r--   fs/fscache/internal.h   380
-rw-r--r--   fs/fscache/main.c       124
-rw-r--r--   fs/fscache/netfs.c      103
-rw-r--r--   fs/fscache/object.c     810
-rw-r--r--   fs/fscache/operation.c  459
-rw-r--r--   fs/fscache/page.c       816
-rw-r--r--   fs/fscache/proc.c        68
-rw-r--r--   fs/fscache/stats.c      212
14 files changed, 4215 insertions(+), 0 deletions(-)
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
new file mode 100644
index 000000000000..9bbb8ce7bea0
--- /dev/null
+++ b/fs/fscache/Kconfig
@@ -0,0 +1,56 @@

config FSCACHE
	tristate "General filesystem local caching manager"
	depends on EXPERIMENTAL
	select SLOW_WORK
	help
	  This option enables a generic filesystem caching manager that can be
	  used by various network and other filesystems to cache data locally.
	  Different sorts of caches can be plugged in, depending on the
	  resources available.

	  See Documentation/filesystems/caching/fscache.txt for more information.

config FSCACHE_STATS
	bool "Gather statistical information on local caching"
	depends on FSCACHE && PROC_FS
	help
	  This option causes statistical information to be gathered on local
	  caching and exported through file:

		/proc/fs/fscache/stats

	  The gathering of statistics adds a certain amount of overhead to
	  execution as there are quite a few stats gathered, and on a
	  multi-CPU system these may be on cachelines that keep bouncing
	  between CPUs.  On the other hand, the stats are very useful for
	  debugging purposes.  Saying 'Y' here is recommended.

	  See Documentation/filesystems/caching/fscache.txt for more information.

config FSCACHE_HISTOGRAM
	bool "Gather latency information on local caching"
	depends on FSCACHE && PROC_FS
	help
	  This option causes latency information to be gathered on local
	  caching and exported through file:

		/proc/fs/fscache/histogram

	  The generation of this histogram adds a certain amount of overhead to
	  execution as there are a number of points at which data is gathered,
	  and on a multi-CPU system these may be on cachelines that keep
	  bouncing between CPUs.  On the other hand, the histogram may be
	  useful for debugging purposes.  Saying 'N' here is recommended.

	  See Documentation/filesystems/caching/fscache.txt for more information.

config FSCACHE_DEBUG
	bool "Debug FS-Cache"
	depends on FSCACHE
	help
	  This permits debugging to be dynamically enabled in the local caching
	  management module.  If this is set, the debugging output may be
	  enabled by setting bits in /sys/module/fscache/parameters/debug.

	  See Documentation/filesystems/caching/fscache.txt for more information.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
new file mode 100644
index 000000000000..91571b95aacc
--- /dev/null
+++ b/fs/fscache/Makefile
@@ -0,0 +1,19 @@
#
# Makefile for general filesystem caching code
#

fscache-y := \
	cache.o \
	cookie.o \
	fsdef.o \
	main.o \
	netfs.o \
	object.o \
	operation.o \
	page.o

fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o

obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
new file mode 100644
index 000000000000..e21985bbb1fb
--- /dev/null
+++ b/fs/fscache/cache.c
@@ -0,0 +1,415 @@
1 | /* FS-Cache cache handling | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL CACHE | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include "internal.h" | ||
16 | |||
17 | LIST_HEAD(fscache_cache_list); | ||
18 | DECLARE_RWSEM(fscache_addremove_sem); | ||
19 | DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq); | ||
20 | EXPORT_SYMBOL(fscache_cache_cleared_wq); | ||
21 | |||
22 | static LIST_HEAD(fscache_cache_tag_list); | ||
23 | |||
24 | /* | ||
25 | * look up a cache tag | ||
26 | */ | ||
27 | struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name) | ||
28 | { | ||
29 | struct fscache_cache_tag *tag, *xtag; | ||
30 | |||
31 | /* firstly check for the existence of the tag under read lock */ | ||
32 | down_read(&fscache_addremove_sem); | ||
33 | |||
34 | list_for_each_entry(tag, &fscache_cache_tag_list, link) { | ||
35 | if (strcmp(tag->name, name) == 0) { | ||
36 | atomic_inc(&tag->usage); | ||
37 | up_read(&fscache_addremove_sem); | ||
38 | return tag; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | up_read(&fscache_addremove_sem); | ||
43 | |||
44 | /* the tag does not exist - create a candidate */ | ||
45 | xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL); | ||
46 | if (!xtag) | ||
47 | /* return a dummy tag if out of memory */ | ||
48 | return ERR_PTR(-ENOMEM); | ||
49 | |||
50 | atomic_set(&xtag->usage, 1); | ||
51 | strcpy(xtag->name, name); | ||
52 | |||
53 | /* write lock, search again and add if still not present */ | ||
54 | down_write(&fscache_addremove_sem); | ||
55 | |||
56 | list_for_each_entry(tag, &fscache_cache_tag_list, link) { | ||
57 | if (strcmp(tag->name, name) == 0) { | ||
58 | atomic_inc(&tag->usage); | ||
59 | up_write(&fscache_addremove_sem); | ||
60 | kfree(xtag); | ||
61 | return tag; | ||
62 | } | ||
63 | } | ||
64 | |||
65 | list_add_tail(&xtag->link, &fscache_cache_tag_list); | ||
66 | up_write(&fscache_addremove_sem); | ||
67 | return xtag; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * release a reference to a cache tag | ||
72 | */ | ||
73 | void __fscache_release_cache_tag(struct fscache_cache_tag *tag) | ||
74 | { | ||
75 | if (tag != ERR_PTR(-ENOMEM)) { | ||
76 | down_write(&fscache_addremove_sem); | ||
77 | |||
78 | if (atomic_dec_and_test(&tag->usage)) | ||
79 | list_del_init(&tag->link); | ||
80 | else | ||
81 | tag = NULL; | ||
82 | |||
83 | up_write(&fscache_addremove_sem); | ||
84 | |||
85 | kfree(tag); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * select a cache in which to store an object | ||
91 | * - the cache addremove semaphore must be at least read-locked by the caller | ||
92 | * - the object will never be an index | ||
93 | */ | ||
94 | struct fscache_cache *fscache_select_cache_for_object( | ||
95 | struct fscache_cookie *cookie) | ||
96 | { | ||
97 | struct fscache_cache_tag *tag; | ||
98 | struct fscache_object *object; | ||
99 | struct fscache_cache *cache; | ||
100 | |||
101 | _enter(""); | ||
102 | |||
103 | if (list_empty(&fscache_cache_list)) { | ||
104 | _leave(" = NULL [no cache]"); | ||
105 | return NULL; | ||
106 | } | ||
107 | |||
108 | /* we check the parent to determine the cache to use */ | ||
109 | spin_lock(&cookie->lock); | ||
110 | |||
111 | /* the first in the parent's backing list should be the preferred | ||
112 | * cache */ | ||
113 | if (!hlist_empty(&cookie->backing_objects)) { | ||
114 | object = hlist_entry(cookie->backing_objects.first, | ||
115 | struct fscache_object, cookie_link); | ||
116 | |||
117 | cache = object->cache; | ||
118 | if (object->state >= FSCACHE_OBJECT_DYING || | ||
119 | test_bit(FSCACHE_IOERROR, &cache->flags)) | ||
120 | cache = NULL; | ||
121 | |||
122 | spin_unlock(&cookie->lock); | ||
123 | _leave(" = %p [parent]", cache); | ||
124 | return cache; | ||
125 | } | ||
126 | |||
127 | /* the parent is unbacked */ | ||
128 | if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) { | ||
129 | /* cookie not an index and is unbacked */ | ||
130 | spin_unlock(&cookie->lock); | ||
131 | _leave(" = NULL [cookie ub,ni]"); | ||
132 | return NULL; | ||
133 | } | ||
134 | |||
135 | spin_unlock(&cookie->lock); | ||
136 | |||
137 | if (!cookie->def->select_cache) | ||
138 | goto no_preference; | ||
139 | |||
140 | /* ask the netfs for its preference */ | ||
141 | tag = cookie->def->select_cache(cookie->parent->netfs_data, | ||
142 | cookie->netfs_data); | ||
143 | if (!tag) | ||
144 | goto no_preference; | ||
145 | |||
146 | if (tag == ERR_PTR(-ENOMEM)) { | ||
147 | _leave(" = NULL [nomem tag]"); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | if (!tag->cache) { | ||
152 | _leave(" = NULL [unbacked tag]"); | ||
153 | return NULL; | ||
154 | } | ||
155 | |||
156 | if (test_bit(FSCACHE_IOERROR, &tag->cache->flags)) | ||
157 | return NULL; | ||
158 | |||
159 | _leave(" = %p [specific]", tag->cache); | ||
160 | return tag->cache; | ||
161 | |||
162 | no_preference: | ||
163 | /* netfs has no preference - just select first cache */ | ||
164 | cache = list_entry(fscache_cache_list.next, | ||
165 | struct fscache_cache, link); | ||
166 | _leave(" = %p [first]", cache); | ||
167 | return cache; | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * fscache_init_cache - Initialise a cache record | ||
172 | * @cache: The cache record to be initialised | ||
173 | * @ops: The cache operations to be installed in that record | ||
174 | * @idfmt: Format string to define identifier | ||
175 | * @...: sprintf-style arguments | ||
176 | * | ||
177 | * Initialise a record of a cache and fill in the name. | ||
178 | * | ||
179 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
180 | * description. | ||
181 | */ | ||
182 | void fscache_init_cache(struct fscache_cache *cache, | ||
183 | const struct fscache_cache_ops *ops, | ||
184 | const char *idfmt, | ||
185 | ...) | ||
186 | { | ||
187 | va_list va; | ||
188 | |||
189 | memset(cache, 0, sizeof(*cache)); | ||
190 | |||
191 | cache->ops = ops; | ||
192 | |||
193 | va_start(va, idfmt); | ||
194 | vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va); | ||
195 | va_end(va); | ||
196 | |||
197 | INIT_WORK(&cache->op_gc, fscache_operation_gc); | ||
198 | INIT_LIST_HEAD(&cache->link); | ||
199 | INIT_LIST_HEAD(&cache->object_list); | ||
200 | INIT_LIST_HEAD(&cache->op_gc_list); | ||
201 | spin_lock_init(&cache->object_list_lock); | ||
202 | spin_lock_init(&cache->op_gc_list_lock); | ||
203 | } | ||
204 | EXPORT_SYMBOL(fscache_init_cache); | ||
205 | |||
206 | /** | ||
207 | * fscache_add_cache - Declare a cache as being open for business | ||
208 | * @cache: The record describing the cache | ||
209 | * @ifsdef: The record of the cache object describing the top-level index | ||
210 | * @tagname: The tag describing this cache | ||
211 | * | ||
212 | * Add a cache to the system, making it available for netfs's to use. | ||
213 | * | ||
214 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
215 | * description. | ||
216 | */ | ||
217 | int fscache_add_cache(struct fscache_cache *cache, | ||
218 | struct fscache_object *ifsdef, | ||
219 | const char *tagname) | ||
220 | { | ||
221 | struct fscache_cache_tag *tag; | ||
222 | |||
223 | BUG_ON(!cache->ops); | ||
224 | BUG_ON(!ifsdef); | ||
225 | |||
226 | cache->flags = 0; | ||
227 | ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED); | ||
228 | ifsdef->state = FSCACHE_OBJECT_ACTIVE; | ||
229 | |||
230 | if (!tagname) | ||
231 | tagname = cache->identifier; | ||
232 | |||
233 | BUG_ON(!tagname[0]); | ||
234 | |||
235 | _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname); | ||
236 | |||
237 | /* we use the cache tag to uniquely identify caches */ | ||
238 | tag = __fscache_lookup_cache_tag(tagname); | ||
239 | if (IS_ERR(tag)) | ||
240 | goto nomem; | ||
241 | |||
242 | if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags)) | ||
243 | goto tag_in_use; | ||
244 | |||
245 | cache->kobj = kobject_create_and_add(tagname, fscache_root); | ||
246 | if (!cache->kobj) | ||
247 | goto error; | ||
248 | |||
249 | ifsdef->cookie = &fscache_fsdef_index; | ||
250 | ifsdef->cache = cache; | ||
251 | cache->fsdef = ifsdef; | ||
252 | |||
253 | down_write(&fscache_addremove_sem); | ||
254 | |||
255 | tag->cache = cache; | ||
256 | cache->tag = tag; | ||
257 | |||
258 | /* add the cache to the list */ | ||
259 | list_add(&cache->link, &fscache_cache_list); | ||
260 | |||
261 | /* add the cache's netfs definition index object to the cache's | ||
262 | * list */ | ||
263 | spin_lock(&cache->object_list_lock); | ||
264 | list_add_tail(&ifsdef->cache_link, &cache->object_list); | ||
265 | spin_unlock(&cache->object_list_lock); | ||
266 | |||
267 | /* add the cache's netfs definition index object to the top level index | ||
268 | * cookie as a known backing object */ | ||
269 | spin_lock(&fscache_fsdef_index.lock); | ||
270 | |||
271 | hlist_add_head(&ifsdef->cookie_link, | ||
272 | &fscache_fsdef_index.backing_objects); | ||
273 | |||
274 | atomic_inc(&fscache_fsdef_index.usage); | ||
275 | |||
276 | /* done */ | ||
277 | spin_unlock(&fscache_fsdef_index.lock); | ||
278 | up_write(&fscache_addremove_sem); | ||
279 | |||
280 | printk(KERN_NOTICE "FS-Cache: Cache \"%s\" added (type %s)\n", | ||
281 | cache->tag->name, cache->ops->name); | ||
282 | kobject_uevent(cache->kobj, KOBJ_ADD); | ||
283 | |||
284 | _leave(" = 0 [%s]", cache->identifier); | ||
285 | return 0; | ||
286 | |||
287 | tag_in_use: | ||
288 | printk(KERN_ERR "FS-Cache: Cache tag '%s' already in use\n", tagname); | ||
289 | __fscache_release_cache_tag(tag); | ||
290 | _leave(" = -EXIST"); | ||
291 | return -EEXIST; | ||
292 | |||
293 | error: | ||
294 | __fscache_release_cache_tag(tag); | ||
295 | _leave(" = -EINVAL"); | ||
296 | return -EINVAL; | ||
297 | |||
298 | nomem: | ||
299 | _leave(" = -ENOMEM"); | ||
300 | return -ENOMEM; | ||
301 | } | ||
302 | EXPORT_SYMBOL(fscache_add_cache); | ||
303 | |||
304 | /** | ||
305 | * fscache_io_error - Note a cache I/O error | ||
306 | * @cache: The record describing the cache | ||
307 | * | ||
308 | * Note that an I/O error occurred in a cache and that it should no longer be | ||
309 | * used for anything. This also reports the error into the kernel log. | ||
310 | * | ||
311 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
312 | * description. | ||
313 | */ | ||
314 | void fscache_io_error(struct fscache_cache *cache) | ||
315 | { | ||
316 | set_bit(FSCACHE_IOERROR, &cache->flags); | ||
317 | |||
318 | printk(KERN_ERR "FS-Cache: Cache %s stopped due to I/O error\n", | ||
319 | cache->ops->name); | ||
320 | } | ||
321 | EXPORT_SYMBOL(fscache_io_error); | ||
322 | |||
323 | /* | ||
324 | * request withdrawal of all the objects in a cache | ||
325 | * - all the objects being withdrawn are moved onto the supplied list | ||
326 | */ | ||
327 | static void fscache_withdraw_all_objects(struct fscache_cache *cache, | ||
328 | struct list_head *dying_objects) | ||
329 | { | ||
330 | struct fscache_object *object; | ||
331 | |||
332 | spin_lock(&cache->object_list_lock); | ||
333 | |||
334 | while (!list_empty(&cache->object_list)) { | ||
335 | object = list_entry(cache->object_list.next, | ||
336 | struct fscache_object, cache_link); | ||
337 | list_move_tail(&object->cache_link, dying_objects); | ||
338 | |||
339 | _debug("withdraw %p", object->cookie); | ||
340 | |||
341 | spin_lock(&object->lock); | ||
342 | spin_unlock(&cache->object_list_lock); | ||
343 | fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW); | ||
344 | spin_unlock(&object->lock); | ||
345 | |||
346 | cond_resched(); | ||
347 | spin_lock(&cache->object_list_lock); | ||
348 | } | ||
349 | |||
350 | spin_unlock(&cache->object_list_lock); | ||
351 | } | ||
352 | |||
353 | /** | ||
354 | * fscache_withdraw_cache - Withdraw a cache from the active service | ||
355 | * @cache: The record describing the cache | ||
356 | * | ||
357 | * Withdraw a cache from service, unbinding all its cache objects from the | ||
358 | * netfs cookies they're currently representing. | ||
359 | * | ||
360 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
361 | * description. | ||
362 | */ | ||
363 | void fscache_withdraw_cache(struct fscache_cache *cache) | ||
364 | { | ||
365 | LIST_HEAD(dying_objects); | ||
366 | |||
367 | _enter(""); | ||
368 | |||
369 | printk(KERN_NOTICE "FS-Cache: Withdrawing cache \"%s\"\n", | ||
370 | cache->tag->name); | ||
371 | |||
372 | /* make the cache unavailable for cookie acquisition */ | ||
373 | if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags)) | ||
374 | BUG(); | ||
375 | |||
376 | down_write(&fscache_addremove_sem); | ||
377 | list_del_init(&cache->link); | ||
378 | cache->tag->cache = NULL; | ||
379 | up_write(&fscache_addremove_sem); | ||
380 | |||
381 | /* make sure all pages pinned by operations on behalf of the netfs are | ||
382 | * written to disk */ | ||
383 | cache->ops->sync_cache(cache); | ||
384 | |||
385 | /* dissociate all the netfs pages backed by this cache from the block | ||
386 | * mappings in the cache */ | ||
387 | cache->ops->dissociate_pages(cache); | ||
388 | |||
389 | /* we now have to destroy all the active objects pertaining to this | ||
390 | * cache - which we do by passing them off to thread pool to be | ||
391 | * disposed of */ | ||
392 | _debug("destroy"); | ||
393 | |||
394 | fscache_withdraw_all_objects(cache, &dying_objects); | ||
395 | |||
396 | /* wait for all extant objects to finish their outstanding operations | ||
397 | * and go away */ | ||
398 | _debug("wait for finish"); | ||
399 | wait_event(fscache_cache_cleared_wq, | ||
400 | atomic_read(&cache->object_count) == 0); | ||
401 | _debug("wait for clearance"); | ||
402 | wait_event(fscache_cache_cleared_wq, | ||
403 | list_empty(&cache->object_list)); | ||
404 | _debug("cleared"); | ||
405 | ASSERT(list_empty(&dying_objects)); | ||
406 | |||
407 | kobject_put(cache->kobj); | ||
408 | |||
409 | clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags); | ||
410 | fscache_release_cache_tag(cache->tag); | ||
411 | cache->tag = NULL; | ||
412 | |||
413 | _leave(""); | ||
414 | } | ||
415 | EXPORT_SYMBOL(fscache_withdraw_cache); | ||
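
For illustration, a minimal sketch (not part of this commit) of how a cache backend is expected to drive the API above: fscache_init_cache() fills in the record, fscache_add_cache() publishes it, and fscache_withdraw_cache() takes it back out of service. The mycache_* names and objects are hypothetical; the real contract is described in Documentation/filesystems/caching/backend-api.txt.

	#include <linux/module.h>
	#include <linux/fscache-cache.h>

	static struct fscache_cache mycache;			/* backend's cache record */
	extern const struct fscache_cache_ops mycache_ops;	/* backend's op table (hypothetical) */
	extern struct fscache_object mycache_fsdef;		/* backend's root index object (hypothetical) */

	static int __init mycache_init(void)
	{
		/* fill in the identifier, lists and locks inside the record */
		fscache_init_cache(&mycache, &mycache_ops, "mycache-%s", "vol0");

		/* publish the cache; a NULL tag name would default to the identifier */
		return fscache_add_cache(&mycache, &mycache_fsdef, "mycache");
	}

	static void __exit mycache_exit(void)
	{
		/* detach every object and pull the cache out of service */
		fscache_withdraw_cache(&mycache);
	}

	module_init(mycache_init);
	module_exit(mycache_exit);
	MODULE_LICENSE("GPL");
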
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
new file mode 100644
index 000000000000..72fd18f6c71f
--- /dev/null
+++ b/fs/fscache/cookie.c
@@ -0,0 +1,500 @@
1 | /* netfs cookie management | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * See Documentation/filesystems/caching/netfs-api.txt for more information on | ||
12 | * the netfs API. | ||
13 | */ | ||
14 | |||
15 | #define FSCACHE_DEBUG_LEVEL COOKIE | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include "internal.h" | ||
19 | |||
20 | struct kmem_cache *fscache_cookie_jar; | ||
21 | |||
22 | static atomic_t fscache_object_debug_id = ATOMIC_INIT(0); | ||
23 | |||
24 | static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie); | ||
25 | static int fscache_alloc_object(struct fscache_cache *cache, | ||
26 | struct fscache_cookie *cookie); | ||
27 | static int fscache_attach_object(struct fscache_cookie *cookie, | ||
28 | struct fscache_object *object); | ||
29 | |||
30 | /* | ||
31 | * initialise an cookie jar slab element prior to any use | ||
32 | */ | ||
33 | void fscache_cookie_init_once(void *_cookie) | ||
34 | { | ||
35 | struct fscache_cookie *cookie = _cookie; | ||
36 | |||
37 | memset(cookie, 0, sizeof(*cookie)); | ||
38 | spin_lock_init(&cookie->lock); | ||
39 | INIT_HLIST_HEAD(&cookie->backing_objects); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * request a cookie to represent an object (index, datafile, xattr, etc) | ||
44 | * - parent specifies the parent object | ||
45 | * - the top level index cookie for each netfs is stored in the fscache_netfs | ||
46 | * struct upon registration | ||
47 | * - def points to the definition | ||
48 | * - the netfs_data will be passed to the functions pointed to in *def | ||
49 | * - all attached caches will be searched to see if they contain this object | ||
50 | * - index objects aren't stored on disk until there's a dependent file that | ||
51 | * needs storing | ||
52 | * - other objects are stored in a selected cache immediately, and all the | ||
53 | * indices forming the path to it are instantiated if necessary | ||
54 | * - we never let on to the netfs about errors | ||
55 | * - we may set a negative cookie pointer, but that's okay | ||
56 | */ | ||
57 | struct fscache_cookie *__fscache_acquire_cookie( | ||
58 | struct fscache_cookie *parent, | ||
59 | const struct fscache_cookie_def *def, | ||
60 | void *netfs_data) | ||
61 | { | ||
62 | struct fscache_cookie *cookie; | ||
63 | |||
64 | BUG_ON(!def); | ||
65 | |||
66 | _enter("{%s},{%s},%p", | ||
67 | parent ? (char *) parent->def->name : "<no-parent>", | ||
68 | def->name, netfs_data); | ||
69 | |||
70 | fscache_stat(&fscache_n_acquires); | ||
71 | |||
72 | /* if there's no parent cookie, then we don't create one here either */ | ||
73 | if (!parent) { | ||
74 | fscache_stat(&fscache_n_acquires_null); | ||
75 | _leave(" [no parent]"); | ||
76 | return NULL; | ||
77 | } | ||
78 | |||
79 | /* validate the definition */ | ||
80 | BUG_ON(!def->get_key); | ||
81 | BUG_ON(!def->name[0]); | ||
82 | |||
83 | BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX && | ||
84 | parent->def->type != FSCACHE_COOKIE_TYPE_INDEX); | ||
85 | |||
86 | /* allocate and initialise a cookie */ | ||
87 | cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); | ||
88 | if (!cookie) { | ||
89 | fscache_stat(&fscache_n_acquires_oom); | ||
90 | _leave(" [ENOMEM]"); | ||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | atomic_set(&cookie->usage, 1); | ||
95 | atomic_set(&cookie->n_children, 0); | ||
96 | |||
97 | atomic_inc(&parent->usage); | ||
98 | atomic_inc(&parent->n_children); | ||
99 | |||
100 | cookie->def = def; | ||
101 | cookie->parent = parent; | ||
102 | cookie->netfs_data = netfs_data; | ||
103 | cookie->flags = 0; | ||
104 | |||
105 | INIT_RADIX_TREE(&cookie->stores, GFP_NOFS); | ||
106 | |||
107 | switch (cookie->def->type) { | ||
108 | case FSCACHE_COOKIE_TYPE_INDEX: | ||
109 | fscache_stat(&fscache_n_cookie_index); | ||
110 | break; | ||
111 | case FSCACHE_COOKIE_TYPE_DATAFILE: | ||
112 | fscache_stat(&fscache_n_cookie_data); | ||
113 | break; | ||
114 | default: | ||
115 | fscache_stat(&fscache_n_cookie_special); | ||
116 | break; | ||
117 | } | ||
118 | |||
119 | /* if the object is an index then we need do nothing more here - we | ||
120 | * create indices on disk when we need them as an index may exist in | ||
121 | * multiple caches */ | ||
122 | if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) { | ||
123 | if (fscache_acquire_non_index_cookie(cookie) < 0) { | ||
124 | atomic_dec(&parent->n_children); | ||
125 | __fscache_cookie_put(cookie); | ||
126 | fscache_stat(&fscache_n_acquires_nobufs); | ||
127 | _leave(" = NULL"); | ||
128 | return NULL; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | fscache_stat(&fscache_n_acquires_ok); | ||
133 | _leave(" = %p", cookie); | ||
134 | return cookie; | ||
135 | } | ||
136 | EXPORT_SYMBOL(__fscache_acquire_cookie); | ||
137 | |||
138 | /* | ||
139 | * acquire a non-index cookie | ||
140 | * - this must make sure the index chain is instantiated and instantiate the | ||
141 | * object representation too | ||
142 | */ | ||
143 | static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) | ||
144 | { | ||
145 | struct fscache_object *object; | ||
146 | struct fscache_cache *cache; | ||
147 | uint64_t i_size; | ||
148 | int ret; | ||
149 | |||
150 | _enter(""); | ||
151 | |||
152 | cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE; | ||
153 | |||
154 | /* now we need to see whether the backing objects for this cookie yet | ||
155 | * exist, if not there'll be nothing to search */ | ||
156 | down_read(&fscache_addremove_sem); | ||
157 | |||
158 | if (list_empty(&fscache_cache_list)) { | ||
159 | up_read(&fscache_addremove_sem); | ||
160 | _leave(" = 0 [no caches]"); | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | /* select a cache in which to store the object */ | ||
165 | cache = fscache_select_cache_for_object(cookie->parent); | ||
166 | if (!cache) { | ||
167 | up_read(&fscache_addremove_sem); | ||
168 | fscache_stat(&fscache_n_acquires_no_cache); | ||
169 | _leave(" = -ENOMEDIUM [no cache]"); | ||
170 | return -ENOMEDIUM; | ||
171 | } | ||
172 | |||
173 | _debug("cache %s", cache->tag->name); | ||
174 | |||
175 | cookie->flags = | ||
176 | (1 << FSCACHE_COOKIE_LOOKING_UP) | | ||
177 | (1 << FSCACHE_COOKIE_CREATING) | | ||
178 | (1 << FSCACHE_COOKIE_NO_DATA_YET); | ||
179 | |||
180 | /* ask the cache to allocate objects for this cookie and its parent | ||
181 | * chain */ | ||
182 | ret = fscache_alloc_object(cache, cookie); | ||
183 | if (ret < 0) { | ||
184 | up_read(&fscache_addremove_sem); | ||
185 | _leave(" = %d", ret); | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | /* pass on how big the object we're caching is supposed to be */ | ||
190 | cookie->def->get_attr(cookie->netfs_data, &i_size); | ||
191 | |||
192 | spin_lock(&cookie->lock); | ||
193 | if (hlist_empty(&cookie->backing_objects)) { | ||
194 | spin_unlock(&cookie->lock); | ||
195 | goto unavailable; | ||
196 | } | ||
197 | |||
198 | object = hlist_entry(cookie->backing_objects.first, | ||
199 | struct fscache_object, cookie_link); | ||
200 | |||
201 | fscache_set_store_limit(object, i_size); | ||
202 | |||
203 | /* initiate the process of looking up all the objects in the chain | ||
204 | * (done by fscache_initialise_object()) */ | ||
205 | fscache_enqueue_object(object); | ||
206 | |||
207 | spin_unlock(&cookie->lock); | ||
208 | |||
209 | /* we may be required to wait for lookup to complete at this point */ | ||
210 | if (!fscache_defer_lookup) { | ||
211 | _debug("non-deferred lookup %p", &cookie->flags); | ||
212 | wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, | ||
213 | fscache_wait_bit, TASK_UNINTERRUPTIBLE); | ||
214 | _debug("complete"); | ||
215 | if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags)) | ||
216 | goto unavailable; | ||
217 | } | ||
218 | |||
219 | up_read(&fscache_addremove_sem); | ||
220 | _leave(" = 0 [deferred]"); | ||
221 | return 0; | ||
222 | |||
223 | unavailable: | ||
224 | up_read(&fscache_addremove_sem); | ||
225 | _leave(" = -ENOBUFS"); | ||
226 | return -ENOBUFS; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * recursively allocate cache object records for a cookie/cache combination | ||
231 | * - caller must be holding the addremove sem | ||
232 | */ | ||
233 | static int fscache_alloc_object(struct fscache_cache *cache, | ||
234 | struct fscache_cookie *cookie) | ||
235 | { | ||
236 | struct fscache_object *object; | ||
237 | struct hlist_node *_n; | ||
238 | int ret; | ||
239 | |||
240 | _enter("%p,%p{%s}", cache, cookie, cookie->def->name); | ||
241 | |||
242 | spin_lock(&cookie->lock); | ||
243 | hlist_for_each_entry(object, _n, &cookie->backing_objects, | ||
244 | cookie_link) { | ||
245 | if (object->cache == cache) | ||
246 | goto object_already_extant; | ||
247 | } | ||
248 | spin_unlock(&cookie->lock); | ||
249 | |||
250 | /* ask the cache to allocate an object (we may end up with duplicate | ||
251 | * objects at this stage, but we sort that out later) */ | ||
252 | object = cache->ops->alloc_object(cache, cookie); | ||
253 | if (IS_ERR(object)) { | ||
254 | fscache_stat(&fscache_n_object_no_alloc); | ||
255 | ret = PTR_ERR(object); | ||
256 | goto error; | ||
257 | } | ||
258 | |||
259 | fscache_stat(&fscache_n_object_alloc); | ||
260 | |||
261 | object->debug_id = atomic_inc_return(&fscache_object_debug_id); | ||
262 | |||
263 | _debug("ALLOC OBJ%x: %s {%lx}", | ||
264 | object->debug_id, cookie->def->name, object->events); | ||
265 | |||
266 | ret = fscache_alloc_object(cache, cookie->parent); | ||
267 | if (ret < 0) | ||
268 | goto error_put; | ||
269 | |||
270 | /* only attach if we managed to allocate all we needed, otherwise | ||
271 | * discard the object we just allocated and instead use the one | ||
272 | * attached to the cookie */ | ||
273 | if (fscache_attach_object(cookie, object) < 0) | ||
274 | cache->ops->put_object(object); | ||
275 | |||
276 | _leave(" = 0"); | ||
277 | return 0; | ||
278 | |||
279 | object_already_extant: | ||
280 | ret = -ENOBUFS; | ||
281 | if (object->state >= FSCACHE_OBJECT_DYING) { | ||
282 | spin_unlock(&cookie->lock); | ||
283 | goto error; | ||
284 | } | ||
285 | spin_unlock(&cookie->lock); | ||
286 | _leave(" = 0 [found]"); | ||
287 | return 0; | ||
288 | |||
289 | error_put: | ||
290 | cache->ops->put_object(object); | ||
291 | error: | ||
292 | _leave(" = %d", ret); | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * attach a cache object to a cookie | ||
298 | */ | ||
299 | static int fscache_attach_object(struct fscache_cookie *cookie, | ||
300 | struct fscache_object *object) | ||
301 | { | ||
302 | struct fscache_object *p; | ||
303 | struct fscache_cache *cache = object->cache; | ||
304 | struct hlist_node *_n; | ||
305 | int ret; | ||
306 | |||
307 | _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); | ||
308 | |||
309 | spin_lock(&cookie->lock); | ||
310 | |||
311 | /* there may be multiple initial creations of this object, but we only | ||
312 | * want one */ | ||
313 | ret = -EEXIST; | ||
314 | hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) { | ||
315 | if (p->cache == object->cache) { | ||
316 | if (p->state >= FSCACHE_OBJECT_DYING) | ||
317 | ret = -ENOBUFS; | ||
318 | goto cant_attach_object; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | /* pin the parent object */ | ||
323 | spin_lock_nested(&cookie->parent->lock, 1); | ||
324 | hlist_for_each_entry(p, _n, &cookie->parent->backing_objects, | ||
325 | cookie_link) { | ||
326 | if (p->cache == object->cache) { | ||
327 | if (p->state >= FSCACHE_OBJECT_DYING) { | ||
328 | ret = -ENOBUFS; | ||
329 | spin_unlock(&cookie->parent->lock); | ||
330 | goto cant_attach_object; | ||
331 | } | ||
332 | object->parent = p; | ||
333 | spin_lock(&p->lock); | ||
334 | p->n_children++; | ||
335 | spin_unlock(&p->lock); | ||
336 | break; | ||
337 | } | ||
338 | } | ||
339 | spin_unlock(&cookie->parent->lock); | ||
340 | |||
341 | /* attach to the cache's object list */ | ||
342 | if (list_empty(&object->cache_link)) { | ||
343 | spin_lock(&cache->object_list_lock); | ||
344 | list_add(&object->cache_link, &cache->object_list); | ||
345 | spin_unlock(&cache->object_list_lock); | ||
346 | } | ||
347 | |||
348 | /* attach to the cookie */ | ||
349 | object->cookie = cookie; | ||
350 | atomic_inc(&cookie->usage); | ||
351 | hlist_add_head(&object->cookie_link, &cookie->backing_objects); | ||
352 | ret = 0; | ||
353 | |||
354 | cant_attach_object: | ||
355 | spin_unlock(&cookie->lock); | ||
356 | _leave(" = %d", ret); | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * update the index entries backing a cookie | ||
362 | */ | ||
363 | void __fscache_update_cookie(struct fscache_cookie *cookie) | ||
364 | { | ||
365 | struct fscache_object *object; | ||
366 | struct hlist_node *_p; | ||
367 | |||
368 | fscache_stat(&fscache_n_updates); | ||
369 | |||
370 | if (!cookie) { | ||
371 | fscache_stat(&fscache_n_updates_null); | ||
372 | _leave(" [no cookie]"); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | _enter("{%s}", cookie->def->name); | ||
377 | |||
378 | BUG_ON(!cookie->def->get_aux); | ||
379 | |||
380 | spin_lock(&cookie->lock); | ||
381 | |||
382 | /* update the index entry on disk in each cache backing this cookie */ | ||
383 | hlist_for_each_entry(object, _p, | ||
384 | &cookie->backing_objects, cookie_link) { | ||
385 | fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); | ||
386 | } | ||
387 | |||
388 | spin_unlock(&cookie->lock); | ||
389 | _leave(""); | ||
390 | } | ||
391 | EXPORT_SYMBOL(__fscache_update_cookie); | ||
392 | |||
393 | /* | ||
394 | * release a cookie back to the cache | ||
395 | * - the object will be marked as recyclable on disk if retire is true | ||
396 | * - all dependents of this cookie must have already been unregistered | ||
397 | * (indices/files/pages) | ||
398 | */ | ||
399 | void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) | ||
400 | { | ||
401 | struct fscache_cache *cache; | ||
402 | struct fscache_object *object; | ||
403 | unsigned long event; | ||
404 | |||
405 | fscache_stat(&fscache_n_relinquishes); | ||
406 | |||
407 | if (!cookie) { | ||
408 | fscache_stat(&fscache_n_relinquishes_null); | ||
409 | _leave(" [no cookie]"); | ||
410 | return; | ||
411 | } | ||
412 | |||
413 | _enter("%p{%s,%p},%d", | ||
414 | cookie, cookie->def->name, cookie->netfs_data, retire); | ||
415 | |||
416 | if (atomic_read(&cookie->n_children) != 0) { | ||
417 | printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n", | ||
418 | cookie->def->name); | ||
419 | BUG(); | ||
420 | } | ||
421 | |||
422 | /* wait for the cookie to finish being instantiated (or to fail) */ | ||
423 | if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { | ||
424 | fscache_stat(&fscache_n_relinquishes_waitcrt); | ||
425 | wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, | ||
426 | fscache_wait_bit, TASK_UNINTERRUPTIBLE); | ||
427 | } | ||
428 | |||
429 | event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; | ||
430 | |||
431 | /* detach pointers back to the netfs */ | ||
432 | spin_lock(&cookie->lock); | ||
433 | |||
434 | cookie->netfs_data = NULL; | ||
435 | cookie->def = NULL; | ||
436 | |||
437 | /* break links with all the active objects */ | ||
438 | while (!hlist_empty(&cookie->backing_objects)) { | ||
439 | object = hlist_entry(cookie->backing_objects.first, | ||
440 | struct fscache_object, | ||
441 | cookie_link); | ||
442 | |||
443 | _debug("RELEASE OBJ%x", object->debug_id); | ||
444 | |||
445 | /* detach each cache object from the object cookie */ | ||
446 | spin_lock(&object->lock); | ||
447 | hlist_del_init(&object->cookie_link); | ||
448 | |||
449 | cache = object->cache; | ||
450 | object->cookie = NULL; | ||
451 | fscache_raise_event(object, event); | ||
452 | spin_unlock(&object->lock); | ||
453 | |||
454 | if (atomic_dec_and_test(&cookie->usage)) | ||
455 | /* the cookie refcount shouldn't be reduced to 0 yet */ | ||
456 | BUG(); | ||
457 | } | ||
458 | |||
459 | spin_unlock(&cookie->lock); | ||
460 | |||
461 | if (cookie->parent) { | ||
462 | ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0); | ||
463 | ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0); | ||
464 | atomic_dec(&cookie->parent->n_children); | ||
465 | } | ||
466 | |||
467 | /* finally dispose of the cookie */ | ||
468 | ASSERTCMP(atomic_read(&cookie->usage), >, 0); | ||
469 | fscache_cookie_put(cookie); | ||
470 | |||
471 | _leave(""); | ||
472 | } | ||
473 | EXPORT_SYMBOL(__fscache_relinquish_cookie); | ||
474 | |||
475 | /* | ||
476 | * destroy a cookie | ||
477 | */ | ||
478 | void __fscache_cookie_put(struct fscache_cookie *cookie) | ||
479 | { | ||
480 | struct fscache_cookie *parent; | ||
481 | |||
482 | _enter("%p", cookie); | ||
483 | |||
484 | for (;;) { | ||
485 | _debug("FREE COOKIE %p", cookie); | ||
486 | parent = cookie->parent; | ||
487 | BUG_ON(!hlist_empty(&cookie->backing_objects)); | ||
488 | kmem_cache_free(fscache_cookie_jar, cookie); | ||
489 | |||
490 | if (!parent) | ||
491 | break; | ||
492 | |||
493 | cookie = parent; | ||
494 | BUG_ON(atomic_read(&cookie->usage) <= 0); | ||
495 | if (!atomic_dec_and_test(&cookie->usage)) | ||
496 | break; | ||
497 | } | ||
498 | |||
499 | _leave(""); | ||
500 | } | ||
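
By way of example, a hedged sketch (not part of this commit) of the netfs side of the cookie API above: a cookie definition supplies get_key() and get_attr(), and fscache_acquire_cookie() / fscache_relinquish_cookie() bracket the cached object's lifetime. The myfs_* structures are hypothetical; see Documentation/filesystems/caching/netfs-api.txt for the full definition.

	#include <linux/fscache.h>
	#include <linux/string.h>

	struct myfs_inode {
		u64			fileid;		/* hypothetical on-the-wire file ID */
		u64			size;
		struct fscache_cookie	*cookie;
	};

	static uint16_t myfs_get_key(const void *netfs_data, void *buf, uint16_t bufmax)
	{
		const struct myfs_inode *inode = netfs_data;

		if (sizeof(inode->fileid) > bufmax)
			return 0;
		memcpy(buf, &inode->fileid, sizeof(inode->fileid));
		return sizeof(inode->fileid);
	}

	static void myfs_get_attr(const void *netfs_data, uint64_t *size)
	{
		const struct myfs_inode *inode = netfs_data;

		*size = inode->size;
	}

	static const struct fscache_cookie_def myfs_file_def = {
		.name		= "myfs.file",
		.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
		.get_key	= myfs_get_key,
		.get_attr	= myfs_get_attr,
	};

	/* acquire on open; errors are hidden - a NULL cookie just disables caching */
	static void myfs_open(struct myfs_inode *inode, struct fscache_cookie *primary_index)
	{
		inode->cookie = fscache_acquire_cookie(primary_index, &myfs_file_def, inode);
	}

	/* release on final close; pass retire=1 instead to discard the cached data */
	static void myfs_release(struct myfs_inode *inode)
	{
		fscache_relinquish_cookie(inode->cookie, 0);
		inode->cookie = NULL;
	}
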
diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c
new file mode 100644
index 000000000000..f5b4baee7352
--- /dev/null
+++ b/fs/fscache/fsdef.c
@@ -0,0 +1,144 @@
1 | /* Filesystem index definition | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL CACHE | ||
13 | #include <linux/module.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data, | ||
17 | void *buffer, uint16_t bufmax); | ||
18 | |||
19 | static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data, | ||
20 | void *buffer, uint16_t bufmax); | ||
21 | |||
22 | static | ||
23 | enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data, | ||
24 | const void *data, | ||
25 | uint16_t datalen); | ||
26 | |||
27 | /* | ||
28 | * The root index is owned by FS-Cache itself. | ||
29 | * | ||
30 | * When a netfs requests caching facilities, FS-Cache will, if one doesn't | ||
31 | * already exist, create an entry in the root index with the key being the name | ||
32 | * of the netfs ("AFS" for example), and the auxiliary data holding the index | ||
33 | * structure version supplied by the netfs: | ||
34 | * | ||
35 | * FSDEF | ||
36 | * | | ||
37 | * +-----------+ | ||
38 | * | | | ||
39 | * NFS AFS | ||
40 | * [v=1] [v=1] | ||
41 | * | ||
42 | * If an entry with the appropriate name does already exist, the version is | ||
43 | * compared. If the version is different, the entire subtree from that entry | ||
44 | * will be discarded and a new entry created. | ||
45 | * | ||
46 | * The new entry will be an index, and a cookie referring to it will be passed | ||
47 | * to the netfs. This is then the root handle by which the netfs accesses the | ||
48 | * cache. It can create whatever objects it likes in that index, including | ||
49 | * further indices. | ||
50 | */ | ||
51 | static struct fscache_cookie_def fscache_fsdef_index_def = { | ||
52 | .name = ".FS-Cache", | ||
53 | .type = FSCACHE_COOKIE_TYPE_INDEX, | ||
54 | }; | ||
55 | |||
56 | struct fscache_cookie fscache_fsdef_index = { | ||
57 | .usage = ATOMIC_INIT(1), | ||
58 | .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock), | ||
59 | .backing_objects = HLIST_HEAD_INIT, | ||
60 | .def = &fscache_fsdef_index_def, | ||
61 | }; | ||
62 | EXPORT_SYMBOL(fscache_fsdef_index); | ||
63 | |||
64 | /* | ||
65 | * Definition of an entry in the root index. Each entry is an index, keyed to | ||
66 | * a specific netfs and only applicable to a particular version of the index | ||
67 | * structure used by that netfs. | ||
68 | */ | ||
69 | struct fscache_cookie_def fscache_fsdef_netfs_def = { | ||
70 | .name = "FSDEF.netfs", | ||
71 | .type = FSCACHE_COOKIE_TYPE_INDEX, | ||
72 | .get_key = fscache_fsdef_netfs_get_key, | ||
73 | .get_aux = fscache_fsdef_netfs_get_aux, | ||
74 | .check_aux = fscache_fsdef_netfs_check_aux, | ||
75 | }; | ||
76 | |||
77 | /* | ||
78 | * get the key data for an FSDEF index record - this is the name of the netfs | ||
79 | * for which this entry is created | ||
80 | */ | ||
81 | static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data, | ||
82 | void *buffer, uint16_t bufmax) | ||
83 | { | ||
84 | const struct fscache_netfs *netfs = cookie_netfs_data; | ||
85 | unsigned klen; | ||
86 | |||
87 | _enter("{%s.%u},", netfs->name, netfs->version); | ||
88 | |||
89 | klen = strlen(netfs->name); | ||
90 | if (klen > bufmax) | ||
91 | return 0; | ||
92 | |||
93 | memcpy(buffer, netfs->name, klen); | ||
94 | return klen; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * get the auxiliary data for an FSDEF index record - this is the index | ||
99 | * structure version number of the netfs for which this version is created | ||
100 | */ | ||
101 | static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data, | ||
102 | void *buffer, uint16_t bufmax) | ||
103 | { | ||
104 | const struct fscache_netfs *netfs = cookie_netfs_data; | ||
105 | unsigned dlen; | ||
106 | |||
107 | _enter("{%s.%u},", netfs->name, netfs->version); | ||
108 | |||
109 | dlen = sizeof(uint32_t); | ||
110 | if (dlen > bufmax) | ||
111 | return 0; | ||
112 | |||
113 | memcpy(buffer, &netfs->version, dlen); | ||
114 | return dlen; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * check that the index structure version number stored in the auxiliary data | ||
119 | * matches the one the netfs gave us | ||
120 | */ | ||
121 | static enum fscache_checkaux fscache_fsdef_netfs_check_aux( | ||
122 | void *cookie_netfs_data, | ||
123 | const void *data, | ||
124 | uint16_t datalen) | ||
125 | { | ||
126 | struct fscache_netfs *netfs = cookie_netfs_data; | ||
127 | uint32_t version; | ||
128 | |||
129 | _enter("{%s},,%hu", netfs->name, datalen); | ||
130 | |||
131 | if (datalen != sizeof(version)) { | ||
132 | _leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version)); | ||
133 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
134 | } | ||
135 | |||
136 | memcpy(&version, data, sizeof(version)); | ||
137 | if (version != netfs->version) { | ||
138 | _leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version); | ||
139 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
140 | } | ||
141 | |||
142 | _leave(" = OKAY"); | ||
143 | return FSCACHE_CHECKAUX_OKAY; | ||
144 | } | ||
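
To make the FSDEF diagram above concrete, a hedged sketch (not part of this commit) of how a netfs obtains its entry under the root index: it registers a struct fscache_netfs carrying its name and index-structure version, and gets the primary index cookie back for use as the parent of its own cookies. The myfs names are hypothetical; registration itself is implemented in fs/fscache/netfs.c.

	#include <linux/fscache.h>

	static struct fscache_netfs myfs_cache_netfs = {
		.name		= "myfs",
		.version	= 1,	/* bumping this discards the old "myfs" subtree */
	};

	static int myfs_start_caching(void)
	{
		/* creates or revalidates the "myfs" entry under FSDEF and stores
		 * the primary index cookie in myfs_cache_netfs.primary_index */
		return fscache_register_netfs(&myfs_cache_netfs);
	}

	static void myfs_stop_caching(void)
	{
		fscache_unregister_netfs(&myfs_cache_netfs);
	}
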
diff --git a/fs/fscache/histogram.c b/fs/fscache/histogram.c
new file mode 100644
index 000000000000..bad496748a59
--- /dev/null
+++ b/fs/fscache/histogram.c
@@ -0,0 +1,109 @@
1 | /* FS-Cache latency histogram | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL THREAD | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include "internal.h" | ||
17 | |||
18 | atomic_t fscache_obj_instantiate_histogram[HZ]; | ||
19 | atomic_t fscache_objs_histogram[HZ]; | ||
20 | atomic_t fscache_ops_histogram[HZ]; | ||
21 | atomic_t fscache_retrieval_delay_histogram[HZ]; | ||
22 | atomic_t fscache_retrieval_histogram[HZ]; | ||
23 | |||
24 | /* | ||
25 | * display the time-taken histogram | ||
26 | */ | ||
27 | static int fscache_histogram_show(struct seq_file *m, void *v) | ||
28 | { | ||
29 | unsigned long index; | ||
30 | unsigned n[5], t; | ||
31 | |||
32 | switch ((unsigned long) v) { | ||
33 | case 1: | ||
34 | seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS " | ||
35 | " RETRV DLY RETRIEVLS\n"); | ||
36 | return 0; | ||
37 | case 2: | ||
38 | seq_puts(m, "===== ===== ========= ========= =========" | ||
39 | " ========= =========\n"); | ||
40 | return 0; | ||
41 | default: | ||
42 | index = (unsigned long) v - 3; | ||
43 | n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]); | ||
44 | n[1] = atomic_read(&fscache_ops_histogram[index]); | ||
45 | n[2] = atomic_read(&fscache_objs_histogram[index]); | ||
46 | n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]); | ||
47 | n[4] = atomic_read(&fscache_retrieval_histogram[index]); | ||
48 | if (!(n[0] | n[1] | n[2] | n[3] | n[4])) | ||
49 | return 0; | ||
50 | |||
51 | t = (index * 1000) / HZ; | ||
52 | |||
53 | seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n", | ||
54 | index, t, n[0], n[1], n[2], n[3], n[4]); | ||
55 | return 0; | ||
56 | } | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * set up the iterator to start reading from the first line | ||
61 | */ | ||
62 | static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos) | ||
63 | { | ||
64 | if ((unsigned long long)*_pos >= HZ + 2) | ||
65 | return NULL; | ||
66 | if (*_pos == 0) | ||
67 | *_pos = 1; | ||
68 | return (void *)(unsigned long) *_pos; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * move to the next line | ||
73 | */ | ||
74 | static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos) | ||
75 | { | ||
76 | (*pos)++; | ||
77 | return (unsigned long long)*pos > HZ + 2 ? | ||
78 | NULL : (void *)(unsigned long) *pos; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * clean up after reading | ||
83 | */ | ||
84 | static void fscache_histogram_stop(struct seq_file *m, void *v) | ||
85 | { | ||
86 | } | ||
87 | |||
88 | static const struct seq_operations fscache_histogram_ops = { | ||
89 | .start = fscache_histogram_start, | ||
90 | .stop = fscache_histogram_stop, | ||
91 | .next = fscache_histogram_next, | ||
92 | .show = fscache_histogram_show, | ||
93 | }; | ||
94 | |||
95 | /* | ||
96 | * open "/proc/fs/fscache/histogram" to provide latency data | ||
97 | */ | ||
98 | static int fscache_histogram_open(struct inode *inode, struct file *file) | ||
99 | { | ||
100 | return seq_open(file, &fscache_histogram_ops); | ||
101 | } | ||
102 | |||
103 | const struct file_operations fscache_histogram_fops = { | ||
104 | .owner = THIS_MODULE, | ||
105 | .open = fscache_histogram_open, | ||
106 | .read = seq_read, | ||
107 | .llseek = seq_lseek, | ||
108 | .release = seq_release, | ||
109 | }; | ||
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
new file mode 100644
index 000000000000..e0cbd16f6dc9
--- /dev/null
+++ b/fs/fscache/internal.h
@@ -0,0 +1,380 @@
1 | /* Internal definitions for FS-Cache | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * Lock order, in the order in which multiple locks should be obtained: | ||
14 | * - fscache_addremove_sem | ||
15 | * - cookie->lock | ||
16 | * - cookie->parent->lock | ||
17 | * - cache->object_list_lock | ||
18 | * - object->lock | ||
19 | * - object->parent->lock | ||
20 | * - fscache_thread_lock | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/fscache-cache.h> | ||
25 | #include <linux/sched.h> | ||
26 | |||
27 | #define FSCACHE_MIN_THREADS 4 | ||
28 | #define FSCACHE_MAX_THREADS 32 | ||
29 | |||
30 | /* | ||
31 | * fsc-cache.c | ||
32 | */ | ||
33 | extern struct list_head fscache_cache_list; | ||
34 | extern struct rw_semaphore fscache_addremove_sem; | ||
35 | |||
36 | extern struct fscache_cache *fscache_select_cache_for_object( | ||
37 | struct fscache_cookie *); | ||
38 | |||
39 | /* | ||
40 | * fsc-cookie.c | ||
41 | */ | ||
42 | extern struct kmem_cache *fscache_cookie_jar; | ||
43 | |||
44 | extern void fscache_cookie_init_once(void *); | ||
45 | extern void __fscache_cookie_put(struct fscache_cookie *); | ||
46 | |||
47 | /* | ||
48 | * fsc-fsdef.c | ||
49 | */ | ||
50 | extern struct fscache_cookie fscache_fsdef_index; | ||
51 | extern struct fscache_cookie_def fscache_fsdef_netfs_def; | ||
52 | |||
53 | /* | ||
54 | * fsc-histogram.c | ||
55 | */ | ||
56 | #ifdef CONFIG_FSCACHE_HISTOGRAM | ||
57 | extern atomic_t fscache_obj_instantiate_histogram[HZ]; | ||
58 | extern atomic_t fscache_objs_histogram[HZ]; | ||
59 | extern atomic_t fscache_ops_histogram[HZ]; | ||
60 | extern atomic_t fscache_retrieval_delay_histogram[HZ]; | ||
61 | extern atomic_t fscache_retrieval_histogram[HZ]; | ||
62 | |||
63 | static inline void fscache_hist(atomic_t histogram[], unsigned long start_jif) | ||
64 | { | ||
65 | unsigned long jif = jiffies - start_jif; | ||
66 | if (jif >= HZ) | ||
67 | jif = HZ - 1; | ||
68 | atomic_inc(&histogram[jif]); | ||
69 | } | ||
70 | |||
71 | extern const struct file_operations fscache_histogram_fops; | ||
72 | |||
73 | #else | ||
74 | #define fscache_hist(hist, start_jif) do {} while (0) | ||
75 | #endif | ||
76 | |||
77 | /* | ||
78 | * fsc-main.c | ||
79 | */ | ||
80 | extern unsigned fscache_defer_lookup; | ||
81 | extern unsigned fscache_defer_create; | ||
82 | extern unsigned fscache_debug; | ||
83 | extern struct kobject *fscache_root; | ||
84 | |||
85 | extern int fscache_wait_bit(void *); | ||
86 | extern int fscache_wait_bit_interruptible(void *); | ||
87 | |||
88 | /* | ||
89 | * fsc-object.c | ||
90 | */ | ||
91 | extern void fscache_withdrawing_object(struct fscache_cache *, | ||
92 | struct fscache_object *); | ||
93 | extern void fscache_enqueue_object(struct fscache_object *); | ||
94 | |||
95 | /* | ||
96 | * fsc-operation.c | ||
97 | */ | ||
98 | extern int fscache_submit_exclusive_op(struct fscache_object *, | ||
99 | struct fscache_operation *); | ||
100 | extern int fscache_submit_op(struct fscache_object *, | ||
101 | struct fscache_operation *); | ||
102 | extern void fscache_abort_object(struct fscache_object *); | ||
103 | extern void fscache_start_operations(struct fscache_object *); | ||
104 | extern void fscache_operation_gc(struct work_struct *); | ||
105 | |||
106 | /* | ||
107 | * fsc-proc.c | ||
108 | */ | ||
109 | #ifdef CONFIG_PROC_FS | ||
110 | extern int __init fscache_proc_init(void); | ||
111 | extern void fscache_proc_cleanup(void); | ||
112 | #else | ||
113 | #define fscache_proc_init() (0) | ||
114 | #define fscache_proc_cleanup() do {} while (0) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
118 | * fsc-stats.c | ||
119 | */ | ||
120 | #ifdef CONFIG_FSCACHE_STATS | ||
121 | extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; | ||
122 | extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; | ||
123 | |||
124 | extern atomic_t fscache_n_op_pend; | ||
125 | extern atomic_t fscache_n_op_run; | ||
126 | extern atomic_t fscache_n_op_enqueue; | ||
127 | extern atomic_t fscache_n_op_deferred_release; | ||
128 | extern atomic_t fscache_n_op_release; | ||
129 | extern atomic_t fscache_n_op_gc; | ||
130 | |||
131 | extern atomic_t fscache_n_attr_changed; | ||
132 | extern atomic_t fscache_n_attr_changed_ok; | ||
133 | extern atomic_t fscache_n_attr_changed_nobufs; | ||
134 | extern atomic_t fscache_n_attr_changed_nomem; | ||
135 | extern atomic_t fscache_n_attr_changed_calls; | ||
136 | |||
137 | extern atomic_t fscache_n_allocs; | ||
138 | extern atomic_t fscache_n_allocs_ok; | ||
139 | extern atomic_t fscache_n_allocs_wait; | ||
140 | extern atomic_t fscache_n_allocs_nobufs; | ||
141 | extern atomic_t fscache_n_alloc_ops; | ||
142 | extern atomic_t fscache_n_alloc_op_waits; | ||
143 | |||
144 | extern atomic_t fscache_n_retrievals; | ||
145 | extern atomic_t fscache_n_retrievals_ok; | ||
146 | extern atomic_t fscache_n_retrievals_wait; | ||
147 | extern atomic_t fscache_n_retrievals_nodata; | ||
148 | extern atomic_t fscache_n_retrievals_nobufs; | ||
149 | extern atomic_t fscache_n_retrievals_intr; | ||
150 | extern atomic_t fscache_n_retrievals_nomem; | ||
151 | extern atomic_t fscache_n_retrieval_ops; | ||
152 | extern atomic_t fscache_n_retrieval_op_waits; | ||
153 | |||
154 | extern atomic_t fscache_n_stores; | ||
155 | extern atomic_t fscache_n_stores_ok; | ||
156 | extern atomic_t fscache_n_stores_again; | ||
157 | extern atomic_t fscache_n_stores_nobufs; | ||
158 | extern atomic_t fscache_n_stores_oom; | ||
159 | extern atomic_t fscache_n_store_ops; | ||
160 | extern atomic_t fscache_n_store_calls; | ||
161 | |||
162 | extern atomic_t fscache_n_marks; | ||
163 | extern atomic_t fscache_n_uncaches; | ||
164 | |||
165 | extern atomic_t fscache_n_acquires; | ||
166 | extern atomic_t fscache_n_acquires_null; | ||
167 | extern atomic_t fscache_n_acquires_no_cache; | ||
168 | extern atomic_t fscache_n_acquires_ok; | ||
169 | extern atomic_t fscache_n_acquires_nobufs; | ||
170 | extern atomic_t fscache_n_acquires_oom; | ||
171 | |||
172 | extern atomic_t fscache_n_updates; | ||
173 | extern atomic_t fscache_n_updates_null; | ||
174 | extern atomic_t fscache_n_updates_run; | ||
175 | |||
176 | extern atomic_t fscache_n_relinquishes; | ||
177 | extern atomic_t fscache_n_relinquishes_null; | ||
178 | extern atomic_t fscache_n_relinquishes_waitcrt; | ||
179 | |||
180 | extern atomic_t fscache_n_cookie_index; | ||
181 | extern atomic_t fscache_n_cookie_data; | ||
182 | extern atomic_t fscache_n_cookie_special; | ||
183 | |||
184 | extern atomic_t fscache_n_object_alloc; | ||
185 | extern atomic_t fscache_n_object_no_alloc; | ||
186 | extern atomic_t fscache_n_object_lookups; | ||
187 | extern atomic_t fscache_n_object_lookups_negative; | ||
188 | extern atomic_t fscache_n_object_lookups_positive; | ||
189 | extern atomic_t fscache_n_object_created; | ||
190 | extern atomic_t fscache_n_object_avail; | ||
191 | extern atomic_t fscache_n_object_dead; | ||
192 | |||
193 | extern atomic_t fscache_n_checkaux_none; | ||
194 | extern atomic_t fscache_n_checkaux_okay; | ||
195 | extern atomic_t fscache_n_checkaux_update; | ||
196 | extern atomic_t fscache_n_checkaux_obsolete; | ||
197 | |||
198 | static inline void fscache_stat(atomic_t *stat) | ||
199 | { | ||
200 | atomic_inc(stat); | ||
201 | } | ||
202 | |||
203 | extern const struct file_operations fscache_stats_fops; | ||
204 | #else | ||
205 | |||
206 | #define fscache_stat(stat) do {} while (0) | ||
207 | #endif | ||
208 | |||
209 | /* | ||
210 | * raise an event on an object | ||
211 | * - if the event is not masked for that object, then the object is | ||
212 | * queued for attention by the thread pool. | ||
213 | */ | ||
214 | static inline void fscache_raise_event(struct fscache_object *object, | ||
215 | unsigned event) | ||
216 | { | ||
217 | if (!test_and_set_bit(event, &object->events) && | ||
218 | test_bit(event, &object->event_mask)) | ||
219 | fscache_enqueue_object(object); | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * drop a reference to a cookie | ||
224 | */ | ||
225 | static inline void fscache_cookie_put(struct fscache_cookie *cookie) | ||
226 | { | ||
227 | BUG_ON(atomic_read(&cookie->usage) <= 0); | ||
228 | if (atomic_dec_and_test(&cookie->usage)) | ||
229 | __fscache_cookie_put(cookie); | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * get an extra reference to a netfs retrieval context | ||
234 | */ | ||
235 | static inline | ||
236 | void *fscache_get_context(struct fscache_cookie *cookie, void *context) | ||
237 | { | ||
238 | if (cookie->def->get_context) | ||
239 | cookie->def->get_context(cookie->netfs_data, context); | ||
240 | return context; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * release a reference to a netfs retrieval context | ||
245 | */ | ||
246 | static inline | ||
247 | void fscache_put_context(struct fscache_cookie *cookie, void *context) | ||
248 | { | ||
249 | if (cookie->def->put_context) | ||
250 | cookie->def->put_context(cookie->netfs_data, context); | ||
251 | } | ||
252 | |||
253 | /*****************************************************************************/ | ||
254 | /* | ||
255 | * debug tracing | ||
256 | */ | ||
257 | #define dbgprintk(FMT, ...) \ | ||
258 | printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) | ||
259 | |||
260 | /* make sure we maintain the format strings, even when debugging is disabled */ | ||
261 | static inline __attribute__((format(printf, 1, 2))) | ||
262 | void _dbprintk(const char *fmt, ...) | ||
263 | { | ||
264 | } | ||
265 | |||
266 | #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__) | ||
267 | #define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__) | ||
268 | #define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__) | ||
269 | |||
270 | #define kjournal(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__) | ||
271 | |||
272 | #ifdef __KDEBUG | ||
273 | #define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__) | ||
274 | #define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__) | ||
275 | #define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__) | ||
276 | |||
277 | #elif defined(CONFIG_FSCACHE_DEBUG) | ||
278 | #define _enter(FMT, ...) \ | ||
279 | do { \ | ||
280 | if (__do_kdebug(ENTER)) \ | ||
281 | kenter(FMT, ##__VA_ARGS__); \ | ||
282 | } while (0) | ||
283 | |||
284 | #define _leave(FMT, ...) \ | ||
285 | do { \ | ||
286 | if (__do_kdebug(LEAVE)) \ | ||
287 | kleave(FMT, ##__VA_ARGS__); \ | ||
288 | } while (0) | ||
289 | |||
290 | #define _debug(FMT, ...) \ | ||
291 | do { \ | ||
292 | if (__do_kdebug(DEBUG)) \ | ||
293 | kdebug(FMT, ##__VA_ARGS__); \ | ||
294 | } while (0) | ||
295 | |||
296 | #else | ||
297 | #define _enter(FMT, ...) _dbprintk("==> %s("FMT")", __func__, ##__VA_ARGS__) | ||
298 | #define _leave(FMT, ...) _dbprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__) | ||
299 | #define _debug(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__) | ||
300 | #endif | ||
301 | |||
302 | /* | ||
303 | * determine whether a particular optional debugging point should be logged | ||
304 | * - we need to go through three steps to persuade cpp to correctly join the | ||
305 | * shorthand in FSCACHE_DEBUG_LEVEL with its prefix | ||
306 | */ | ||
307 | #define ____do_kdebug(LEVEL, POINT) \ | ||
308 | unlikely((fscache_debug & \ | ||
309 | (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3)))) | ||
310 | #define ___do_kdebug(LEVEL, POINT) \ | ||
311 | ____do_kdebug(LEVEL, POINT) | ||
312 | #define __do_kdebug(POINT) \ | ||
313 | ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT) | ||
314 | |||
315 | #define FSCACHE_DEBUG_CACHE 0 | ||
316 | #define FSCACHE_DEBUG_COOKIE 1 | ||
317 | #define FSCACHE_DEBUG_PAGE 2 | ||
318 | #define FSCACHE_DEBUG_OPERATION 3 | ||
319 | |||
320 | #define FSCACHE_POINT_ENTER 1 | ||
321 | #define FSCACHE_POINT_LEAVE 2 | ||
322 | #define FSCACHE_POINT_DEBUG 4 | ||
323 | |||
324 | #ifndef FSCACHE_DEBUG_LEVEL | ||
325 | #define FSCACHE_DEBUG_LEVEL CACHE | ||
326 | #endif | ||
327 | |||
328 | /* | ||
329 | * assertions | ||
330 | */ | ||
331 | #if 1 /* defined(__KDEBUGALL) */ | ||
332 | |||
333 | #define ASSERT(X) \ | ||
334 | do { \ | ||
335 | if (unlikely(!(X))) { \ | ||
336 | printk(KERN_ERR "\n"); \ | ||
337 | printk(KERN_ERR "FS-Cache: Assertion failed\n"); \ | ||
338 | BUG(); \ | ||
339 | } \ | ||
340 | } while (0) | ||
341 | |||
342 | #define ASSERTCMP(X, OP, Y) \ | ||
343 | do { \ | ||
344 | if (unlikely(!((X) OP (Y)))) { \ | ||
345 | printk(KERN_ERR "\n"); \ | ||
346 | printk(KERN_ERR "FS-Cache: Assertion failed\n"); \ | ||
347 | printk(KERN_ERR "%lx " #OP " %lx is false\n", \ | ||
348 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
349 | BUG(); \ | ||
350 | } \ | ||
351 | } while (0) | ||
352 | |||
353 | #define ASSERTIF(C, X) \ | ||
354 | do { \ | ||
355 | if (unlikely((C) && !(X))) { \ | ||
356 | printk(KERN_ERR "\n"); \ | ||
357 | printk(KERN_ERR "FS-Cache: Assertion failed\n"); \ | ||
358 | BUG(); \ | ||
359 | } \ | ||
360 | } while (0) | ||
361 | |||
362 | #define ASSERTIFCMP(C, X, OP, Y) \ | ||
363 | do { \ | ||
364 | if (unlikely((C) && !((X) OP (Y)))) { \ | ||
365 | printk(KERN_ERR "\n"); \ | ||
366 | printk(KERN_ERR "FS-Cache: Assertion failed\n"); \ | ||
367 | printk(KERN_ERR "%lx " #OP " %lx is false\n", \ | ||
368 | (unsigned long)(X), (unsigned long)(Y)); \ | ||
369 | BUG(); \ | ||
370 | } \ | ||
371 | } while (0) | ||
372 | |||
373 | #else | ||
374 | |||
375 | #define ASSERT(X) do {} while (0) | ||
376 | #define ASSERTCMP(X, OP, Y) do {} while (0) | ||
377 | #define ASSERTIF(C, X) do {} while (0) | ||
378 | #define ASSERTIFCMP(C, X, OP, Y) do {} while (0) | ||
379 | |||
380 | #endif /* assert or not */ | ||
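The debug macros above are controlled by the fscache_debug mask that main.c below exposes as the "debug" module parameter: each FSCACHE_DEBUG_* level owns a 3-bit group, and the FSCACHE_POINT_* values select the enter/leave/debug points within that group (this only has an effect when CONFIG_FSCACHE_DEBUG is set). A minimal userspace sketch, not part of the patch, for computing a mask value to write into the parameter; the macro values are simply copied from the header above:

#include <stdio.h>

/* values copied from fs/fscache/internal.h above */
#define FSCACHE_DEBUG_COOKIE	1
#define FSCACHE_DEBUG_PAGE	2
#define FSCACHE_POINT_ENTER	1
#define FSCACHE_POINT_LEAVE	2
#define FSCACHE_POINT_DEBUG	4

int main(void)
{
	/* trace entry/exit in the cookie code and everything in the page
	 * I/O code */
	unsigned mask =
		((FSCACHE_POINT_ENTER | FSCACHE_POINT_LEAVE)
		 << (FSCACHE_DEBUG_COOKIE * 3)) |
		((FSCACHE_POINT_ENTER | FSCACHE_POINT_LEAVE |
		  FSCACHE_POINT_DEBUG) << (FSCACHE_DEBUG_PAGE * 3));

	printf("echo %u >/sys/module/fscache/parameters/debug\n", mask);
	return 0;
}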
diff --git a/fs/fscache/main.c b/fs/fscache/main.c new file mode 100644 index 000000000000..4de41b597499 --- /dev/null +++ b/fs/fscache/main.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* General filesystem local caching manager | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL CACHE | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/completion.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include "internal.h" | ||
19 | |||
20 | MODULE_DESCRIPTION("FS Cache Manager"); | ||
21 | MODULE_AUTHOR("Red Hat, Inc."); | ||
22 | MODULE_LICENSE("GPL"); | ||
23 | |||
24 | unsigned fscache_defer_lookup = 1; | ||
25 | module_param_named(defer_lookup, fscache_defer_lookup, uint, | ||
26 | S_IWUSR | S_IRUGO); | ||
27 | MODULE_PARM_DESC(defer_lookup, | ||
28 | "Defer cookie lookup to background thread"); | ||
29 | |||
30 | unsigned fscache_defer_create = 1; | ||
31 | module_param_named(defer_create, fscache_defer_create, uint, | ||
32 | S_IWUSR | S_IRUGO); | ||
33 | MODULE_PARM_DESC(defer_create, | ||
34 | "Defer cookie creation to background thread"); | ||
35 | |||
36 | unsigned fscache_debug; | ||
37 | module_param_named(debug, fscache_debug, uint, | ||
38 | S_IWUSR | S_IRUGO); | ||
39 | MODULE_PARM_DESC(debug, | ||
40 | "FS-Cache debugging mask"); | ||
41 | |||
42 | struct kobject *fscache_root; | ||
43 | |||
44 | /* | ||
45 | * initialise the fs caching module | ||
46 | */ | ||
47 | static int __init fscache_init(void) | ||
48 | { | ||
49 | int ret; | ||
50 | |||
51 | ret = slow_work_register_user(); | ||
52 | if (ret < 0) | ||
53 | goto error_slow_work; | ||
54 | |||
55 | ret = fscache_proc_init(); | ||
56 | if (ret < 0) | ||
57 | goto error_proc; | ||
58 | |||
59 | fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar", | ||
60 | sizeof(struct fscache_cookie), | ||
61 | 0, | ||
62 | 0, | ||
63 | fscache_cookie_init_once); | ||
64 | if (!fscache_cookie_jar) { | ||
65 | printk(KERN_NOTICE | ||
66 | "FS-Cache: Failed to allocate a cookie jar\n"); | ||
67 | ret = -ENOMEM; | ||
68 | goto error_cookie_jar; | ||
69 | } | ||
70 | |||
71 | fscache_root = kobject_create_and_add("fscache", kernel_kobj); | ||
72 | if (!fscache_root) | ||
73 | goto error_kobj; | ||
74 | |||
75 | printk(KERN_NOTICE "FS-Cache: Loaded\n"); | ||
76 | return 0; | ||
77 | |||
78 | error_kobj: | ||
79 | kmem_cache_destroy(fscache_cookie_jar); | ||
80 | error_cookie_jar: | ||
81 | fscache_proc_cleanup(); | ||
82 | error_proc: | ||
83 | slow_work_unregister_user(); | ||
84 | error_slow_work: | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | fs_initcall(fscache_init); | ||
89 | |||
90 | /* | ||
91 | * clean up on module removal | ||
92 | */ | ||
93 | static void __exit fscache_exit(void) | ||
94 | { | ||
95 | _enter(""); | ||
96 | |||
97 | kobject_put(fscache_root); | ||
98 | kmem_cache_destroy(fscache_cookie_jar); | ||
99 | fscache_proc_cleanup(); | ||
100 | slow_work_unregister_user(); | ||
101 | printk(KERN_NOTICE "FS-Cache: Unloaded\n"); | ||
102 | } | ||
103 | |||
104 | module_exit(fscache_exit); | ||
105 | |||
106 | /* | ||
107 | * wait_on_bit() sleep function for uninterruptible waiting | ||
108 | */ | ||
109 | int fscache_wait_bit(void *flags) | ||
110 | { | ||
111 | schedule(); | ||
112 | return 0; | ||
113 | } | ||
114 | EXPORT_SYMBOL(fscache_wait_bit); | ||
115 | |||
116 | /* | ||
117 | * wait_on_bit() sleep function for interruptible waiting | ||
118 | */ | ||
119 | int fscache_wait_bit_interruptible(void *flags) | ||
120 | { | ||
121 | schedule(); | ||
122 | return signal_pending(current); | ||
123 | } | ||
124 | EXPORT_SYMBOL(fscache_wait_bit_interruptible); | ||
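A sketch of how the two sleep helpers above pair with wait_on_bit(): the wrapper functions are invented for illustration, the real call sites live in cookie.c and page.c (not part of this excerpt), and FSCACHE_COOKIE_LOOKING_UP plus the helper declarations are assumed to come in via internal.h / linux/fscache-cache.h.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include "internal.h"

/* sleep until another thread clears the bit and calls wake_up_bit() on it */
static void example_wait_for_lookup(struct fscache_cookie *cookie)
{
	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
		    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
}

/* interruptible variant: fscache_wait_bit_interruptible() returns non-zero
 * when a signal is pending, and wait_on_bit() hands that back to us */
static int example_wait_for_lookup_interruptible(struct fscache_cookie *cookie)
{
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible, TASK_INTERRUPTIBLE))
		return -ERESTARTSYS;
	return 0;
}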
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c new file mode 100644 index 000000000000..e028b8eb1c40 --- /dev/null +++ b/fs/fscache/netfs.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* FS-Cache netfs (client) registration | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL COOKIE | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include "internal.h" | ||
16 | |||
17 | static LIST_HEAD(fscache_netfs_list); | ||
18 | |||
19 | /* | ||
20 | * register a network filesystem for caching | ||
21 | */ | ||
22 | int __fscache_register_netfs(struct fscache_netfs *netfs) | ||
23 | { | ||
24 | struct fscache_netfs *ptr; | ||
25 | int ret; | ||
26 | |||
27 | _enter("{%s}", netfs->name); | ||
28 | |||
29 | INIT_LIST_HEAD(&netfs->link); | ||
30 | |||
31 | /* allocate a cookie for the primary index */ | ||
32 | netfs->primary_index = | ||
33 | kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); | ||
34 | |||
35 | if (!netfs->primary_index) { | ||
36 | _leave(" = -ENOMEM"); | ||
37 | return -ENOMEM; | ||
38 | } | ||
39 | |||
40 | /* initialise the primary index cookie */ | ||
41 | atomic_set(&netfs->primary_index->usage, 1); | ||
42 | atomic_set(&netfs->primary_index->n_children, 0); | ||
43 | |||
44 | netfs->primary_index->def = &fscache_fsdef_netfs_def; | ||
45 | netfs->primary_index->parent = &fscache_fsdef_index; | ||
46 | netfs->primary_index->netfs_data = netfs; | ||
47 | |||
48 | atomic_inc(&netfs->primary_index->parent->usage); | ||
49 | atomic_inc(&netfs->primary_index->parent->n_children); | ||
50 | |||
51 | spin_lock_init(&netfs->primary_index->lock); | ||
52 | INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); | ||
53 | |||
54 | /* check the netfs type is not already present */ | ||
55 | down_write(&fscache_addremove_sem); | ||
56 | |||
57 | ret = -EEXIST; | ||
58 | list_for_each_entry(ptr, &fscache_netfs_list, link) { | ||
59 | if (strcmp(ptr->name, netfs->name) == 0) | ||
60 | goto already_registered; | ||
61 | } | ||
62 | |||
63 | list_add(&netfs->link, &fscache_netfs_list); | ||
64 | ret = 0; | ||
65 | |||
66 | printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n", | ||
67 | netfs->name); | ||
68 | |||
69 | already_registered: | ||
70 | up_write(&fscache_addremove_sem); | ||
71 | |||
72 | if (ret < 0) { | ||
73 | netfs->primary_index->parent = NULL; | ||
74 | __fscache_cookie_put(netfs->primary_index); | ||
75 | netfs->primary_index = NULL; | ||
76 | } | ||
77 | |||
78 | _leave(" = %d", ret); | ||
79 | return ret; | ||
80 | } | ||
81 | EXPORT_SYMBOL(__fscache_register_netfs); | ||
82 | |||
83 | /* | ||
84 | * unregister a network filesystem from the cache | ||
85 | * - all cookies must have been released first | ||
86 | */ | ||
87 | void __fscache_unregister_netfs(struct fscache_netfs *netfs) | ||
88 | { | ||
89 | _enter("{%s.%u}", netfs->name, netfs->version); | ||
90 | |||
91 | down_write(&fscache_addremove_sem); | ||
92 | |||
93 | list_del(&netfs->link); | ||
94 | fscache_relinquish_cookie(netfs->primary_index, 0); | ||
95 | |||
96 | up_write(&fscache_addremove_sem); | ||
97 | |||
98 | printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n", | ||
99 | netfs->name); | ||
100 | |||
101 | _leave(""); | ||
102 | } | ||
103 | EXPORT_SYMBOL(__fscache_unregister_netfs); | ||
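For context, the netfs side of the registration interface above amounts to filling in a name and version and calling the public wrappers from linux/fscache.h; "examplefs" and the init/exit hooks are invented for illustration, and fscache_register_netfs()/fscache_unregister_netfs() resolve to the __fscache_* functions in this file.

#include <linux/init.h>
#include <linux/fscache.h>

static struct fscache_netfs examplefs_cache_netfs = {
	.name		= "examplefs",
	.version	= 0,
};

static int __init examplefs_init_cache(void)
{
	/* builds the primary index cookie and hangs it off the FS-Cache
	 * fsdef index */
	return fscache_register_netfs(&examplefs_cache_netfs);
}

static void __exit examplefs_exit_cache(void)
{
	/* every other cookie owned by this netfs must already have been
	 * relinquished */
	fscache_unregister_netfs(&examplefs_cache_netfs);
}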
diff --git a/fs/fscache/object.c b/fs/fscache/object.c new file mode 100644 index 000000000000..392a41b1b79d --- /dev/null +++ b/fs/fscache/object.c | |||
@@ -0,0 +1,810 @@ | |||
1 | /* FS-Cache object state machine handler | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * See Documentation/filesystems/caching/object.txt for a description of the | ||
12 | * object state machine and the in-kernel representations. | ||
13 | */ | ||
14 | |||
15 | #define FSCACHE_DEBUG_LEVEL COOKIE | ||
16 | #include <linux/module.h> | ||
17 | #include "internal.h" | ||
18 | |||
19 | const char *fscache_object_states[] = { | ||
20 | [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", | ||
21 | [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", | ||
22 | [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", | ||
23 | [FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE", | ||
24 | [FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE", | ||
25 | [FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING", | ||
26 | [FSCACHE_OBJECT_DYING] = "OBJECT_DYING", | ||
27 | [FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING", | ||
28 | [FSCACHE_OBJECT_ABORT_INIT] = "OBJECT_ABORT_INIT", | ||
29 | [FSCACHE_OBJECT_RELEASING] = "OBJECT_RELEASING", | ||
30 | [FSCACHE_OBJECT_RECYCLING] = "OBJECT_RECYCLING", | ||
31 | [FSCACHE_OBJECT_WITHDRAWING] = "OBJECT_WITHDRAWING", | ||
32 | [FSCACHE_OBJECT_DEAD] = "OBJECT_DEAD", | ||
33 | }; | ||
34 | EXPORT_SYMBOL(fscache_object_states); | ||
35 | |||
36 | static void fscache_object_slow_work_put_ref(struct slow_work *); | ||
37 | static int fscache_object_slow_work_get_ref(struct slow_work *); | ||
38 | static void fscache_object_slow_work_execute(struct slow_work *); | ||
39 | static void fscache_initialise_object(struct fscache_object *); | ||
40 | static void fscache_lookup_object(struct fscache_object *); | ||
41 | static void fscache_object_available(struct fscache_object *); | ||
42 | static void fscache_release_object(struct fscache_object *); | ||
43 | static void fscache_withdraw_object(struct fscache_object *); | ||
44 | static void fscache_enqueue_dependents(struct fscache_object *); | ||
45 | static void fscache_dequeue_object(struct fscache_object *); | ||
46 | |||
47 | const struct slow_work_ops fscache_object_slow_work_ops = { | ||
48 | .get_ref = fscache_object_slow_work_get_ref, | ||
49 | .put_ref = fscache_object_slow_work_put_ref, | ||
50 | .execute = fscache_object_slow_work_execute, | ||
51 | }; | ||
52 | EXPORT_SYMBOL(fscache_object_slow_work_ops); | ||
53 | |||
54 | /* | ||
55 | * we need to notify the parent when an op completes that we had outstanding | ||
56 | * upon it | ||
57 | */ | ||
58 | static inline void fscache_done_parent_op(struct fscache_object *object) | ||
59 | { | ||
60 | struct fscache_object *parent = object->parent; | ||
61 | |||
62 | _enter("OBJ%x {OBJ%x,%x}", | ||
63 | object->debug_id, parent->debug_id, parent->n_ops); | ||
64 | |||
65 | spin_lock_nested(&parent->lock, 1); | ||
66 | parent->n_ops--; | ||
67 | parent->n_obj_ops--; | ||
68 | if (parent->n_ops == 0) | ||
69 | fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); | ||
70 | spin_unlock(&parent->lock); | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * process events that have been sent to an object's state machine | ||
75 | * - initiates parent lookup | ||
76 | * - does object lookup | ||
77 | * - does object creation | ||
78 | * - does object recycling and retirement | ||
79 | * - does object withdrawal | ||
80 | */ | ||
81 | static void fscache_object_state_machine(struct fscache_object *object) | ||
82 | { | ||
83 | enum fscache_object_state new_state; | ||
84 | |||
85 | ASSERT(object != NULL); | ||
86 | |||
87 | _enter("{OBJ%x,%s,%lx}", | ||
88 | object->debug_id, fscache_object_states[object->state], | ||
89 | object->events); | ||
90 | |||
91 | switch (object->state) { | ||
92 | /* wait for the parent object to become ready */ | ||
93 | case FSCACHE_OBJECT_INIT: | ||
94 | object->event_mask = | ||
95 | ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED); | ||
96 | fscache_initialise_object(object); | ||
97 | goto done; | ||
98 | |||
99 | /* look up the object metadata on disk */ | ||
100 | case FSCACHE_OBJECT_LOOKING_UP: | ||
101 | fscache_lookup_object(object); | ||
102 | goto lookup_transit; | ||
103 | |||
104 | /* create the object metadata on disk */ | ||
105 | case FSCACHE_OBJECT_CREATING: | ||
106 | fscache_lookup_object(object); | ||
107 | goto lookup_transit; | ||
108 | |||
109 | /* handle an object becoming available; start pending | ||
110 | * operations and queue dependent operations for processing */ | ||
111 | case FSCACHE_OBJECT_AVAILABLE: | ||
112 | fscache_object_available(object); | ||
113 | goto active_transit; | ||
114 | |||
115 | /* normal running state */ | ||
116 | case FSCACHE_OBJECT_ACTIVE: | ||
117 | goto active_transit; | ||
118 | |||
119 | /* update the object metadata on disk */ | ||
120 | case FSCACHE_OBJECT_UPDATING: | ||
121 | clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); | ||
122 | fscache_stat(&fscache_n_updates_run); | ||
123 | object->cache->ops->update_object(object); | ||
124 | goto active_transit; | ||
125 | |||
126 | /* handle an object dying during lookup or creation */ | ||
127 | case FSCACHE_OBJECT_LC_DYING: | ||
128 | object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); | ||
129 | object->cache->ops->lookup_complete(object); | ||
130 | |||
131 | spin_lock(&object->lock); | ||
132 | object->state = FSCACHE_OBJECT_DYING; | ||
133 | if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, | ||
134 | &object->cookie->flags)) | ||
135 | wake_up_bit(&object->cookie->flags, | ||
136 | FSCACHE_COOKIE_CREATING); | ||
137 | spin_unlock(&object->lock); | ||
138 | |||
139 | fscache_done_parent_op(object); | ||
140 | |||
141 | /* wait for completion of all active operations on this object | ||
142 | * and the death of all child objects of this object */ | ||
143 | case FSCACHE_OBJECT_DYING: | ||
144 | dying: | ||
145 | clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events); | ||
146 | spin_lock(&object->lock); | ||
147 | _debug("dying OBJ%x {%d,%d}", | ||
148 | object->debug_id, object->n_ops, object->n_children); | ||
149 | if (object->n_ops == 0 && object->n_children == 0) { | ||
150 | object->event_mask &= | ||
151 | ~(1 << FSCACHE_OBJECT_EV_CLEARED); | ||
152 | object->event_mask |= | ||
153 | (1 << FSCACHE_OBJECT_EV_WITHDRAW) | | ||
154 | (1 << FSCACHE_OBJECT_EV_RETIRE) | | ||
155 | (1 << FSCACHE_OBJECT_EV_RELEASE) | | ||
156 | (1 << FSCACHE_OBJECT_EV_ERROR); | ||
157 | } else { | ||
158 | object->event_mask &= | ||
159 | ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | | ||
160 | (1 << FSCACHE_OBJECT_EV_RETIRE) | | ||
161 | (1 << FSCACHE_OBJECT_EV_RELEASE) | | ||
162 | (1 << FSCACHE_OBJECT_EV_ERROR)); | ||
163 | object->event_mask |= | ||
164 | 1 << FSCACHE_OBJECT_EV_CLEARED; | ||
165 | } | ||
166 | spin_unlock(&object->lock); | ||
167 | fscache_enqueue_dependents(object); | ||
168 | goto terminal_transit; | ||
169 | |||
170 | /* handle an abort during initialisation */ | ||
171 | case FSCACHE_OBJECT_ABORT_INIT: | ||
172 | _debug("handle abort init %lx", object->events); | ||
173 | object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); | ||
174 | |||
175 | spin_lock(&object->lock); | ||
176 | fscache_dequeue_object(object); | ||
177 | |||
178 | object->state = FSCACHE_OBJECT_DYING; | ||
179 | if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, | ||
180 | &object->cookie->flags)) | ||
181 | wake_up_bit(&object->cookie->flags, | ||
182 | FSCACHE_COOKIE_CREATING); | ||
183 | spin_unlock(&object->lock); | ||
184 | goto dying; | ||
185 | |||
186 | /* handle the netfs releasing an object and possibly marking it | ||
187 | * obsolete too */ | ||
188 | case FSCACHE_OBJECT_RELEASING: | ||
189 | case FSCACHE_OBJECT_RECYCLING: | ||
190 | object->event_mask &= | ||
191 | ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | | ||
192 | (1 << FSCACHE_OBJECT_EV_RETIRE) | | ||
193 | (1 << FSCACHE_OBJECT_EV_RELEASE) | | ||
194 | (1 << FSCACHE_OBJECT_EV_ERROR)); | ||
195 | fscache_release_object(object); | ||
196 | spin_lock(&object->lock); | ||
197 | object->state = FSCACHE_OBJECT_DEAD; | ||
198 | spin_unlock(&object->lock); | ||
199 | fscache_stat(&fscache_n_object_dead); | ||
200 | goto terminal_transit; | ||
201 | |||
202 | /* handle the parent cache of this object being withdrawn from | ||
203 | * active service */ | ||
204 | case FSCACHE_OBJECT_WITHDRAWING: | ||
205 | object->event_mask &= | ||
206 | ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | | ||
207 | (1 << FSCACHE_OBJECT_EV_RETIRE) | | ||
208 | (1 << FSCACHE_OBJECT_EV_RELEASE) | | ||
209 | (1 << FSCACHE_OBJECT_EV_ERROR)); | ||
210 | fscache_withdraw_object(object); | ||
211 | spin_lock(&object->lock); | ||
212 | object->state = FSCACHE_OBJECT_DEAD; | ||
213 | spin_unlock(&object->lock); | ||
214 | fscache_stat(&fscache_n_object_dead); | ||
215 | goto terminal_transit; | ||
216 | |||
217 | /* complain about the object being woken up once it is | ||
218 | * deceased */ | ||
219 | case FSCACHE_OBJECT_DEAD: | ||
220 | printk(KERN_ERR "FS-Cache:" | ||
221 | " Unexpected event in dead state %lx\n", | ||
222 | object->events & object->event_mask); | ||
223 | BUG(); | ||
224 | |||
225 | default: | ||
226 | printk(KERN_ERR "FS-Cache: Unknown object state %u\n", | ||
227 | object->state); | ||
228 | BUG(); | ||
229 | } | ||
230 | |||
231 | /* determine the transition from a lookup state */ | ||
232 | lookup_transit: | ||
233 | switch (fls(object->events & object->event_mask) - 1) { | ||
234 | case FSCACHE_OBJECT_EV_WITHDRAW: | ||
235 | case FSCACHE_OBJECT_EV_RETIRE: | ||
236 | case FSCACHE_OBJECT_EV_RELEASE: | ||
237 | case FSCACHE_OBJECT_EV_ERROR: | ||
238 | new_state = FSCACHE_OBJECT_LC_DYING; | ||
239 | goto change_state; | ||
240 | case FSCACHE_OBJECT_EV_REQUEUE: | ||
241 | goto done; | ||
242 | case -1: | ||
243 | goto done; /* sleep until event */ | ||
244 | default: | ||
245 | goto unsupported_event; | ||
246 | } | ||
247 | |||
248 | /* determine the transition from an active state */ | ||
249 | active_transit: | ||
250 | switch (fls(object->events & object->event_mask) - 1) { | ||
251 | case FSCACHE_OBJECT_EV_WITHDRAW: | ||
252 | case FSCACHE_OBJECT_EV_RETIRE: | ||
253 | case FSCACHE_OBJECT_EV_RELEASE: | ||
254 | case FSCACHE_OBJECT_EV_ERROR: | ||
255 | new_state = FSCACHE_OBJECT_DYING; | ||
256 | goto change_state; | ||
257 | case FSCACHE_OBJECT_EV_UPDATE: | ||
258 | new_state = FSCACHE_OBJECT_UPDATING; | ||
259 | goto change_state; | ||
260 | case -1: | ||
261 | new_state = FSCACHE_OBJECT_ACTIVE; | ||
262 | goto change_state; /* sleep until event */ | ||
263 | default: | ||
264 | goto unsupported_event; | ||
265 | } | ||
266 | |||
267 | /* determine the transition from a terminal state */ | ||
268 | terminal_transit: | ||
269 | switch (fls(object->events & object->event_mask) - 1) { | ||
270 | case FSCACHE_OBJECT_EV_WITHDRAW: | ||
271 | new_state = FSCACHE_OBJECT_WITHDRAWING; | ||
272 | goto change_state; | ||
273 | case FSCACHE_OBJECT_EV_RETIRE: | ||
274 | new_state = FSCACHE_OBJECT_RECYCLING; | ||
275 | goto change_state; | ||
276 | case FSCACHE_OBJECT_EV_RELEASE: | ||
277 | new_state = FSCACHE_OBJECT_RELEASING; | ||
278 | goto change_state; | ||
279 | case FSCACHE_OBJECT_EV_ERROR: | ||
280 | new_state = FSCACHE_OBJECT_WITHDRAWING; | ||
281 | goto change_state; | ||
282 | case FSCACHE_OBJECT_EV_CLEARED: | ||
283 | new_state = FSCACHE_OBJECT_DYING; | ||
284 | goto change_state; | ||
285 | case -1: | ||
286 | goto done; /* sleep until event */ | ||
287 | default: | ||
288 | goto unsupported_event; | ||
289 | } | ||
290 | |||
291 | change_state: | ||
292 | spin_lock(&object->lock); | ||
293 | object->state = new_state; | ||
294 | spin_unlock(&object->lock); | ||
295 | |||
296 | done: | ||
297 | _leave(" [->%s]", fscache_object_states[object->state]); | ||
298 | return; | ||
299 | |||
300 | unsupported_event: | ||
301 | printk(KERN_ERR "FS-Cache:" | ||
302 | " Unsupported event %lx [mask %lx] in state %s\n", | ||
303 | object->events, object->event_mask, | ||
304 | fscache_object_states[object->state]); | ||
305 | BUG(); | ||
306 | } | ||
307 | |||
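/*
 * Illustrative note (not part of the patch): the *_transit switches above
 * handle one event per pass.  fls() returns the index of the highest set
 * bit plus one, so "fls(events & event_mask) - 1" picks whichever pending,
 * unmasked event has the highest bit number (the FSCACHE_OBJECT_EV_*
 * numbering lives in linux/fscache-cache.h), or -1 when nothing is pending.
 */
#include <linux/bitops.h>

static inline int example_pick_event(unsigned long events, unsigned long mask)
{
	return fls(events & mask) - 1;	/* -1 => sleep until the next event */
}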
308 | /* | ||
309 | * execute an object | ||
310 | */ | ||
311 | static void fscache_object_slow_work_execute(struct slow_work *work) | ||
312 | { | ||
313 | struct fscache_object *object = | ||
314 | container_of(work, struct fscache_object, work); | ||
315 | unsigned long start; | ||
316 | |||
317 | _enter("{OBJ%x}", object->debug_id); | ||
318 | |||
319 | clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); | ||
320 | |||
321 | start = jiffies; | ||
322 | fscache_object_state_machine(object); | ||
323 | fscache_hist(fscache_objs_histogram, start); | ||
324 | if (object->events & object->event_mask) | ||
325 | fscache_enqueue_object(object); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * initialise an object | ||
330 | * - check the specified object's parent to see if we can make use of it | ||
331 | * immediately to do a creation | ||
332 | * - we may need to start the process of creating a parent and we need to wait | ||
333 | * for the parent's lookup and creation to complete if it's not there yet | ||
334 | * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the | ||
335 | * leaf-most cookies of the object and all its children | ||
336 | */ | ||
337 | static void fscache_initialise_object(struct fscache_object *object) | ||
338 | { | ||
339 | struct fscache_object *parent; | ||
340 | |||
341 | _enter(""); | ||
342 | ASSERT(object->cookie != NULL); | ||
343 | ASSERT(object->cookie->parent != NULL); | ||
344 | ASSERT(list_empty(&object->work.link)); | ||
345 | |||
346 | if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) | | ||
347 | (1 << FSCACHE_OBJECT_EV_RELEASE) | | ||
348 | (1 << FSCACHE_OBJECT_EV_RETIRE) | | ||
349 | (1 << FSCACHE_OBJECT_EV_WITHDRAW))) { | ||
350 | _debug("abort init %lx", object->events); | ||
351 | spin_lock(&object->lock); | ||
352 | object->state = FSCACHE_OBJECT_ABORT_INIT; | ||
353 | spin_unlock(&object->lock); | ||
354 | return; | ||
355 | } | ||
356 | |||
357 | spin_lock(&object->cookie->lock); | ||
358 | spin_lock_nested(&object->cookie->parent->lock, 1); | ||
359 | |||
360 | parent = object->parent; | ||
361 | if (!parent) { | ||
362 | _debug("no parent"); | ||
363 | set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); | ||
364 | } else { | ||
365 | spin_lock(&object->lock); | ||
366 | spin_lock_nested(&parent->lock, 1); | ||
367 | _debug("parent %s", fscache_object_states[parent->state]); | ||
368 | |||
369 | if (parent->state >= FSCACHE_OBJECT_DYING) { | ||
370 | _debug("bad parent"); | ||
371 | set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); | ||
372 | } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) { | ||
373 | _debug("wait"); | ||
374 | |||
375 | /* we may get woken up in this state by child objects | ||
376 | * binding on to us, so we need to make sure we don't | ||
377 | * add ourselves to the list multiple times */ | ||
378 | if (list_empty(&object->dep_link)) { | ||
379 | object->cache->ops->grab_object(object); | ||
380 | list_add(&object->dep_link, | ||
381 | &parent->dependents); | ||
382 | |||
383 | /* fscache_acquire_non_index_cookie() uses this | ||
384 | * to wake the chain up */ | ||
385 | if (parent->state == FSCACHE_OBJECT_INIT) | ||
386 | fscache_enqueue_object(parent); | ||
387 | } | ||
388 | } else { | ||
389 | _debug("go"); | ||
390 | parent->n_ops++; | ||
391 | parent->n_obj_ops++; | ||
392 | object->lookup_jif = jiffies; | ||
393 | object->state = FSCACHE_OBJECT_LOOKING_UP; | ||
394 | set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); | ||
395 | } | ||
396 | |||
397 | spin_unlock(&parent->lock); | ||
398 | spin_unlock(&object->lock); | ||
399 | } | ||
400 | |||
401 | spin_unlock(&object->cookie->parent->lock); | ||
402 | spin_unlock(&object->cookie->lock); | ||
403 | _leave(""); | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * look an object up in the cache from which it was allocated | ||
408 | * - we hold an "access lock" on the parent object, so the parent object cannot | ||
409 | * be withdrawn by either party till we've finished | ||
410 | * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the | ||
411 | * leaf-most cookies of the object and all its children | ||
412 | */ | ||
413 | static void fscache_lookup_object(struct fscache_object *object) | ||
414 | { | ||
415 | struct fscache_cookie *cookie = object->cookie; | ||
416 | struct fscache_object *parent; | ||
417 | |||
418 | _enter(""); | ||
419 | |||
420 | parent = object->parent; | ||
421 | ASSERT(parent != NULL); | ||
422 | ASSERTCMP(parent->n_ops, >, 0); | ||
423 | ASSERTCMP(parent->n_obj_ops, >, 0); | ||
424 | |||
425 | /* make sure the parent is still available */ | ||
426 | ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE); | ||
427 | |||
428 | if (parent->state >= FSCACHE_OBJECT_DYING || | ||
429 | test_bit(FSCACHE_IOERROR, &object->cache->flags)) { | ||
430 | _debug("unavailable"); | ||
431 | set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); | ||
432 | _leave(""); | ||
433 | return; | ||
434 | } | ||
435 | |||
436 | _debug("LOOKUP \"%s/%s\" in \"%s\"", | ||
437 | parent->cookie->def->name, cookie->def->name, | ||
438 | object->cache->tag->name); | ||
439 | |||
440 | fscache_stat(&fscache_n_object_lookups); | ||
441 | object->cache->ops->lookup_object(object); | ||
442 | |||
443 | if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) | ||
444 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); | ||
445 | |||
446 | _leave(""); | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * fscache_object_lookup_negative - Note negative cookie lookup | ||
451 | * @object: Object pointing to cookie to mark | ||
452 | * | ||
453 | * Note negative lookup, permitting those waiting to read data from an already | ||
454 | * existing backing object to continue as there's no data for them to read. | ||
455 | */ | ||
456 | void fscache_object_lookup_negative(struct fscache_object *object) | ||
457 | { | ||
458 | struct fscache_cookie *cookie = object->cookie; | ||
459 | |||
460 | _enter("{OBJ%x,%s}", | ||
461 | object->debug_id, fscache_object_states[object->state]); | ||
462 | |||
463 | spin_lock(&object->lock); | ||
464 | if (object->state == FSCACHE_OBJECT_LOOKING_UP) { | ||
465 | fscache_stat(&fscache_n_object_lookups_negative); | ||
466 | |||
467 | /* transit here to allow write requests to begin stacking up | ||
468 | * and read requests to begin returning ENODATA */ | ||
469 | object->state = FSCACHE_OBJECT_CREATING; | ||
470 | spin_unlock(&object->lock); | ||
471 | |||
472 | set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags); | ||
473 | set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); | ||
474 | |||
475 | _debug("wake up lookup %p", &cookie->flags); | ||
476 | smp_mb__before_clear_bit(); | ||
477 | clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); | ||
478 | smp_mb__after_clear_bit(); | ||
479 | wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); | ||
480 | set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); | ||
481 | } else { | ||
482 | ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); | ||
483 | spin_unlock(&object->lock); | ||
484 | } | ||
485 | |||
486 | _leave(""); | ||
487 | } | ||
488 | EXPORT_SYMBOL(fscache_object_lookup_negative); | ||
489 | |||
490 | /** | ||
491 | * fscache_obtained_object - Note successful object lookup or creation | ||
492 | * @object: Object pointing to cookie to mark | ||
493 | * | ||
494 | * Note successful lookup and/or creation, permitting those waiting to write | ||
495 | * data to a backing object to continue. | ||
496 | * | ||
497 | * Note that after calling this, an object's cookie may be relinquished by the | ||
498 | * netfs, and so must be accessed with object lock held. | ||
499 | */ | ||
500 | void fscache_obtained_object(struct fscache_object *object) | ||
501 | { | ||
502 | struct fscache_cookie *cookie = object->cookie; | ||
503 | |||
504 | _enter("{OBJ%x,%s}", | ||
505 | object->debug_id, fscache_object_states[object->state]); | ||
506 | |||
507 | /* if we were still looking up, then we must have a positive lookup | ||
508 | * result, in which case there may be data available */ | ||
509 | spin_lock(&object->lock); | ||
510 | if (object->state == FSCACHE_OBJECT_LOOKING_UP) { | ||
511 | fscache_stat(&fscache_n_object_lookups_positive); | ||
512 | |||
513 | clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); | ||
514 | |||
515 | object->state = FSCACHE_OBJECT_AVAILABLE; | ||
516 | spin_unlock(&object->lock); | ||
517 | |||
518 | smp_mb__before_clear_bit(); | ||
519 | clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); | ||
520 | smp_mb__after_clear_bit(); | ||
521 | wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); | ||
522 | set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); | ||
523 | } else { | ||
524 | ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); | ||
525 | fscache_stat(&fscache_n_object_created); | ||
526 | |||
527 | object->state = FSCACHE_OBJECT_AVAILABLE; | ||
528 | spin_unlock(&object->lock); | ||
529 | set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); | ||
530 | smp_wmb(); | ||
531 | } | ||
532 | |||
533 | if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) | ||
534 | wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING); | ||
535 | |||
536 | _leave(""); | ||
537 | } | ||
538 | EXPORT_SYMBOL(fscache_obtained_object); | ||
539 | |||
540 | /* | ||
541 | * handle an object that has just become available | ||
542 | */ | ||
543 | static void fscache_object_available(struct fscache_object *object) | ||
544 | { | ||
545 | _enter("{OBJ%x}", object->debug_id); | ||
546 | |||
547 | spin_lock(&object->lock); | ||
548 | |||
549 | if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) | ||
550 | wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); | ||
551 | |||
552 | fscache_done_parent_op(object); | ||
553 | if (object->n_in_progress == 0) { | ||
554 | if (object->n_ops > 0) { | ||
555 | ASSERTCMP(object->n_ops, >=, object->n_obj_ops); | ||
556 | ASSERTIF(object->n_ops > object->n_obj_ops, | ||
557 | !list_empty(&object->pending_ops)); | ||
558 | fscache_start_operations(object); | ||
559 | } else { | ||
560 | ASSERT(list_empty(&object->pending_ops)); | ||
561 | } | ||
562 | } | ||
563 | spin_unlock(&object->lock); | ||
564 | |||
565 | object->cache->ops->lookup_complete(object); | ||
566 | fscache_enqueue_dependents(object); | ||
567 | |||
568 | fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); | ||
569 | fscache_stat(&fscache_n_object_avail); | ||
570 | |||
571 | _leave(""); | ||
572 | } | ||
573 | |||
574 | /* | ||
575 | * drop an object's attachments | ||
576 | */ | ||
577 | static void fscache_drop_object(struct fscache_object *object) | ||
578 | { | ||
579 | struct fscache_object *parent = object->parent; | ||
580 | struct fscache_cache *cache = object->cache; | ||
581 | |||
582 | _enter("{OBJ%x,%d}", object->debug_id, object->n_children); | ||
583 | |||
584 | spin_lock(&cache->object_list_lock); | ||
585 | list_del_init(&object->cache_link); | ||
586 | spin_unlock(&cache->object_list_lock); | ||
587 | |||
588 | cache->ops->drop_object(object); | ||
589 | |||
590 | if (parent) { | ||
591 | _debug("release parent OBJ%x {%d}", | ||
592 | parent->debug_id, parent->n_children); | ||
593 | |||
594 | spin_lock(&parent->lock); | ||
595 | parent->n_children--; | ||
596 | if (parent->n_children == 0) | ||
597 | fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED); | ||
598 | spin_unlock(&parent->lock); | ||
599 | object->parent = NULL; | ||
600 | } | ||
601 | |||
602 | /* this just shifts the object release to the slow work processor */ | ||
603 | object->cache->ops->put_object(object); | ||
604 | |||
605 | _leave(""); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * release or recycle an object that the netfs has discarded | ||
610 | */ | ||
611 | static void fscache_release_object(struct fscache_object *object) | ||
612 | { | ||
613 | _enter(""); | ||
614 | |||
615 | fscache_drop_object(object); | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * withdraw an object from active service | ||
620 | */ | ||
621 | static void fscache_withdraw_object(struct fscache_object *object) | ||
622 | { | ||
623 | struct fscache_cookie *cookie; | ||
624 | bool detached; | ||
625 | |||
626 | _enter(""); | ||
627 | |||
628 | spin_lock(&object->lock); | ||
629 | cookie = object->cookie; | ||
630 | if (cookie) { | ||
631 | /* need to get the cookie lock before the object lock, starting | ||
632 | * from the object pointer */ | ||
633 | atomic_inc(&cookie->usage); | ||
634 | spin_unlock(&object->lock); | ||
635 | |||
636 | detached = false; | ||
637 | spin_lock(&cookie->lock); | ||
638 | spin_lock(&object->lock); | ||
639 | |||
640 | if (object->cookie == cookie) { | ||
641 | hlist_del_init(&object->cookie_link); | ||
642 | object->cookie = NULL; | ||
643 | detached = true; | ||
644 | } | ||
645 | spin_unlock(&cookie->lock); | ||
646 | fscache_cookie_put(cookie); | ||
647 | if (detached) | ||
648 | fscache_cookie_put(cookie); | ||
649 | } | ||
650 | |||
651 | spin_unlock(&object->lock); | ||
652 | |||
653 | fscache_drop_object(object); | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * withdraw an object from active service at the behest of the cache | ||
658 | * - need to break the links to a cached object cookie | ||
659 | * - called under two situations: | ||
660 | * (1) recycler decides to reclaim an in-use object | ||
661 | * (2) a cache is unmounted | ||
662 | * - have to take care as the cookie may be relinquished by the netfs | ||
663 | * simultaneously | ||
664 | * - the object is pinned by the caller holding a refcount on it | ||
665 | */ | ||
666 | void fscache_withdrawing_object(struct fscache_cache *cache, | ||
667 | struct fscache_object *object) | ||
668 | { | ||
669 | bool enqueue = false; | ||
670 | |||
671 | _enter(",OBJ%x", object->debug_id); | ||
672 | |||
673 | spin_lock(&object->lock); | ||
674 | if (object->state < FSCACHE_OBJECT_WITHDRAWING) { | ||
675 | object->state = FSCACHE_OBJECT_WITHDRAWING; | ||
676 | enqueue = true; | ||
677 | } | ||
678 | spin_unlock(&object->lock); | ||
679 | |||
680 | if (enqueue) | ||
681 | fscache_enqueue_object(object); | ||
682 | |||
683 | _leave(""); | ||
684 | } | ||
685 | |||
686 | /* | ||
687 | * allow the slow work item processor to get a ref on an object | ||
688 | */ | ||
689 | static int fscache_object_slow_work_get_ref(struct slow_work *work) | ||
690 | { | ||
691 | struct fscache_object *object = | ||
692 | container_of(work, struct fscache_object, work); | ||
693 | |||
694 | return object->cache->ops->grab_object(object) ? 0 : -EAGAIN; | ||
695 | } | ||
696 | |||
697 | /* | ||
698 | * allow the slow work item processor to discard a ref on a work item | ||
699 | */ | ||
700 | static void fscache_object_slow_work_put_ref(struct slow_work *work) | ||
701 | { | ||
702 | struct fscache_object *object = | ||
703 | container_of(work, struct fscache_object, work); | ||
704 | |||
705 | return object->cache->ops->put_object(object); | ||
706 | } | ||
707 | |||
708 | /* | ||
709 | * enqueue an object for metadata-type processing | ||
710 | */ | ||
711 | void fscache_enqueue_object(struct fscache_object *object) | ||
712 | { | ||
713 | _enter("{OBJ%x}", object->debug_id); | ||
714 | |||
715 | slow_work_enqueue(&object->work); | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | * enqueue the dependents of an object for metadata-type processing | ||
720 | * - the caller must not hold the object's lock as this function takes it | ||
721 | * - this may cause an already locked object to wind up being processed again | ||
722 | */ | ||
723 | static void fscache_enqueue_dependents(struct fscache_object *object) | ||
724 | { | ||
725 | struct fscache_object *dep; | ||
726 | |||
727 | _enter("{OBJ%x}", object->debug_id); | ||
728 | |||
729 | if (list_empty(&object->dependents)) | ||
730 | return; | ||
731 | |||
732 | spin_lock(&object->lock); | ||
733 | |||
734 | while (!list_empty(&object->dependents)) { | ||
735 | dep = list_entry(object->dependents.next, | ||
736 | struct fscache_object, dep_link); | ||
737 | list_del_init(&dep->dep_link); | ||
738 | |||
739 | |||
740 | /* sort onto appropriate lists */ | ||
741 | fscache_enqueue_object(dep); | ||
742 | dep->cache->ops->put_object(dep); | ||
743 | |||
744 | if (!list_empty(&object->dependents)) | ||
745 | cond_resched_lock(&object->lock); | ||
746 | } | ||
747 | |||
748 | spin_unlock(&object->lock); | ||
749 | } | ||
750 | |||
751 | /* | ||
752 | * remove an object from whatever queue it's waiting on | ||
753 | * - the caller must hold object->lock | ||
754 | */ | ||
755 | void fscache_dequeue_object(struct fscache_object *object) | ||
756 | { | ||
757 | _enter("{OBJ%x}", object->debug_id); | ||
758 | |||
759 | if (!list_empty(&object->dep_link)) { | ||
760 | spin_lock(&object->parent->lock); | ||
761 | list_del_init(&object->dep_link); | ||
762 | spin_unlock(&object->parent->lock); | ||
763 | } | ||
764 | |||
765 | _leave(""); | ||
766 | } | ||
767 | |||
768 | /** | ||
769 | * fscache_check_aux - Ask the netfs whether an object on disk is still valid | ||
770 | * @object: The object to ask about | ||
771 | * @data: The auxiliary data for the object | ||
772 | * @datalen: The size of the auxiliary data | ||
773 | * | ||
774 | * This function consults the netfs about the coherency state of an object | ||
775 | */ | ||
776 | enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | ||
777 | const void *data, uint16_t datalen) | ||
778 | { | ||
779 | enum fscache_checkaux result; | ||
780 | |||
781 | if (!object->cookie->def->check_aux) { | ||
782 | fscache_stat(&fscache_n_checkaux_none); | ||
783 | return FSCACHE_CHECKAUX_OKAY; | ||
784 | } | ||
785 | |||
786 | result = object->cookie->def->check_aux(object->cookie->netfs_data, | ||
787 | data, datalen); | ||
788 | switch (result) { | ||
789 | /* entry okay as is */ | ||
790 | case FSCACHE_CHECKAUX_OKAY: | ||
791 | fscache_stat(&fscache_n_checkaux_okay); | ||
792 | break; | ||
793 | |||
794 | /* entry requires update */ | ||
795 | case FSCACHE_CHECKAUX_NEEDS_UPDATE: | ||
796 | fscache_stat(&fscache_n_checkaux_update); | ||
797 | break; | ||
798 | |||
799 | /* entry requires deletion */ | ||
800 | case FSCACHE_CHECKAUX_OBSOLETE: | ||
801 | fscache_stat(&fscache_n_checkaux_obsolete); | ||
802 | break; | ||
803 | |||
804 | default: | ||
805 | BUG(); | ||
806 | } | ||
807 | |||
808 | return result; | ||
809 | } | ||
810 | EXPORT_SYMBOL(fscache_check_aux); | ||
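A sketch of the netfs half of this interface: the check_aux callback that fscache_check_aux() above invokes through cookie->def. The examplefs names and the aux record layout are invented; only the callback signature and the enum fscache_checkaux return values are taken from the FS-Cache API, and netfs_data is assumed here to point at the netfs's current idea of the aux record.

#include <linux/types.h>
#include <linux/fscache.h>

/* hypothetical coherency record stored alongside each data object */
struct examplefs_aux {
	__u64	data_version;
	__u64	mtime_sec;
};

static enum fscache_checkaux examplefs_check_aux(void *cookie_netfs_data,
						 const void *data,
						 uint16_t datalen)
{
	struct examplefs_aux *latest = cookie_netfs_data;
	const struct examplefs_aux *stored = data;

	/* a record of the wrong size cannot be trusted at all */
	if (datalen != sizeof(*stored))
		return FSCACHE_CHECKAUX_OBSOLETE;

	/* the file's contents changed on the server: cached data is stale */
	if (stored->data_version != latest->data_version)
		return FSCACHE_CHECKAUX_OBSOLETE;

	/* contents unchanged but the metadata moved on: rewrite the aux
	 * record, keep the data */
	if (stored->mtime_sec != latest->mtime_sec)
		return FSCACHE_CHECKAUX_NEEDS_UPDATE;

	return FSCACHE_CHECKAUX_OKAY;
}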
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c new file mode 100644 index 000000000000..e7f8d53b8b6b --- /dev/null +++ b/fs/fscache/operation.c | |||
@@ -0,0 +1,459 @@ | |||
1 | /* FS-Cache worker operation management routines | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * See Documentation/filesystems/caching/operations.txt | ||
12 | */ | ||
13 | |||
14 | #define FSCACHE_DEBUG_LEVEL OPERATION | ||
15 | #include <linux/module.h> | ||
16 | #include "internal.h" | ||
17 | |||
18 | atomic_t fscache_op_debug_id; | ||
19 | EXPORT_SYMBOL(fscache_op_debug_id); | ||
20 | |||
21 | /** | ||
22 | * fscache_enqueue_operation - Enqueue an operation for processing | ||
23 | * @op: The operation to enqueue | ||
24 | * | ||
25 | * Enqueue an operation for processing by the FS-Cache thread pool. | ||
26 | * | ||
27 | * This will get its own ref on the object. | ||
28 | */ | ||
29 | void fscache_enqueue_operation(struct fscache_operation *op) | ||
30 | { | ||
31 | _enter("{OBJ%x OP%x,%u}", | ||
32 | op->object->debug_id, op->debug_id, atomic_read(&op->usage)); | ||
33 | |||
34 | ASSERT(op->processor != NULL); | ||
35 | ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); | ||
36 | ASSERTCMP(atomic_read(&op->usage), >, 0); | ||
37 | |||
38 | if (list_empty(&op->pend_link)) { | ||
39 | switch (op->flags & FSCACHE_OP_TYPE) { | ||
40 | case FSCACHE_OP_FAST: | ||
41 | _debug("queue fast"); | ||
42 | atomic_inc(&op->usage); | ||
43 | if (!schedule_work(&op->fast_work)) | ||
44 | fscache_put_operation(op); | ||
45 | break; | ||
46 | case FSCACHE_OP_SLOW: | ||
47 | _debug("queue slow"); | ||
48 | slow_work_enqueue(&op->slow_work); | ||
49 | break; | ||
50 | case FSCACHE_OP_MYTHREAD: | ||
51 | _debug("queue for caller's attention"); | ||
52 | break; | ||
53 | default: | ||
54 | printk(KERN_ERR "FS-Cache: Unexpected op type %lx", | ||
55 | op->flags); | ||
56 | BUG(); | ||
57 | break; | ||
58 | } | ||
59 | fscache_stat(&fscache_n_op_enqueue); | ||
60 | } | ||
61 | } | ||
62 | EXPORT_SYMBOL(fscache_enqueue_operation); | ||
63 | |||
64 | /* | ||
65 | * start an op running | ||
66 | */ | ||
67 | static void fscache_run_op(struct fscache_object *object, | ||
68 | struct fscache_operation *op) | ||
69 | { | ||
70 | object->n_in_progress++; | ||
71 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | ||
72 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | ||
73 | if (op->processor) | ||
74 | fscache_enqueue_operation(op); | ||
75 | fscache_stat(&fscache_n_op_run); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * submit an exclusive operation for an object | ||
80 | * - other ops are excluded from running simultaneously with this one | ||
81 | * - this gets any extra refs it needs on an op | ||
82 | */ | ||
83 | int fscache_submit_exclusive_op(struct fscache_object *object, | ||
84 | struct fscache_operation *op) | ||
85 | { | ||
86 | int ret; | ||
87 | |||
88 | _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); | ||
89 | |||
90 | spin_lock(&object->lock); | ||
91 | ASSERTCMP(object->n_ops, >=, object->n_in_progress); | ||
92 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); | ||
93 | |||
94 | ret = -ENOBUFS; | ||
95 | if (fscache_object_is_active(object)) { | ||
96 | op->object = object; | ||
97 | object->n_ops++; | ||
98 | object->n_exclusive++; /* reads and writes must wait */ | ||
99 | |||
100 | if (object->n_ops > 0) { | ||
101 | atomic_inc(&op->usage); | ||
102 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
103 | fscache_stat(&fscache_n_op_pend); | ||
104 | } else if (!list_empty(&object->pending_ops)) { | ||
105 | atomic_inc(&op->usage); | ||
106 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
107 | fscache_stat(&fscache_n_op_pend); | ||
108 | fscache_start_operations(object); | ||
109 | } else { | ||
110 | ASSERTCMP(object->n_in_progress, ==, 0); | ||
111 | fscache_run_op(object, op); | ||
112 | } | ||
113 | |||
114 | /* need to issue a new write op after this */ | ||
115 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
116 | ret = 0; | ||
117 | } else if (object->state == FSCACHE_OBJECT_CREATING) { | ||
118 | op->object = object; | ||
119 | object->n_ops++; | ||
120 | object->n_exclusive++; /* reads and writes must wait */ | ||
121 | atomic_inc(&op->usage); | ||
122 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
123 | fscache_stat(&fscache_n_op_pend); | ||
124 | ret = 0; | ||
125 | } else { | ||
126 | /* not allowed to submit ops in any other state */ | ||
127 | BUG(); | ||
128 | } | ||
129 | |||
130 | spin_unlock(&object->lock); | ||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * report an unexpected submission | ||
136 | */ | ||
137 | static void fscache_report_unexpected_submission(struct fscache_object *object, | ||
138 | struct fscache_operation *op, | ||
139 | unsigned long ostate) | ||
140 | { | ||
141 | static bool once_only; | ||
142 | struct fscache_operation *p; | ||
143 | unsigned n; | ||
144 | |||
145 | if (once_only) | ||
146 | return; | ||
147 | once_only = true; | ||
148 | |||
149 | kdebug("unexpected submission OP%x [OBJ%x %s]", | ||
150 | op->debug_id, object->debug_id, | ||
151 | fscache_object_states[object->state]); | ||
152 | kdebug("objstate=%s [%s]", | ||
153 | fscache_object_states[object->state], | ||
154 | fscache_object_states[ostate]); | ||
155 | kdebug("objflags=%lx", object->flags); | ||
156 | kdebug("objevent=%lx [%lx]", object->events, object->event_mask); | ||
157 | kdebug("ops=%u inp=%u exc=%u", | ||
158 | object->n_ops, object->n_in_progress, object->n_exclusive); | ||
159 | |||
160 | if (!list_empty(&object->pending_ops)) { | ||
161 | n = 0; | ||
162 | list_for_each_entry(p, &object->pending_ops, pend_link) { | ||
163 | ASSERTCMP(p->object, ==, object); | ||
164 | kdebug("%p %p", p->processor, p->release); | ||
165 | n++; | ||
166 | } | ||
167 | |||
168 | kdebug("n=%u", n); | ||
169 | } | ||
170 | |||
171 | dump_stack(); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * submit an operation for an object | ||
176 | * - ops may be submitted only in the following object states: | ||
177 | * - during object creation (write ops may be submitted) | ||
178 | * - whilst the object is active | ||
179 | * - after an I/O error incurred in one of the two above states (op rejected) | ||
180 | * - this gets any extra refs it needs on an op | ||
181 | */ | ||
182 | int fscache_submit_op(struct fscache_object *object, | ||
183 | struct fscache_operation *op) | ||
184 | { | ||
185 | unsigned long ostate; | ||
186 | int ret; | ||
187 | |||
188 | _enter("{OBJ%x OP%x},{%u}", | ||
189 | object->debug_id, op->debug_id, atomic_read(&op->usage)); | ||
190 | |||
191 | ASSERTCMP(atomic_read(&op->usage), >, 0); | ||
192 | |||
193 | spin_lock(&object->lock); | ||
194 | ASSERTCMP(object->n_ops, >=, object->n_in_progress); | ||
195 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); | ||
196 | |||
197 | ostate = object->state; | ||
198 | smp_rmb(); | ||
199 | |||
200 | if (fscache_object_is_active(object)) { | ||
201 | op->object = object; | ||
202 | object->n_ops++; | ||
203 | |||
204 | if (object->n_exclusive > 0) { | ||
205 | atomic_inc(&op->usage); | ||
206 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
207 | fscache_stat(&fscache_n_op_pend); | ||
208 | } else if (!list_empty(&object->pending_ops)) { | ||
209 | atomic_inc(&op->usage); | ||
210 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
211 | fscache_stat(&fscache_n_op_pend); | ||
212 | fscache_start_operations(object); | ||
213 | } else { | ||
214 | ASSERTCMP(object->n_exclusive, ==, 0); | ||
215 | fscache_run_op(object, op); | ||
216 | } | ||
217 | ret = 0; | ||
218 | } else if (object->state == FSCACHE_OBJECT_CREATING) { | ||
219 | op->object = object; | ||
220 | object->n_ops++; | ||
221 | atomic_inc(&op->usage); | ||
222 | list_add_tail(&op->pend_link, &object->pending_ops); | ||
223 | fscache_stat(&fscache_n_op_pend); | ||
224 | ret = 0; | ||
225 | } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { | ||
226 | fscache_report_unexpected_submission(object, op, ostate); | ||
227 | ASSERT(!fscache_object_is_active(object)); | ||
228 | ret = -ENOBUFS; | ||
229 | } else { | ||
230 | ret = -ENOBUFS; | ||
231 | } | ||
232 | |||
233 | spin_unlock(&object->lock); | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * queue an object for withdrawal on error, aborting all following asynchronous | ||
239 | * operations | ||
240 | */ | ||
241 | void fscache_abort_object(struct fscache_object *object) | ||
242 | { | ||
243 | _enter("{OBJ%x}", object->debug_id); | ||
244 | |||
245 | fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * jump start the operation processing on an object | ||
250 | * - caller must hold object->lock | ||
251 | */ | ||
252 | void fscache_start_operations(struct fscache_object *object) | ||
253 | { | ||
254 | struct fscache_operation *op; | ||
255 | bool stop = false; | ||
256 | |||
257 | while (!list_empty(&object->pending_ops) && !stop) { | ||
258 | op = list_entry(object->pending_ops.next, | ||
259 | struct fscache_operation, pend_link); | ||
260 | |||
261 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { | ||
262 | if (object->n_in_progress > 0) | ||
263 | break; | ||
264 | stop = true; | ||
265 | } | ||
266 | list_del_init(&op->pend_link); | ||
267 | object->n_in_progress++; | ||
268 | |||
269 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | ||
270 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | ||
271 | if (op->processor) | ||
272 | fscache_enqueue_operation(op); | ||
273 | |||
274 | /* the pending queue was holding a ref on the object */ | ||
275 | fscache_put_operation(op); | ||
276 | } | ||
277 | |||
278 | ASSERTCMP(object->n_in_progress, <=, object->n_ops); | ||
279 | |||
280 | _debug("woke %d ops on OBJ%x", | ||
281 | object->n_in_progress, object->debug_id); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * release an operation | ||
286 | * - queues pending ops if this is the last in-progress op | ||
287 | */ | ||
288 | void fscache_put_operation(struct fscache_operation *op) | ||
289 | { | ||
290 | struct fscache_object *object; | ||
291 | struct fscache_cache *cache; | ||
292 | |||
293 | _enter("{OBJ%x OP%x,%d}", | ||
294 | op->object->debug_id, op->debug_id, atomic_read(&op->usage)); | ||
295 | |||
296 | ASSERTCMP(atomic_read(&op->usage), >, 0); | ||
297 | |||
298 | if (!atomic_dec_and_test(&op->usage)) | ||
299 | return; | ||
300 | |||
301 | _debug("PUT OP"); | ||
302 | if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) | ||
303 | BUG(); | ||
304 | |||
305 | fscache_stat(&fscache_n_op_release); | ||
306 | |||
307 | if (op->release) { | ||
308 | op->release(op); | ||
309 | op->release = NULL; | ||
310 | } | ||
311 | |||
312 | object = op->object; | ||
313 | |||
314 | /* now... we may get called with the object spinlock held, so we | ||
315 | * complete the cleanup here only if we can immediately acquire the | ||
316 | * lock, and defer it otherwise */ | ||
317 | if (!spin_trylock(&object->lock)) { | ||
318 | _debug("defer put"); | ||
319 | fscache_stat(&fscache_n_op_deferred_release); | ||
320 | |||
321 | cache = object->cache; | ||
322 | spin_lock(&cache->op_gc_list_lock); | ||
323 | list_add_tail(&op->pend_link, &cache->op_gc_list); | ||
324 | spin_unlock(&cache->op_gc_list_lock); | ||
325 | schedule_work(&cache->op_gc); | ||
326 | _leave(" [defer]"); | ||
327 | return; | ||
328 | } | ||
329 | |||
330 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { | ||
331 | ASSERTCMP(object->n_exclusive, >, 0); | ||
332 | object->n_exclusive--; | ||
333 | } | ||
334 | |||
335 | ASSERTCMP(object->n_in_progress, >, 0); | ||
336 | object->n_in_progress--; | ||
337 | if (object->n_in_progress == 0) | ||
338 | fscache_start_operations(object); | ||
339 | |||
340 | ASSERTCMP(object->n_ops, >, 0); | ||
341 | object->n_ops--; | ||
342 | if (object->n_ops == 0) | ||
343 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); | ||
344 | |||
345 | spin_unlock(&object->lock); | ||
346 | |||
347 | kfree(op); | ||
348 | _leave(" [done]"); | ||
349 | } | ||
350 | EXPORT_SYMBOL(fscache_put_operation); | ||
351 | |||
352 | /* | ||
353 | * garbage collect operations that have had their release deferred | ||
354 | */ | ||
355 | void fscache_operation_gc(struct work_struct *work) | ||
356 | { | ||
357 | struct fscache_operation *op; | ||
358 | struct fscache_object *object; | ||
359 | struct fscache_cache *cache = | ||
360 | container_of(work, struct fscache_cache, op_gc); | ||
361 | int count = 0; | ||
362 | |||
363 | _enter(""); | ||
364 | |||
365 | do { | ||
366 | spin_lock(&cache->op_gc_list_lock); | ||
367 | if (list_empty(&cache->op_gc_list)) { | ||
368 | spin_unlock(&cache->op_gc_list_lock); | ||
369 | break; | ||
370 | } | ||
371 | |||
372 | op = list_entry(cache->op_gc_list.next, | ||
373 | struct fscache_operation, pend_link); | ||
374 | list_del(&op->pend_link); | ||
375 | spin_unlock(&cache->op_gc_list_lock); | ||
376 | |||
377 | object = op->object; | ||
378 | |||
379 | _debug("GC DEFERRED REL OBJ%x OP%x", | ||
380 | object->debug_id, op->debug_id); | ||
381 | fscache_stat(&fscache_n_op_gc); | ||
382 | |||
383 | ASSERTCMP(atomic_read(&op->usage), ==, 0); | ||
384 | |||
385 | spin_lock(&object->lock); | ||
386 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { | ||
387 | ASSERTCMP(object->n_exclusive, >, 0); | ||
388 | object->n_exclusive--; | ||
389 | } | ||
390 | |||
391 | ASSERTCMP(object->n_in_progress, >, 0); | ||
392 | object->n_in_progress--; | ||
393 | if (object->n_in_progress == 0) | ||
394 | fscache_start_operations(object); | ||
395 | |||
396 | ASSERTCMP(object->n_ops, >, 0); | ||
397 | object->n_ops--; | ||
398 | if (object->n_ops == 0) | ||
399 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); | ||
400 | |||
401 | spin_unlock(&object->lock); | ||
402 | |||
403 | } while (count++ < 20); | ||
404 | |||
405 | if (!list_empty(&cache->op_gc_list)) | ||
406 | schedule_work(&cache->op_gc); | ||
407 | |||
408 | _leave(""); | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * allow the slow work item processor to get a ref on an operation | ||
413 | */ | ||
414 | static int fscache_op_get_ref(struct slow_work *work) | ||
415 | { | ||
416 | struct fscache_operation *op = | ||
417 | container_of(work, struct fscache_operation, slow_work); | ||
418 | |||
419 | atomic_inc(&op->usage); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * allow the slow work item processor to discard a ref on an operation | ||
425 | */ | ||
426 | static void fscache_op_put_ref(struct slow_work *work) | ||
427 | { | ||
428 | struct fscache_operation *op = | ||
429 | container_of(work, struct fscache_operation, slow_work); | ||
430 | |||
431 | fscache_put_operation(op); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * execute an operation using the slow thread pool to provide processing context | ||
436 | * - the caller holds a ref to this object, so we don't need to hold one | ||
437 | */ | ||
438 | static void fscache_op_execute(struct slow_work *work) | ||
439 | { | ||
440 | struct fscache_operation *op = | ||
441 | container_of(work, struct fscache_operation, slow_work); | ||
442 | unsigned long start; | ||
443 | |||
444 | _enter("{OBJ%x OP%x,%d}", | ||
445 | op->object->debug_id, op->debug_id, atomic_read(&op->usage)); | ||
446 | |||
447 | ASSERT(op->processor != NULL); | ||
448 | start = jiffies; | ||
449 | op->processor(op); | ||
450 | fscache_hist(fscache_ops_histogram, start); | ||
451 | |||
452 | _leave(""); | ||
453 | } | ||
454 | |||
455 | const struct slow_work_ops fscache_op_slow_work_ops = { | ||
456 | .get_ref = fscache_op_get_ref, | ||
457 | .put_ref = fscache_op_put_ref, | ||
458 | .execute = fscache_op_execute, | ||
459 | }; | ||
diff --git a/fs/fscache/page.c b/fs/fscache/page.c new file mode 100644 index 000000000000..2568e0eb644f --- /dev/null +++ b/fs/fscache/page.c | |||
@@ -0,0 +1,816 @@ | |||
1 | /* Cache page management and data I/O routines | ||
2 | * | ||
3 | * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL PAGE | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/fscache-cache.h> | ||
15 | #include <linux/buffer_head.h> | ||
16 | #include <linux/pagevec.h> | ||
17 | #include "internal.h" | ||
18 | |||
19 | /* | ||
20 | * check to see if a page is being written to the cache | ||
21 | */ | ||
22 | bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page) | ||
23 | { | ||
24 | void *val; | ||
25 | |||
26 | rcu_read_lock(); | ||
27 | val = radix_tree_lookup(&cookie->stores, page->index); | ||
28 | rcu_read_unlock(); | ||
29 | |||
30 | return val != NULL; | ||
31 | } | ||
32 | EXPORT_SYMBOL(__fscache_check_page_write); | ||
33 | |||
34 | /* | ||
35 | * wait for a page to finish being written to the cache | ||
36 | */ | ||
37 | void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page) | ||
38 | { | ||
39 | wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); | ||
40 | |||
41 | wait_event(*wq, !__fscache_check_page_write(cookie, page)); | ||
42 | } | ||
43 | EXPORT_SYMBOL(__fscache_wait_on_page_write); | ||
44 | |||
45 | /* | ||
46 | * note that a page has finished being written to the cache | ||
47 | */ | ||
48 | static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page) | ||
49 | { | ||
50 | struct page *xpage; | ||
51 | |||
52 | spin_lock(&cookie->lock); | ||
53 | xpage = radix_tree_delete(&cookie->stores, page->index); | ||
54 | spin_unlock(&cookie->lock); | ||
55 | ASSERT(xpage != NULL); | ||
56 | |||
57 | wake_up_bit(&cookie->flags, 0); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * actually apply the changed attributes to a cache object | ||
62 | */ | ||
63 | static void fscache_attr_changed_op(struct fscache_operation *op) | ||
64 | { | ||
65 | struct fscache_object *object = op->object; | ||
66 | |||
67 | _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); | ||
68 | |||
69 | fscache_stat(&fscache_n_attr_changed_calls); | ||
70 | |||
71 | if (fscache_object_is_active(object) && | ||
72 | object->cache->ops->attr_changed(object) < 0) | ||
73 | fscache_abort_object(object); | ||
74 | |||
75 | _leave(""); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * notification that the attributes on an object have changed | ||
80 | */ | ||
81 | int __fscache_attr_changed(struct fscache_cookie *cookie) | ||
82 | { | ||
83 | struct fscache_operation *op; | ||
84 | struct fscache_object *object; | ||
85 | |||
86 | _enter("%p", cookie); | ||
87 | |||
88 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
89 | |||
90 | fscache_stat(&fscache_n_attr_changed); | ||
91 | |||
92 | op = kzalloc(sizeof(*op), GFP_KERNEL); | ||
93 | if (!op) { | ||
94 | fscache_stat(&fscache_n_attr_changed_nomem); | ||
95 | _leave(" = -ENOMEM"); | ||
96 | return -ENOMEM; | ||
97 | } | ||
98 | |||
99 | fscache_operation_init(op, NULL); | ||
100 | fscache_operation_init_slow(op, fscache_attr_changed_op); | ||
101 | op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); | ||
102 | |||
103 | spin_lock(&cookie->lock); | ||
104 | |||
105 | if (hlist_empty(&cookie->backing_objects)) | ||
106 | goto nobufs; | ||
107 | object = hlist_entry(cookie->backing_objects.first, | ||
108 | struct fscache_object, cookie_link); | ||
109 | |||
110 | if (fscache_submit_exclusive_op(object, op) < 0) | ||
111 | goto nobufs; | ||
112 | spin_unlock(&cookie->lock); | ||
113 | fscache_stat(&fscache_n_attr_changed_ok); | ||
114 | fscache_put_operation(op); | ||
115 | _leave(" = 0"); | ||
116 | return 0; | ||
117 | |||
118 | nobufs: | ||
119 | spin_unlock(&cookie->lock); | ||
120 | kfree(op); | ||
121 | fscache_stat(&fscache_n_attr_changed_nobufs); | ||
122 | _leave(" = %d", -ENOBUFS); | ||
123 | return -ENOBUFS; | ||
124 | } | ||
125 | EXPORT_SYMBOL(__fscache_attr_changed); | ||
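[Illustrative usage, not from this patch: a netfs would normally reach this routine through the fscache_attr_changed() wrapper declared in linux/fscache.h, typically from its setattr/truncate path once the inode size has changed. The myfs_* names below are hypothetical placeholders.]

	#include <linux/fscache.h>

	/* hypothetical netfs hook, called after i_size has been updated */
	static void myfs_note_size_change(struct inode *inode,
					  struct fscache_cookie *cookie)
	{
		/* an error (-ENOMEM/-ENOBUFS) only means the cached copy's
		 * attributes may now be stale; the netfs carries on anyway */
		if (fscache_attr_changed(cookie) < 0)
			pr_debug("myfs: cache attr update skipped for ino %lu\n",
				 inode->i_ino);
	}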
126 | |||
127 | /* | ||
128 | * handle secondary execution given to a retrieval op on behalf of the | ||
129 | * cache | ||
130 | */ | ||
131 | static void fscache_retrieval_work(struct work_struct *work) | ||
132 | { | ||
133 | struct fscache_retrieval *op = | ||
134 | container_of(work, struct fscache_retrieval, op.fast_work); | ||
135 | unsigned long start; | ||
136 | |||
137 | _enter("{OP%x}", op->op.debug_id); | ||
138 | |||
139 | start = jiffies; | ||
140 | op->op.processor(&op->op); | ||
141 | fscache_hist(fscache_ops_histogram, start); | ||
142 | fscache_put_operation(&op->op); | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * release a retrieval op reference | ||
147 | */ | ||
148 | static void fscache_release_retrieval_op(struct fscache_operation *_op) | ||
149 | { | ||
150 | struct fscache_retrieval *op = | ||
151 | container_of(_op, struct fscache_retrieval, op); | ||
152 | |||
153 | _enter("{OP%x}", op->op.debug_id); | ||
154 | |||
155 | fscache_hist(fscache_retrieval_histogram, op->start_time); | ||
156 | if (op->context) | ||
157 | fscache_put_context(op->op.object->cookie, op->context); | ||
158 | |||
159 | _leave(""); | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * allocate a retrieval op | ||
164 | */ | ||
165 | static struct fscache_retrieval *fscache_alloc_retrieval( | ||
166 | struct address_space *mapping, | ||
167 | fscache_rw_complete_t end_io_func, | ||
168 | void *context) | ||
169 | { | ||
170 | struct fscache_retrieval *op; | ||
171 | |||
172 | /* allocate a retrieval operation; the caller will attempt to submit it */ | ||
173 | op = kzalloc(sizeof(*op), GFP_NOIO); | ||
174 | if (!op) { | ||
175 | fscache_stat(&fscache_n_retrievals_nomem); | ||
176 | return NULL; | ||
177 | } | ||
178 | |||
179 | fscache_operation_init(&op->op, fscache_release_retrieval_op); | ||
180 | op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); | ||
181 | op->mapping = mapping; | ||
182 | op->end_io_func = end_io_func; | ||
183 | op->context = context; | ||
184 | op->start_time = jiffies; | ||
185 | INIT_WORK(&op->op.fast_work, fscache_retrieval_work); | ||
186 | INIT_LIST_HEAD(&op->to_do); | ||
187 | return op; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * wait for a deferred lookup to complete | ||
192 | */ | ||
193 | static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) | ||
194 | { | ||
195 | unsigned long jif; | ||
196 | |||
197 | _enter(""); | ||
198 | |||
199 | if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) { | ||
200 | _leave(" = 0 [imm]"); | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | fscache_stat(&fscache_n_retrievals_wait); | ||
205 | |||
206 | jif = jiffies; | ||
207 | if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, | ||
208 | fscache_wait_bit_interruptible, | ||
209 | TASK_INTERRUPTIBLE) != 0) { | ||
210 | fscache_stat(&fscache_n_retrievals_intr); | ||
211 | _leave(" = -ERESTARTSYS"); | ||
212 | return -ERESTARTSYS; | ||
213 | } | ||
214 | |||
215 | ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)); | ||
216 | |||
217 | smp_rmb(); | ||
218 | fscache_hist(fscache_retrieval_delay_histogram, jif); | ||
219 | _leave(" = 0 [dly]"); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * read a page from the cache or allocate a block in which to store it | ||
225 | * - we return: | ||
226 | * -ENOMEM - out of memory, nothing done | ||
227 | * -ERESTARTSYS - interrupted | ||
228 | * -ENOBUFS - no backing object available in which to cache the block | ||
229 | * -ENODATA - no data available in the backing object for this block | ||
230 | * 0 - dispatched a read - it'll call end_io_func() when finished | ||
231 | */ | ||
232 | int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, | ||
233 | struct page *page, | ||
234 | fscache_rw_complete_t end_io_func, | ||
235 | void *context, | ||
236 | gfp_t gfp) | ||
237 | { | ||
238 | struct fscache_retrieval *op; | ||
239 | struct fscache_object *object; | ||
240 | int ret; | ||
241 | |||
242 | _enter("%p,%p,,,", cookie, page); | ||
243 | |||
244 | fscache_stat(&fscache_n_retrievals); | ||
245 | |||
246 | if (hlist_empty(&cookie->backing_objects)) | ||
247 | goto nobufs; | ||
248 | |||
249 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
250 | ASSERTCMP(page, !=, NULL); | ||
251 | |||
252 | if (fscache_wait_for_deferred_lookup(cookie) < 0) | ||
253 | return -ERESTARTSYS; | ||
254 | |||
255 | op = fscache_alloc_retrieval(page->mapping, end_io_func, context); | ||
256 | if (!op) { | ||
257 | _leave(" = -ENOMEM"); | ||
258 | return -ENOMEM; | ||
259 | } | ||
260 | |||
261 | spin_lock(&cookie->lock); | ||
262 | |||
263 | if (hlist_empty(&cookie->backing_objects)) | ||
264 | goto nobufs_unlock; | ||
265 | object = hlist_entry(cookie->backing_objects.first, | ||
266 | struct fscache_object, cookie_link); | ||
267 | |||
268 | ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); | ||
269 | |||
270 | if (fscache_submit_op(object, &op->op) < 0) | ||
271 | goto nobufs_unlock; | ||
272 | spin_unlock(&cookie->lock); | ||
273 | |||
274 | fscache_stat(&fscache_n_retrieval_ops); | ||
275 | |||
276 | /* pin the netfs read context in case we need to do the actual netfs | ||
277 | * read because we've encountered a cache read failure */ | ||
278 | fscache_get_context(object->cookie, op->context); | ||
279 | |||
280 | /* we wait for the operation to become active, and then process it | ||
281 | * *here*, in this thread, and not in the thread pool */ | ||
282 | if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { | ||
283 | _debug(">>> WT"); | ||
284 | fscache_stat(&fscache_n_retrieval_op_waits); | ||
285 | wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, | ||
286 | fscache_wait_bit, TASK_UNINTERRUPTIBLE); | ||
287 | _debug("<<< GO"); | ||
288 | } | ||
289 | |||
290 | /* ask the cache to honour the operation */ | ||
291 | if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { | ||
292 | ret = object->cache->ops->allocate_page(op, page, gfp); | ||
293 | if (ret == 0) | ||
294 | ret = -ENODATA; | ||
295 | } else { | ||
296 | ret = object->cache->ops->read_or_alloc_page(op, page, gfp); | ||
297 | } | ||
298 | |||
299 | if (ret == -ENOMEM) | ||
300 | fscache_stat(&fscache_n_retrievals_nomem); | ||
301 | else if (ret == -ERESTARTSYS) | ||
302 | fscache_stat(&fscache_n_retrievals_intr); | ||
303 | else if (ret == -ENODATA) | ||
304 | fscache_stat(&fscache_n_retrievals_nodata); | ||
305 | else if (ret < 0) | ||
306 | fscache_stat(&fscache_n_retrievals_nobufs); | ||
307 | else | ||
308 | fscache_stat(&fscache_n_retrievals_ok); | ||
309 | |||
310 | fscache_put_retrieval(op); | ||
311 | _leave(" = %d", ret); | ||
312 | return ret; | ||
313 | |||
314 | nobufs_unlock: | ||
315 | spin_unlock(&cookie->lock); | ||
316 | kfree(op); | ||
317 | nobufs: | ||
318 | fscache_stat(&fscache_n_retrievals_nobufs); | ||
319 | _leave(" = -ENOBUFS"); | ||
320 | return -ENOBUFS; | ||
321 | } | ||
322 | EXPORT_SYMBOL(__fscache_read_or_alloc_page); | ||
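[Illustrative usage, not from this patch: to show how the return codes above are consumed, a netfs ->readpage() might drive this through the fscache_read_or_alloc_page() wrapper from linux/fscache.h, falling back to a server read when the cache cannot supply the data. A hedged sketch; myfs_i_cookie() and myfs_read_from_server() are hypothetical helpers.]

	#include <linux/fscache.h>
	#include <linux/pagemap.h>

	/* completion callback run when the cache finishes an async read */
	static void myfs_cache_read_done(struct page *page, void *context, int error)
	{
		if (error == 0)
			SetPageUptodate(page);
		unlock_page(page);
	}

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct fscache_cookie *cookie = myfs_i_cookie(page->mapping->host);
		int ret;

		ret = fscache_read_or_alloc_page(cookie, page, myfs_cache_read_done,
						 NULL, GFP_KERNEL);
		if (ret == 0)					/* read dispatched to the cache */
			return 0;
		if (ret == -ENODATA || ret == -ENOBUFS)		/* cache can't help */
			return myfs_read_from_server(file, page);
		return ret;					/* -ENOMEM or -ERESTARTSYS */
	}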
323 | |||
324 | /* | ||
325 | * read a list of pages from the cache or allocate blocks in which to store | ||
326 | * them | ||
327 | * - we return: | ||
328 | * -ENOMEM - out of memory, some pages may be being read | ||
329 | * -ERESTARTSYS - interrupted, some pages may be being read | ||
330 | * -ENOBUFS - no backing object or space available in which to cache any | ||
331 | * pages not being read | ||
332 | * -ENODATA - no data available in the backing object for some or all of | ||
333 | * the pages | ||
334 | * 0 - dispatched a read on all pages | ||
335 | * | ||
336 | * end_io_func() will be called for each page read from the cache as it | ||
337 | * finishes being read | ||
338 | * | ||
339 | * any pages for which a read is dispatched will be removed from the pages | ||
340 | * list and deducted from *nr_pages | ||
341 | */ | ||
342 | int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, | ||
343 | struct address_space *mapping, | ||
344 | struct list_head *pages, | ||
345 | unsigned *nr_pages, | ||
346 | fscache_rw_complete_t end_io_func, | ||
347 | void *context, | ||
348 | gfp_t gfp) | ||
349 | { | ||
350 | fscache_pages_retrieval_func_t func; | ||
351 | struct fscache_retrieval *op; | ||
352 | struct fscache_object *object; | ||
353 | int ret; | ||
354 | |||
355 | _enter("%p,,%d,,,", cookie, *nr_pages); | ||
356 | |||
357 | fscache_stat(&fscache_n_retrievals); | ||
358 | |||
359 | if (hlist_empty(&cookie->backing_objects)) | ||
360 | goto nobufs; | ||
361 | |||
362 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
363 | ASSERTCMP(*nr_pages, >, 0); | ||
364 | ASSERT(!list_empty(pages)); | ||
365 | |||
366 | if (fscache_wait_for_deferred_lookup(cookie) < 0) | ||
367 | return -ERESTARTSYS; | ||
368 | |||
369 | op = fscache_alloc_retrieval(mapping, end_io_func, context); | ||
370 | if (!op) | ||
371 | return -ENOMEM; | ||
372 | |||
373 | spin_lock(&cookie->lock); | ||
374 | |||
375 | if (hlist_empty(&cookie->backing_objects)) | ||
376 | goto nobufs_unlock; | ||
377 | object = hlist_entry(cookie->backing_objects.first, | ||
378 | struct fscache_object, cookie_link); | ||
379 | |||
380 | if (fscache_submit_op(object, &op->op) < 0) | ||
381 | goto nobufs_unlock; | ||
382 | spin_unlock(&cookie->lock); | ||
383 | |||
384 | fscache_stat(&fscache_n_retrieval_ops); | ||
385 | |||
386 | /* pin the netfs read context in case we need to do the actual netfs | ||
387 | * read because we've encountered a cache read failure */ | ||
388 | fscache_get_context(object->cookie, op->context); | ||
389 | |||
390 | /* we wait for the operation to become active, and then process it | ||
391 | * *here*, in this thread, and not in the thread pool */ | ||
392 | if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { | ||
393 | _debug(">>> WT"); | ||
394 | fscache_stat(&fscache_n_retrieval_op_waits); | ||
395 | wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, | ||
396 | fscache_wait_bit, TASK_UNINTERRUPTIBLE); | ||
397 | _debug("<<< GO"); | ||
398 | } | ||
399 | |||
400 | /* ask the cache to honour the operation */ | ||
401 | if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) | ||
402 | func = object->cache->ops->allocate_pages; | ||
403 | else | ||
404 | func = object->cache->ops->read_or_alloc_pages; | ||
405 | ret = func(op, pages, nr_pages, gfp); | ||
406 | |||
407 | if (ret == -ENOMEM) | ||
408 | fscache_stat(&fscache_n_retrievals_nomem); | ||
409 | else if (ret == -ERESTARTSYS) | ||
410 | fscache_stat(&fscache_n_retrievals_intr); | ||
411 | else if (ret == -ENODATA) | ||
412 | fscache_stat(&fscache_n_retrievals_nodata); | ||
413 | else if (ret < 0) | ||
414 | fscache_stat(&fscache_n_retrievals_nobufs); | ||
415 | else | ||
416 | fscache_stat(&fscache_n_retrievals_ok); | ||
417 | |||
418 | fscache_put_retrieval(op); | ||
419 | _leave(" = %d", ret); | ||
420 | return ret; | ||
421 | |||
422 | nobufs_unlock: | ||
423 | spin_unlock(&cookie->lock); | ||
424 | kfree(op); | ||
425 | nobufs: | ||
426 | fscache_stat(&fscache_n_retrievals_nobufs); | ||
427 | _leave(" = -ENOBUFS"); | ||
428 | return -ENOBUFS; | ||
429 | } | ||
430 | EXPORT_SYMBOL(__fscache_read_or_alloc_pages); | ||
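[Illustrative usage, not from this patch: the multi-page variant is typically driven from a netfs ->readpages(). Pages the cache takes responsibility for are unlinked from the list and *nr_pages is reduced, so whatever remains must be fetched from the server. Hedged sketch reusing the hypothetical myfs_cache_read_done() from the previous example.]

	static int myfs_readpages(struct file *file, struct address_space *mapping,
				  struct list_head *pages, unsigned nr_pages)
	{
		struct fscache_cookie *cookie = myfs_i_cookie(mapping->host);
		int ret;

		ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
						  myfs_cache_read_done, NULL,
						  mapping_gfp_mask(mapping));
		if (ret == 0)			/* reads dispatched on every page */
			return 0;
		if (ret == -ENODATA || ret == -ENOBUFS)
			return myfs_read_pages_from_server(file, mapping,
							   pages, nr_pages);
		return ret;			/* -ENOMEM or -ERESTARTSYS */
	}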
431 | |||
432 | /* | ||
433 | * allocate a block in the cache on which to store a page | ||
434 | * - we return: | ||
435 | * -ENOMEM - out of memory, nothing done | ||
436 | * -ERESTARTSYS - interrupted | ||
437 | * -ENOBUFS - no backing object available in which to cache the block | ||
438 | * 0 - block allocated | ||
439 | */ | ||
440 | int __fscache_alloc_page(struct fscache_cookie *cookie, | ||
441 | struct page *page, | ||
442 | gfp_t gfp) | ||
443 | { | ||
444 | struct fscache_retrieval *op; | ||
445 | struct fscache_object *object; | ||
446 | int ret; | ||
447 | |||
448 | _enter("%p,%p,,,", cookie, page); | ||
449 | |||
450 | fscache_stat(&fscache_n_allocs); | ||
451 | |||
452 | if (hlist_empty(&cookie->backing_objects)) | ||
453 | goto nobufs; | ||
454 | |||
455 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
456 | ASSERTCMP(page, !=, NULL); | ||
457 | |||
458 | if (fscache_wait_for_deferred_lookup(cookie) < 0) | ||
459 | return -ERESTARTSYS; | ||
460 | |||
461 | op = fscache_alloc_retrieval(page->mapping, NULL, NULL); | ||
462 | if (!op) | ||
463 | return -ENOMEM; | ||
464 | |||
465 | spin_lock(&cookie->lock); | ||
466 | |||
467 | if (hlist_empty(&cookie->backing_objects)) | ||
468 | goto nobufs_unlock; | ||
469 | object = hlist_entry(cookie->backing_objects.first, | ||
470 | struct fscache_object, cookie_link); | ||
471 | |||
472 | if (fscache_submit_op(object, &op->op) < 0) | ||
473 | goto nobufs_unlock; | ||
474 | spin_unlock(&cookie->lock); | ||
475 | |||
476 | fscache_stat(&fscache_n_alloc_ops); | ||
477 | |||
478 | if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { | ||
479 | _debug(">>> WT"); | ||
480 | fscache_stat(&fscache_n_alloc_op_waits); | ||
481 | wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, | ||
482 | fscache_wait_bit, TASK_UNINTERRUPTIBLE); | ||
483 | _debug("<<< GO"); | ||
484 | } | ||
485 | |||
486 | /* ask the cache to honour the operation */ | ||
487 | ret = object->cache->ops->allocate_page(op, page, gfp); | ||
488 | |||
489 | if (ret < 0) | ||
490 | fscache_stat(&fscache_n_allocs_nobufs); | ||
491 | else | ||
492 | fscache_stat(&fscache_n_allocs_ok); | ||
493 | |||
494 | fscache_put_retrieval(op); | ||
495 | _leave(" = %d", ret); | ||
496 | return ret; | ||
497 | |||
498 | nobufs_unlock: | ||
499 | spin_unlock(&cookie->lock); | ||
500 | kfree(op); | ||
501 | nobufs: | ||
502 | fscache_stat(&fscache_n_allocs_nobufs); | ||
503 | _leave(" = -ENOBUFS"); | ||
504 | return -ENOBUFS; | ||
505 | } | ||
506 | EXPORT_SYMBOL(__fscache_alloc_page); | ||
507 | |||
508 | /* | ||
509 | * release a write op reference | ||
510 | */ | ||
511 | static void fscache_release_write_op(struct fscache_operation *_op) | ||
512 | { | ||
513 | _enter("{OP%x}", _op->debug_id); | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * perform the background storage of a page into the cache | ||
518 | */ | ||
519 | static void fscache_write_op(struct fscache_operation *_op) | ||
520 | { | ||
521 | struct fscache_storage *op = | ||
522 | container_of(_op, struct fscache_storage, op); | ||
523 | struct fscache_object *object = op->op.object; | ||
524 | struct fscache_cookie *cookie = object->cookie; | ||
525 | struct page *page; | ||
526 | unsigned n; | ||
527 | void *results[1]; | ||
528 | int ret; | ||
529 | |||
530 | _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); | ||
531 | |||
532 | spin_lock(&cookie->lock); | ||
533 | spin_lock(&object->lock); | ||
534 | |||
535 | if (!fscache_object_is_active(object)) { | ||
536 | spin_unlock(&object->lock); | ||
537 | spin_unlock(&cookie->lock); | ||
538 | _leave(""); | ||
539 | return; | ||
540 | } | ||
541 | |||
542 | fscache_stat(&fscache_n_store_calls); | ||
543 | |||
544 | /* find a page to store */ | ||
545 | page = NULL; | ||
546 | n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1, | ||
547 | FSCACHE_COOKIE_PENDING_TAG); | ||
548 | if (n != 1) | ||
549 | goto superseded; | ||
550 | page = results[0]; | ||
551 | _debug("gang %d [%lx]", n, page->index); | ||
552 | if (page->index > op->store_limit) | ||
553 | goto superseded; | ||
554 | |||
555 | radix_tree_tag_clear(&cookie->stores, page->index, | ||
556 | FSCACHE_COOKIE_PENDING_TAG); | ||
557 | |||
558 | spin_unlock(&object->lock); | ||
559 | spin_unlock(&cookie->lock); | ||
560 | |||
561 | if (page) { | ||
562 | ret = object->cache->ops->write_page(op, page); | ||
563 | fscache_end_page_write(cookie, page); | ||
564 | page_cache_release(page); | ||
565 | if (ret < 0) | ||
566 | fscache_abort_object(object); | ||
567 | else | ||
568 | fscache_enqueue_operation(&op->op); | ||
569 | } | ||
570 | |||
571 | _leave(""); | ||
572 | return; | ||
573 | |||
574 | superseded: | ||
575 | /* this writer is going away and there aren't any more things to | ||
576 | * write */ | ||
577 | _debug("cease"); | ||
578 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
579 | spin_unlock(&object->lock); | ||
580 | spin_unlock(&cookie->lock); | ||
581 | _leave(""); | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * request a page be stored in the cache | ||
586 | * - returns: | ||
587 | * -ENOMEM - out of memory, nothing done | ||
588 | * -ENOBUFS - no backing object available in which to cache the page | ||
589 | * 0 - dispatched a write - it'll call end_io_func() when finished | ||
590 | * | ||
591 | * if the cookie still has a backing object at this point, that object can be | ||
592 | * in one of a few states with respect to storage processing: | ||
593 | * | ||
594 | * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is | ||
595 | * set) | ||
596 | * | ||
597 | * (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred | ||
598 | * fill op) | ||
599 | * | ||
600 | * (b) writes deferred till post-creation (mark page for writing and | ||
601 | * return immediately) | ||
602 | * | ||
603 | * (2) negative lookup, object created, initial fill being made from netfs | ||
604 | * (FSCACHE_COOKIE_INITIAL_FILL is set) | ||
605 | * | ||
606 | * (a) fill point not yet reached this page (mark page for writing and | ||
607 | * return) | ||
608 | * | ||
609 | * (b) fill point passed this page (queue op to store this page) | ||
610 | * | ||
611 | * (3) object extant (queue op to store this page) | ||
612 | * | ||
613 | * any other state is invalid | ||
614 | */ | ||
615 | int __fscache_write_page(struct fscache_cookie *cookie, | ||
616 | struct page *page, | ||
617 | gfp_t gfp) | ||
618 | { | ||
619 | struct fscache_storage *op; | ||
620 | struct fscache_object *object; | ||
621 | int ret; | ||
622 | |||
623 | _enter("%p,%x,", cookie, (u32) page->flags); | ||
624 | |||
625 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
626 | ASSERT(PageFsCache(page)); | ||
627 | |||
628 | fscache_stat(&fscache_n_stores); | ||
629 | |||
630 | op = kzalloc(sizeof(*op), GFP_NOIO); | ||
631 | if (!op) | ||
632 | goto nomem; | ||
633 | |||
634 | fscache_operation_init(&op->op, fscache_release_write_op); | ||
635 | fscache_operation_init_slow(&op->op, fscache_write_op); | ||
636 | op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); | ||
637 | |||
638 | ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | ||
639 | if (ret < 0) | ||
640 | goto nomem_free; | ||
641 | |||
642 | ret = -ENOBUFS; | ||
643 | spin_lock(&cookie->lock); | ||
644 | |||
645 | if (hlist_empty(&cookie->backing_objects)) | ||
646 | goto nobufs; | ||
647 | object = hlist_entry(cookie->backing_objects.first, | ||
648 | struct fscache_object, cookie_link); | ||
649 | if (test_bit(FSCACHE_IOERROR, &object->cache->flags)) | ||
650 | goto nobufs; | ||
651 | |||
652 | /* add the page to the pending-storage radix tree on the backing | ||
653 | * object */ | ||
654 | spin_lock(&object->lock); | ||
655 | |||
656 | _debug("store limit %llx", (unsigned long long) object->store_limit); | ||
657 | |||
658 | ret = radix_tree_insert(&cookie->stores, page->index, page); | ||
659 | if (ret < 0) { | ||
660 | if (ret == -EEXIST) | ||
661 | goto already_queued; | ||
662 | _debug("insert failed %d", ret); | ||
663 | goto nobufs_unlock_obj; | ||
664 | } | ||
665 | |||
666 | radix_tree_tag_set(&cookie->stores, page->index, | ||
667 | FSCACHE_COOKIE_PENDING_TAG); | ||
668 | page_cache_get(page); | ||
669 | |||
670 | /* we only want one writer at a time, but we do need to queue new | ||
671 | * writers after exclusive ops */ | ||
672 | if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) | ||
673 | goto already_pending; | ||
674 | |||
675 | spin_unlock(&object->lock); | ||
676 | |||
677 | op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
678 | op->store_limit = object->store_limit; | ||
679 | |||
680 | if (fscache_submit_op(object, &op->op) < 0) | ||
681 | goto submit_failed; | ||
682 | |||
683 | spin_unlock(&cookie->lock); | ||
684 | radix_tree_preload_end(); | ||
685 | fscache_stat(&fscache_n_store_ops); | ||
686 | fscache_stat(&fscache_n_stores_ok); | ||
687 | |||
688 | /* the slow work queue now carries its own ref on the object */ | ||
689 | fscache_put_operation(&op->op); | ||
690 | _leave(" = 0"); | ||
691 | return 0; | ||
692 | |||
693 | already_queued: | ||
694 | fscache_stat(&fscache_n_stores_again); | ||
695 | already_pending: | ||
696 | spin_unlock(&object->lock); | ||
697 | spin_unlock(&cookie->lock); | ||
698 | radix_tree_preload_end(); | ||
699 | kfree(op); | ||
700 | fscache_stat(&fscache_n_stores_ok); | ||
701 | _leave(" = 0"); | ||
702 | return 0; | ||
703 | |||
704 | submit_failed: | ||
705 | radix_tree_delete(&cookie->stores, page->index); | ||
706 | page_cache_release(page); | ||
707 | ret = -ENOBUFS; | ||
708 | goto nobufs; | ||
709 | |||
710 | nobufs_unlock_obj: | ||
711 | spin_unlock(&object->lock); | ||
712 | nobufs: | ||
713 | spin_unlock(&cookie->lock); | ||
714 | radix_tree_preload_end(); | ||
715 | kfree(op); | ||
716 | fscache_stat(&fscache_n_stores_nobufs); | ||
717 | _leave(" = -ENOBUFS"); | ||
718 | return -ENOBUFS; | ||
719 | |||
720 | nomem_free: | ||
721 | kfree(op); | ||
722 | nomem: | ||
723 | fscache_stat(&fscache_n_stores_oom); | ||
724 | _leave(" = -ENOMEM"); | ||
725 | return -ENOMEM; | ||
726 | } | ||
727 | EXPORT_SYMBOL(__fscache_write_page); | ||
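[Illustrative usage, not from this patch: pairing with the read side above, once a page has been filled from the server the netfs can ask the cache to store a copy via the fscache_write_page() wrapper; if that fails it should give back the PG_fscache mark with fscache_uncache_page() so the page is not left flagged with a write that will never happen. Hedged sketch, hypothetical helper name.]

	/* hypothetical hook run after a page has been read from the server
	 * and marked uptodate */
	static void myfs_server_read_done(struct fscache_cookie *cookie,
					  struct page *page)
	{
		if (PageFsCache(page) &&
		    fscache_write_page(cookie, page, GFP_KERNEL) != 0)
			fscache_uncache_page(cookie, page);
	}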
728 | |||
729 | /* | ||
730 | * remove a page from the cache | ||
731 | */ | ||
732 | void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) | ||
733 | { | ||
734 | struct fscache_object *object; | ||
735 | |||
736 | _enter(",%p", page); | ||
737 | |||
738 | ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); | ||
739 | ASSERTCMP(page, !=, NULL); | ||
740 | |||
741 | fscache_stat(&fscache_n_uncaches); | ||
742 | |||
743 | /* cache withdrawal may beat us to it */ | ||
744 | if (!PageFsCache(page)) | ||
745 | goto done; | ||
746 | |||
747 | /* get the object */ | ||
748 | spin_lock(&cookie->lock); | ||
749 | |||
750 | if (hlist_empty(&cookie->backing_objects)) { | ||
751 | ClearPageFsCache(page); | ||
752 | goto done_unlock; | ||
753 | } | ||
754 | |||
755 | object = hlist_entry(cookie->backing_objects.first, | ||
756 | struct fscache_object, cookie_link); | ||
757 | |||
758 | /* there might now be stuff on disk we could read */ | ||
759 | clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); | ||
760 | |||
761 | /* only invoke the cache backend if we managed to mark the page | ||
762 | * uncached here; this deals with synchronisation vs withdrawal */ | ||
763 | if (TestClearPageFsCache(page) && | ||
764 | object->cache->ops->uncache_page) { | ||
765 | /* the cache backend releases the cookie lock */ | ||
766 | object->cache->ops->uncache_page(object, page); | ||
767 | goto done; | ||
768 | } | ||
769 | |||
770 | done_unlock: | ||
771 | spin_unlock(&cookie->lock); | ||
772 | done: | ||
773 | _leave(""); | ||
774 | } | ||
775 | EXPORT_SYMBOL(__fscache_uncache_page); | ||
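[Illustrative usage, not from this patch: a netfs typically drops the mark from its ->releasepage() and ->invalidatepage() paths, after waiting for any in-flight store with fscache_wait_on_page_write(). Hedged sketch using the presumed wrappers from linux/fscache.h; a real implementation might return 0 instead of sleeping, depending on the gfp flags.]

	static int myfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct fscache_cookie *cookie = myfs_i_cookie(page->mapping->host);

		if (PageFsCache(page)) {
			/* don't let the VM free the page while the cache is
			 * still copying data out of it */
			fscache_wait_on_page_write(cookie, page);
			fscache_uncache_page(cookie, page);
		}
		return 1;	/* page may now be released by the VM */
	}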
776 | |||
777 | /** | ||
778 | * fscache_mark_pages_cached - Mark pages as being cached | ||
779 | * @op: The retrieval op pages are being marked for | ||
780 | * @pagevec: The pages to be marked | ||
781 | * | ||
782 | * Mark a bunch of netfs pages as being cached. After this is called, | ||
783 | * the netfs must call fscache_uncache_page() to remove the mark. | ||
784 | */ | ||
785 | void fscache_mark_pages_cached(struct fscache_retrieval *op, | ||
786 | struct pagevec *pagevec) | ||
787 | { | ||
788 | struct fscache_cookie *cookie = op->op.object->cookie; | ||
789 | unsigned long loop; | ||
790 | |||
791 | #ifdef CONFIG_FSCACHE_STATS | ||
792 | atomic_add(pagevec->nr, &fscache_n_marks); | ||
793 | #endif | ||
794 | |||
795 | for (loop = 0; loop < pagevec->nr; loop++) { | ||
796 | struct page *page = pagevec->pages[loop]; | ||
797 | |||
798 | _debug("- mark %p{%lx}", page, page->index); | ||
799 | if (TestSetPageFsCache(page)) { | ||
800 | static bool once_only; | ||
801 | if (!once_only) { | ||
802 | once_only = true; | ||
803 | printk(KERN_WARNING "FS-Cache:" | ||
804 | " Cookie type %s marked page %lx" | ||
805 | " multiple times\n", | ||
806 | cookie->def->name, page->index); | ||
807 | } | ||
808 | } | ||
809 | } | ||
810 | |||
811 | if (cookie->def->mark_pages_cached) | ||
812 | cookie->def->mark_pages_cached(cookie->netfs_data, | ||
813 | op->mapping, pagevec); | ||
814 | pagevec_reinit(pagevec); | ||
815 | } | ||
816 | EXPORT_SYMBOL(fscache_mark_pages_cached); | ||
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c new file mode 100644 index 000000000000..beeab44bc31a --- /dev/null +++ b/fs/fscache/proc.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* FS-Cache statistics viewing interface | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL OPERATION | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include "internal.h" | ||
17 | |||
18 | /* | ||
19 | * initialise the /proc/fs/fscache/ directory | ||
20 | */ | ||
21 | int __init fscache_proc_init(void) | ||
22 | { | ||
23 | _enter(""); | ||
24 | |||
25 | if (!proc_mkdir("fs/fscache", NULL)) | ||
26 | goto error_dir; | ||
27 | |||
28 | #ifdef CONFIG_FSCACHE_STATS | ||
29 | if (!proc_create("fs/fscache/stats", S_IFREG | 0444, NULL, | ||
30 | &fscache_stats_fops)) | ||
31 | goto error_stats; | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_FSCACHE_HISTOGRAM | ||
35 | if (!proc_create("fs/fscache/histogram", S_IFREG | 0444, NULL, | ||
36 | &fscache_histogram_fops)) | ||
37 | goto error_histogram; | ||
38 | #endif | ||
39 | |||
40 | _leave(" = 0"); | ||
41 | return 0; | ||
42 | |||
43 | #ifdef CONFIG_FSCACHE_HISTOGRAM | ||
44 | error_histogram: | ||
45 | #endif | ||
46 | #ifdef CONFIG_FSCACHE_STATS | ||
47 | remove_proc_entry("fs/fscache/stats", NULL); | ||
48 | error_stats: | ||
49 | #endif | ||
50 | remove_proc_entry("fs/fscache", NULL); | ||
51 | error_dir: | ||
52 | _leave(" = -ENOMEM"); | ||
53 | return -ENOMEM; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * clean up the /proc/fs/fscache/ directory | ||
58 | */ | ||
59 | void fscache_proc_cleanup(void) | ||
60 | { | ||
61 | #ifdef CONFIG_FSCACHE_HISTOGRAM | ||
62 | remove_proc_entry("fs/fscache/histogram", NULL); | ||
63 | #endif | ||
64 | #ifdef CONFIG_FSCACHE_STATS | ||
65 | remove_proc_entry("fs/fscache/stats", NULL); | ||
66 | #endif | ||
67 | remove_proc_entry("fs/fscache", NULL); | ||
68 | } | ||
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c new file mode 100644 index 000000000000..65deb99e756b --- /dev/null +++ b/fs/fscache/stats.c | |||
@@ -0,0 +1,212 @@ | |||
1 | /* FS-Cache statistics | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define FSCACHE_DEBUG_LEVEL THREAD | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include "internal.h" | ||
17 | |||
18 | /* | ||
19 | * operation counters | ||
20 | */ | ||
21 | atomic_t fscache_n_op_pend; | ||
22 | atomic_t fscache_n_op_run; | ||
23 | atomic_t fscache_n_op_enqueue; | ||
24 | atomic_t fscache_n_op_requeue; | ||
25 | atomic_t fscache_n_op_deferred_release; | ||
26 | atomic_t fscache_n_op_release; | ||
27 | atomic_t fscache_n_op_gc; | ||
28 | |||
29 | atomic_t fscache_n_attr_changed; | ||
30 | atomic_t fscache_n_attr_changed_ok; | ||
31 | atomic_t fscache_n_attr_changed_nobufs; | ||
32 | atomic_t fscache_n_attr_changed_nomem; | ||
33 | atomic_t fscache_n_attr_changed_calls; | ||
34 | |||
35 | atomic_t fscache_n_allocs; | ||
36 | atomic_t fscache_n_allocs_ok; | ||
37 | atomic_t fscache_n_allocs_wait; | ||
38 | atomic_t fscache_n_allocs_nobufs; | ||
39 | atomic_t fscache_n_alloc_ops; | ||
40 | atomic_t fscache_n_alloc_op_waits; | ||
41 | |||
42 | atomic_t fscache_n_retrievals; | ||
43 | atomic_t fscache_n_retrievals_ok; | ||
44 | atomic_t fscache_n_retrievals_wait; | ||
45 | atomic_t fscache_n_retrievals_nodata; | ||
46 | atomic_t fscache_n_retrievals_nobufs; | ||
47 | atomic_t fscache_n_retrievals_intr; | ||
48 | atomic_t fscache_n_retrievals_nomem; | ||
49 | atomic_t fscache_n_retrieval_ops; | ||
50 | atomic_t fscache_n_retrieval_op_waits; | ||
51 | |||
52 | atomic_t fscache_n_stores; | ||
53 | atomic_t fscache_n_stores_ok; | ||
54 | atomic_t fscache_n_stores_again; | ||
55 | atomic_t fscache_n_stores_nobufs; | ||
56 | atomic_t fscache_n_stores_oom; | ||
57 | atomic_t fscache_n_store_ops; | ||
58 | atomic_t fscache_n_store_calls; | ||
59 | |||
60 | atomic_t fscache_n_marks; | ||
61 | atomic_t fscache_n_uncaches; | ||
62 | |||
63 | atomic_t fscache_n_acquires; | ||
64 | atomic_t fscache_n_acquires_null; | ||
65 | atomic_t fscache_n_acquires_no_cache; | ||
66 | atomic_t fscache_n_acquires_ok; | ||
67 | atomic_t fscache_n_acquires_nobufs; | ||
68 | atomic_t fscache_n_acquires_oom; | ||
69 | |||
70 | atomic_t fscache_n_updates; | ||
71 | atomic_t fscache_n_updates_null; | ||
72 | atomic_t fscache_n_updates_run; | ||
73 | |||
74 | atomic_t fscache_n_relinquishes; | ||
75 | atomic_t fscache_n_relinquishes_null; | ||
76 | atomic_t fscache_n_relinquishes_waitcrt; | ||
77 | |||
78 | atomic_t fscache_n_cookie_index; | ||
79 | atomic_t fscache_n_cookie_data; | ||
80 | atomic_t fscache_n_cookie_special; | ||
81 | |||
82 | atomic_t fscache_n_object_alloc; | ||
83 | atomic_t fscache_n_object_no_alloc; | ||
84 | atomic_t fscache_n_object_lookups; | ||
85 | atomic_t fscache_n_object_lookups_negative; | ||
86 | atomic_t fscache_n_object_lookups_positive; | ||
87 | atomic_t fscache_n_object_created; | ||
88 | atomic_t fscache_n_object_avail; | ||
89 | atomic_t fscache_n_object_dead; | ||
90 | |||
91 | atomic_t fscache_n_checkaux_none; | ||
92 | atomic_t fscache_n_checkaux_okay; | ||
93 | atomic_t fscache_n_checkaux_update; | ||
94 | atomic_t fscache_n_checkaux_obsolete; | ||
95 | |||
96 | /* | ||
97 | * display the general statistics | ||
98 | */ | ||
99 | static int fscache_stats_show(struct seq_file *m, void *v) | ||
100 | { | ||
101 | seq_puts(m, "FS-Cache statistics\n"); | ||
102 | |||
103 | seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", | ||
104 | atomic_read(&fscache_n_cookie_index), | ||
105 | atomic_read(&fscache_n_cookie_data), | ||
106 | atomic_read(&fscache_n_cookie_special)); | ||
107 | |||
108 | seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", | ||
109 | atomic_read(&fscache_n_object_alloc), | ||
110 | atomic_read(&fscache_n_object_no_alloc), | ||
111 | atomic_read(&fscache_n_object_avail), | ||
112 | atomic_read(&fscache_n_object_dead)); | ||
113 | seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", | ||
114 | atomic_read(&fscache_n_checkaux_none), | ||
115 | atomic_read(&fscache_n_checkaux_okay), | ||
116 | atomic_read(&fscache_n_checkaux_update), | ||
117 | atomic_read(&fscache_n_checkaux_obsolete)); | ||
118 | |||
119 | seq_printf(m, "Pages : mrk=%u unc=%u\n", | ||
120 | atomic_read(&fscache_n_marks), | ||
121 | atomic_read(&fscache_n_uncaches)); | ||
122 | |||
123 | seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" | ||
124 | " oom=%u\n", | ||
125 | atomic_read(&fscache_n_acquires), | ||
126 | atomic_read(&fscache_n_acquires_null), | ||
127 | atomic_read(&fscache_n_acquires_no_cache), | ||
128 | atomic_read(&fscache_n_acquires_ok), | ||
129 | atomic_read(&fscache_n_acquires_nobufs), | ||
130 | atomic_read(&fscache_n_acquires_oom)); | ||
131 | |||
132 | seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n", | ||
133 | atomic_read(&fscache_n_object_lookups), | ||
134 | atomic_read(&fscache_n_object_lookups_negative), | ||
135 | atomic_read(&fscache_n_object_lookups_positive), | ||
136 | atomic_read(&fscache_n_object_created)); | ||
137 | |||
138 | seq_printf(m, "Updates: n=%u nul=%u run=%u\n", | ||
139 | atomic_read(&fscache_n_updates), | ||
140 | atomic_read(&fscache_n_updates_null), | ||
141 | atomic_read(&fscache_n_updates_run)); | ||
142 | |||
143 | seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n", | ||
144 | atomic_read(&fscache_n_relinquishes), | ||
145 | atomic_read(&fscache_n_relinquishes_null), | ||
146 | atomic_read(&fscache_n_relinquishes_waitcrt)); | ||
147 | |||
148 | seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", | ||
149 | atomic_read(&fscache_n_attr_changed), | ||
150 | atomic_read(&fscache_n_attr_changed_ok), | ||
151 | atomic_read(&fscache_n_attr_changed_nobufs), | ||
152 | atomic_read(&fscache_n_attr_changed_nomem), | ||
153 | atomic_read(&fscache_n_attr_changed_calls)); | ||
154 | |||
155 | seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n", | ||
156 | atomic_read(&fscache_n_allocs), | ||
157 | atomic_read(&fscache_n_allocs_ok), | ||
158 | atomic_read(&fscache_n_allocs_wait), | ||
159 | atomic_read(&fscache_n_allocs_nobufs)); | ||
160 | seq_printf(m, "Allocs : ops=%u owt=%u\n", | ||
161 | atomic_read(&fscache_n_alloc_ops), | ||
162 | atomic_read(&fscache_n_alloc_op_waits)); | ||
163 | |||
164 | seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" | ||
165 | " int=%u oom=%u\n", | ||
166 | atomic_read(&fscache_n_retrievals), | ||
167 | atomic_read(&fscache_n_retrievals_ok), | ||
168 | atomic_read(&fscache_n_retrievals_wait), | ||
169 | atomic_read(&fscache_n_retrievals_nodata), | ||
170 | atomic_read(&fscache_n_retrievals_nobufs), | ||
171 | atomic_read(&fscache_n_retrievals_intr), | ||
172 | atomic_read(&fscache_n_retrievals_nomem)); | ||
173 | seq_printf(m, "Retrvls: ops=%u owt=%u\n", | ||
174 | atomic_read(&fscache_n_retrieval_ops), | ||
175 | atomic_read(&fscache_n_retrieval_op_waits)); | ||
176 | |||
177 | seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", | ||
178 | atomic_read(&fscache_n_stores), | ||
179 | atomic_read(&fscache_n_stores_ok), | ||
180 | atomic_read(&fscache_n_stores_again), | ||
181 | atomic_read(&fscache_n_stores_nobufs), | ||
182 | atomic_read(&fscache_n_stores_oom)); | ||
183 | seq_printf(m, "Stores : ops=%u run=%u\n", | ||
184 | atomic_read(&fscache_n_store_ops), | ||
185 | atomic_read(&fscache_n_store_calls)); | ||
186 | |||
187 | seq_printf(m, "Ops : pend=%u run=%u enq=%u\n", | ||
188 | atomic_read(&fscache_n_op_pend), | ||
189 | atomic_read(&fscache_n_op_run), | ||
190 | atomic_read(&fscache_n_op_enqueue)); | ||
191 | seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", | ||
192 | atomic_read(&fscache_n_op_deferred_release), | ||
193 | atomic_read(&fscache_n_op_release), | ||
194 | atomic_read(&fscache_n_op_gc)); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * open "/proc/fs/fscache/stats" to provide a summary of the statistics | ||
200 | */ | ||
201 | static int fscache_stats_open(struct inode *inode, struct file *file) | ||
202 | { | ||
203 | return single_open(file, fscache_stats_show, NULL); | ||
204 | } | ||
205 | |||
206 | const struct file_operations fscache_stats_fops = { | ||
207 | .owner = THIS_MODULE, | ||
208 | .open = fscache_stats_open, | ||
209 | .read = seq_read, | ||
210 | .llseek = seq_lseek, | ||
211 | .release = seq_release, | ||
212 | }; | ||