author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/sunrpc/cache.c
tag	v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--	net/sunrpc/cache.c	1189
1 files changed, 1189 insertions, 0 deletions

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
new file mode 100644
index 000000000000..900f5bc7e336
--- /dev/null
+++ b/net/sunrpc/cache.c
@@ -0,0 +1,1189 @@
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define	RPCDBG_FACILITY	RPCDBG_CACHE

static void cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	atomic_set(&h->refcnt, 1);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}


static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise the head is
 * cache_put()ed and the return value is
 *  -EAGAIN if an upcall is pending, or
 *  -ENOENT if the cache entry was negative.
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("Want update, refage=%ld, age=%ld\n", refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		cache_defer_req(rqstp, h);

	if (rv && h)
		detail->cache_put(h, detail);
	return rv;
}

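/*
 * Illustrative caller sketch (added commentary, not part of the
 * original file): a typical user of cache_check() looks an item up and
 * then gates the request on the result, along the lines of
 *
 *	switch (cache_check(&my_cache, &item->h, &rqstp->rq_chandle)) {
 *	case 0:		use the item, cache_put() when finished
 *	case -EAGAIN:	the request was deferred; drop it for now
 *	case -ENOENT:	negative entry; reject the request
 *	}
 *
 * "my_cache" and "item" are invented names; note that cache_check()
 * has already done the cache_put() on any non-zero return.
 */
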
static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

void cache_fresh(struct cache_detail *detail,
		 struct cache_head *head, time_t expiry)
{

	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	if (!test_and_set_bit(CACHE_VALID, &head->flags))
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags))
		queue_loose(detail, head);
}

/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_details and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - the expiry time is before the current time, or
 * - the last_refresh time is before the flush_time for that cache.
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
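/*
 * Worked example (added commentary): if a scan starts at t=1000,
 * nextcheck is first set to 1000+30*60.  If the scan then sees entries
 * expiring at t=1500 and t=1200, nextcheck ends up at 1201 (each step
 * being expiry_time+1), so this table is not scanned again until
 * get_seconds() reaches 1201, unless flush_time is set below that,
 * which pulls nextcheck down with it.
 */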

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;

static void do_cache_clean(void *data);
static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);

void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = THIS_MODULE;
		cd->channel_ent = cd->content_ent = NULL;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		cd->flush_ent = p;
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = THIS_MODULE;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->channel_ent = p;
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->content_ent = p;
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = THIS_MODULE;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_work(&cache_cleaner);
}

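/*
 * Hypothetical usage sketch (added commentary): a cache is normally
 * declared statically and registered at module init, e.g.
 *
 *	static struct cache_detail my_cache = {
 *		.hash_size	= MYCACHE_HASHSIZE,
 *		.hash_table	= my_cache_table,
 *		.name		= "my_cache",
 *		.cache_put	= my_cache_put,
 *		.cache_request	= my_cache_request,
 *		.cache_parse	= my_cache_parse,
 *		.cache_show	= my_cache_show,
 *	};
 *
 *	cache_register(&my_cache);	(at module init)
 *	cache_unregister(&my_cache);	(at exit; fails -EBUSY if busy)
 *
 * All "my_*" names are invented for illustration; the field names
 * follow struct cache_detail as used throughout this file.
 */
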
int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		if (cd->flush_ent)
			remove_proc_entry("flush", cd->proc_ent);
		if (cd->channel_ent)
			remove_proc_entry("channel", cd->proc_ent);
		if (cd->content_ent)
			remove_proc_entry("content", cd->proc_ent);

		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work(&cache_cleaner);
		flush_scheduled_work();
	}
	return 0;
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time,
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or move to the next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->refcnt) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			d->cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(void *data)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}



/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
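/*
 * Illustrative sketch (added commentary, assumed shape): a transport
 * that supports deferral embeds a cache_deferred_req in its own saved
 * request record and returns it from the ->defer method, e.g.
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req	handle;
 *		... copy of the request state ...
 *	};
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred_req *dr = ...allocate and fill...;
 *		dr->handle.revisit = my_revisit;
 *		return &dr->handle;
 *	}
 *
 * my_revisit(dreq, too_many) is later called with too_many == 0 when
 * the cache item becomes valid, or with 1 if the deferred request is
 * being dropped.  All "my_*" names are invented for illustration.
 */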

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in; now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too many deferred requests; randomly drop
		 * the first or the last
		 */
		if (net_random()&1)
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (test_bit(CACHE_VALID, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;


	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/cache.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything is ready to read, and always allows write.
 *
 * Implemented as a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
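/*
 * Illustrative user-space view (added commentary; path and flow are
 * assumed from cache_register() above): a daemon serving a cache
 * typically does
 *
 *	fd = open("/proc/net/rpc/CACHENAME/channel", O_RDWR);
 *	while (read(fd, buf, sizeof(buf)) > 0) {
 *		... parse the request, look up the answer ...
 *		write(fd, reply, replylen);
 *	}
 *
 * Each read() returns at most one whole request; each write() hands
 * one complete reply line to this cache's cache_parse() method.
 */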

static DEFINE_SPINLOCK(queue_lock);
static DECLARE_MUTEX(queue_io_sem);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
	int err;

	if (count == 0)
		return 0;

	down(&queue_io_sem); /* protect against multiple concurrent
			      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		up(&queue_io_sem);
		if (rp->offset)
			BUG();
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	if (rq->q.reader)
		BUG();
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cd->cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	up(&queue_io_sem);
	return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_sem */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	if (count == 0)
		return 0;
	if (count >= sizeof(write_buf))
		return -EINVAL;

	down(&queue_io_sem);

	if (copy_from_user(write_buf, buf, count)) {
		up(&queue_io_sem);
		return -EFAULT;
	}
	write_buf[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, write_buf, count);
	else
		err = -EINVAL;

	up(&queue_io_sem);
	return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in the current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = NULL;

	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		struct cache_detail *cd = PDE(inode)->data;

		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	return 0;
}



static struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl, /* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};


static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				break;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			detail->cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x.
 * Each record is terminated with a newline.
 */
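/*
 * Example (added commentary): qword_add() emits "a b" as "a\040b "
 * (the space quoted as three octal digits, the field terminated by a
 * space), while qword_addhex() emits the two bytes 0x01 0xff as
 * "\x01ff ".
 */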

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0)
		return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail);
	}
}

/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{

	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}

/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal.
 *
 * A message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by the cache.
 */
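/*
 * Example (added commentary): a reply line written by a daemon for a
 * hypothetical cache could look like
 *
 *	mykey 2147483647 myvalue\n
 *
 * i.e. quoted fields including an expiry time in seconds; the precise
 * fields and their order are defined by each cache's cache_parse()
 * method, and "mykey"/"myvalue" here are invented placeholders.
 */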

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
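
/*
 * Example (added commentary): given "\x6e6673 rest", qword_get()
 * stores the three bytes "nfs" in dest, returns 3, and leaves *bpp at
 * "rest"; given "a\040b ", it yields the string "a b".
 */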


/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */
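/*
 * Note on the iterator below (added commentary): the 64-bit seq_file
 * position packs the hash bucket into the upper 32 bits and the index
 * within that bucket's chain, offset by one for the header token, into
 * the lower 32 bits, i.e. pos == ((loff_t)hash << 32) | (entry + 1),
 * with pos == 0 meaning the header.
 */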

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;


	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d\n",
			   cp->expiry_time, atomic_read(&cp->refcnt));
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
	int res;
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = kmalloc(sizeof(*han), GFP_KERNEL);
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;

	res = seq_open(file, &cache_content_op);
	if (res)
		kfree(han);
	else
		((struct seq_file *)file->private_data)->private = han;

	return res;
}

static int content_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct handle *han = m->private;
	kfree(han);
	m->private = NULL;
	return seq_release(inode, file);
}

static struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release,
};

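/*
 * The "flush" file (added commentary): reading returns the current
 * flush_time; writing a time in seconds sets flush_time, which
 * invalidates every entry last refreshed before that time, and kicks
 * an immediate clean pass.  For example, from user space:
 *
 *	echo 1100000000 > /proc/net/rpc/CACHENAME/flush
 *
 * (path assumed from cache_register() above).
 */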
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	int len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		len = -EFAULT;
	else
		*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

static struct file_operations cache_flush_operations = {
	.open		= nonseekable_open,
	.read		= read_flush,
	.write		= write_flush,
};