path: root/include/linux/sunrpc/cache.h
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/sunrpc/cache.h
Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/linux/sunrpc/cache.h')
-rw-r--r--	include/linux/sunrpc/cache.h	312
1 file changed, 312 insertions, 0 deletions
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
new file mode 100644
index 000000000000..6864063d1b9f
--- /dev/null
+++ b/include/linux/sunrpc/cache.h
@@ -0,0 +1,312 @@
1/*
2 * include/linux/sunrpc/cache.h
3 *
4 * Generic code for various authentication-related caches
5 * used by sunrpc clients and servers.
6 *
7 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
8 *
9 * Released under terms in GPL version 2. See COPYING.
10 *
11 */
12
13#ifndef _LINUX_SUNRPC_CACHE_H_
14#define _LINUX_SUNRPC_CACHE_H_
15
16#include <linux/slab.h>
17#include <asm/atomic.h>
18#include <linux/proc_fs.h>
19
20/*
21 * Each cache requires:
22 * - A 'struct cache_detail' which contains information specific to the cache
23 * for common code to use.
24 * - An item structure that must contain a "struct cache_head"
25 * - A lookup function defined using DefineCacheLookup
26 * - A 'put' function that can release a cache item. It will only
27 *   be called after cache_put has succeeded, so there is a guarantee
28 *   that there are no references.
29 * - A function to calculate a hash of an item's key.
30 *
31 * as well as assorted code fragments (e.g. compare keys) and numbers
32 * (e.g. hash size, goal_age, etc).
33 *
34 * Each cache must be registered so that it can be cleaned regularly.
35 * When the cache is unregistered, it is flushed completely.
36 *
37 * Entries have a ref count and a 'hashed' flag which counts their existence
38 * in the hash table.
39 * We only expire entries when the refcount is zero.
40 * Existence in the cache is counted in the refcount.
41 */
42
43/* Every cache item has a common header that is used
44 * for expiring and refreshing entries.
45 *
46 */
47struct cache_head {
48 struct cache_head * next;
49	time_t		expiry_time;	/* After this time, don't use the data */
50 time_t last_refresh; /* If CACHE_PENDING, this is when upcall
51 * was sent, else this is when update was received
52 */
53 atomic_t refcnt;
54 unsigned long flags;
55};
56#define CACHE_VALID 0 /* Entry contains valid data */
57#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
58#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
59
60#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
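As a purely illustrative sketch of the layout the introductory comment asks for (the "demo_map" name and its fields are hypothetical, not part of this header or the kernel tree): a cache item embeds a struct cache_head, here as the first member and named "h" so that DefineSimpleCacheLookup below could be applied to it.

struct demo_map {
	struct cache_head	h;		/* common header, first member */
	char			key[20];	/* key fields: identify the entry */
	int			value;		/* content fields: the cached answer */
};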
61
62struct cache_detail {
63 int hash_size;
64 struct cache_head ** hash_table;
65 rwlock_t hash_lock;
66
67 atomic_t inuse; /* active user-space update or lookup */
68
69 char *name;
70 void (*cache_put)(struct cache_head *,
71 struct cache_detail*);
72
73 void (*cache_request)(struct cache_detail *cd,
74 struct cache_head *h,
75 char **bpp, int *blen);
76 int (*cache_parse)(struct cache_detail *,
77 char *buf, int len);
78
79 int (*cache_show)(struct seq_file *m,
80 struct cache_detail *cd,
81 struct cache_head *h);
82
83 /* fields below this comment are for internal use
84 * and should not be touched by cache owners
85 */
86 time_t flush_time; /* flush all cache items with last_refresh
87 * earlier than this */
88 struct list_head others;
89 time_t nextcheck;
90 int entries;
91
92 /* fields for communication over channel */
93 struct list_head queue;
94 struct proc_dir_entry *proc_ent;
95 struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
96
97	atomic_t		readers;		/* how many times is /channel open */
98 time_t last_close; /* if no readers, when did last close */
99 time_t last_warn; /* when we last warned about no readers */
100 void (*warn_no_listener)(struct cache_detail *cd);
101};
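A hedged sketch of how a cache owner might fill in the owner-visible part of a struct cache_detail for the hypothetical demo_map cache above; DEMO_HASHSIZE and every demo_map_* name are invented here, and the request/parse/show methods are assumed to be declared earlier. Everything below the "internal use" comment is left to the common code.

#define DEMO_HASHSIZE	16
static struct cache_head	*demo_table[DEMO_HASHSIZE];

/* 'put' method: free the item once the last reference is gone */
static void demo_map_put(struct cache_head *item, struct cache_detail *cd)
{
	if (cache_put(item, cd))
		kfree(container_of(item, struct demo_map, h));
}

static struct cache_detail demo_map_cache = {
	.hash_size	= DEMO_HASHSIZE,
	.hash_table	= demo_table,
	.name		= "demo.map",
	.cache_put	= demo_map_put,
	.cache_request	= demo_map_request,	/* format the upcall for user space */
	.cache_parse	= demo_map_parse,	/* parse the answer written back */
	.cache_show	= demo_map_show,	/* one line per entry in "content" */
};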
102
103
104/* this must be embedded in any request structure that
105 * identifies an object that will want a callback on
106 * a cache fill
107 */
108struct cache_req {
109 struct cache_deferred_req *(*defer)(struct cache_req *req);
110};
111/* this must be embedded in a deferred request that is being
112 * delayed awaiting a cache fill
113 */
114struct cache_deferred_req {
115 struct list_head hash; /* on hash chain */
116 struct list_head recent; /* on fifo */
117 struct cache_head *item; /* cache item we wait on */
118 time_t recv_time;
119	void			*owner; /* we might need to discard all deferred requests
120 * owned by someone */
121 void (*revisit)(struct cache_deferred_req *req,
122 int too_many);
123};
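A hypothetical sketch of the deferral dance: a server request embeds a cache_req whose defer() method saves enough state to replay the request later, and revisit() is called once the pending cache item has been resolved (or with too_many set when the deferral queue overflows). All demo_* names are invented for illustration.

struct demo_request {
	struct cache_req	chandle;	/* chandle.defer would point at demo_defer */
	/* ... per-request state ... */
};

struct demo_deferred {
	struct cache_deferred_req handle;
	/* ... saved copy of the request, enough to replay it ... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct demo_deferred *dd = container_of(dreq, struct demo_deferred, handle);

	if (!too_many) {
		/* requeue the saved request for another pass */
	}
	kfree(dd);
}

static struct cache_deferred_req *demo_defer(struct cache_req *req)
{
	struct demo_deferred *dd = kmalloc(sizeof(*dd), GFP_ATOMIC);

	if (dd == NULL)
		return NULL;		/* cannot defer; the caller has to drop the request */
	dd->handle.owner = NULL;	/* whatever cache_clean_deferred() should match on */
	dd->handle.revisit = demo_revisit;
	/* ... copy state from container_of(req, struct demo_request, chandle) ... */
	return &dd->handle;
}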
124
125/*
126 * just like a template in C++, this macro does cache lookup
127 * for us.
128 * The function is passed some sort of HANDLE from which a cache_detail
129 * structure can be determined (via SETUP, DETAIL), a template
130 * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the
131 * TEST, the function will try to find a matching cache entry in the cache.
132 * If "set" == 0 :
133 * If an entry is found, it is returned
134 * If no entry is found, a new non-VALID entry is created.
135 * If "set" == 1 and INPLACE == 0 :
136 * If no entry is found a new one is inserted with data from "template"
137 * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE
138 * If a CACHE_VALID entry is found, a new entry is swapped in with data
139 * from "template"
140 * If set == 1, and INPLACE == 1 :
141 * As above, except that if a CACHE_VALID entry is found, we UPDATE in place
142 * instead of swapping in a new entry.
143 *
144 * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not
145 * run but instead CACHE_NEGATIVE is set in any new item.
146 *
147 * In any case, the new entry is returned with a reference count.
148 *
149 *
150 * RTN is a struct type for a cache entry
151 * MEMBER is the member of the cache entry which is the cache_head; it must be first
152 * FNAME is the name for the function
153 * ARGS are the arguments to the function and must contain RTN *item, int set. May
154 * also contain something to be used by SETUP or DETAIL to find the cache_detail.
155 * SETUP locates the cache detail and makes it available as...
156 * DETAIL identifies the cache detail, possibly set up by SETUP
157 * HASHFN returns a hash value of the cache entry "item"
158 * TEST tests if "tmp" matches "item"
159 * INIT copies key information from "item" to "new"
160 * UPDATE copies content information from "item" to "tmp"
161 * INPLACE is true if updates can happen in place rather than allocating a new structure
162 *
163 * WARNING: any substantial changes to this must be reflected in
164 * net/sunrpc/svcauth.c(auth_domain_lookup)
165 * which is a similar routine that is open-coded.
166 */
167#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \
168RTN *FNAME ARGS \
169{ \
170 RTN *tmp, *new=NULL; \
171 struct cache_head **hp, **head; \
172 SETUP; \
173 head = &(DETAIL)->hash_table[HASHFN]; \
174 retry: \
175 if (set||new) write_lock(&(DETAIL)->hash_lock); \
176 else read_lock(&(DETAIL)->hash_lock); \
177 for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \
178 tmp = container_of(*hp, RTN, MEMBER); \
179 if (TEST) { /* found a match */ \
180 \
181 if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
182 break; \
183 \
184 if (new) \
185 {INIT;} \
186 if (set) { \
187 if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
188 { /* need to swap in new */ \
189 RTN *t2; \
190 \
191 new->MEMBER.next = tmp->MEMBER.next; \
192 *hp = &new->MEMBER; \
193 tmp->MEMBER.next = NULL; \
194 t2 = tmp; tmp = new; new = t2; \
195 } \
196 if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
197 set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
198 else { \
199 UPDATE; \
200 clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
201 } \
202 } \
203 cache_get(&tmp->MEMBER); \
204 if (set||new) write_unlock(&(DETAIL)->hash_lock); \
205 else read_unlock(&(DETAIL)->hash_lock); \
206 if (set) \
207 cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
208 if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
209 if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \
210 return tmp; \
211 } \
212 } \
213 /* Didn't find anything */ \
214 if (new) { \
215 INIT; \
216 new->MEMBER.next = *head; \
217 *head = &new->MEMBER; \
218 (DETAIL)->entries ++; \
219 cache_get(&new->MEMBER); \
220 if (set) { \
221 tmp = new; \
222 if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
223 set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
224 else {UPDATE;} \
225 } \
226 } \
227 if (set||new) write_unlock(&(DETAIL)->hash_lock); \
228 else read_unlock(&(DETAIL)->hash_lock); \
229 if (new && set) \
230 cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \
231 if (new) \
232 return new; \
233 new = kmalloc(sizeof(*new), GFP_KERNEL); \
234 if (new) { \
235 cache_init(&new->MEMBER); \
236 goto retry; \
237 } \
238 return NULL; \
239}
240
241#define DefineSimpleCacheLookup(STRUCT,INPLACE) \
242 DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \
243 & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\
244 STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE)
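Continuing the hypothetical demo_map example, a hedged sketch of the helpers DefineSimpleCacheLookup(demo_map, 0) expects (hash, match, init, update, plus the demo_map_cache detail shown earlier); the macro then emits struct demo_map *demo_map_lookup(struct demo_map *item, int set).

static inline int demo_map_hash(struct demo_map *item)
{
	return (unsigned char)item->key[0] % DEMO_HASHSIZE;	/* toy hash for illustration */
}
static inline int demo_map_match(struct demo_map *item, struct demo_map *tmp)
{
	return strcmp(item->key, tmp->key) == 0;	/* same key? */
}
static inline void demo_map_init(struct demo_map *new, struct demo_map *item)
{
	strcpy(new->key, item->key);			/* copy key fields only */
}
static inline void demo_map_update(struct demo_map *tmp, struct demo_map *item)
{
	tmp->value = item->value;			/* copy content fields */
}

DefineSimpleCacheLookup(demo_map, 0 /* INPLACE == 0: valid entries are replaced, not updated in place */)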
245
246#define cache_for_each(pos, detail, index, member) \
247 for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \
248 ({if (index==0)read_unlock(&(detail)->hash_lock); index--;}); \
249 ) \
250 for (pos = container_of((detail)->hash_table[index], typeof(*pos), member); \
251 &pos->member; \
252 pos = container_of(pos->member.next, typeof(*pos), member))
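For illustration only, again with the hypothetical demo_map cache: cache_for_each walks every hash chain of a cache_detail while its hash_lock is held for reading.

static void demo_map_walk_all(void)
{
	struct demo_map *pos;
	int index;

	cache_for_each(pos, &demo_map_cache, index, h) {
		/* "pos" visits every entry, chain by chain, under hash_lock */
	}
}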
253
254
255
256extern void cache_clean_deferred(void *owner);
257
258static inline struct cache_head *cache_get(struct cache_head *h)
259{
260 atomic_inc(&h->refcnt);
261 return h;
262}
263
264
265static inline int cache_put(struct cache_head *h, struct cache_detail *cd)
266{
267 if (atomic_read(&h->refcnt) <= 2 &&
268 h->expiry_time < cd->nextcheck)
269 cd->nextcheck = h->expiry_time;
270 return atomic_dec_and_test(&h->refcnt);
271}
272
273extern void cache_init(struct cache_head *h);
274extern void cache_fresh(struct cache_detail *detail,
275 struct cache_head *head, time_t expiry);
276extern int cache_check(struct cache_detail *detail,
277 struct cache_head *h, struct cache_req *rqstp);
278extern void cache_flush(void);
279extern void cache_purge(struct cache_detail *detail);
280#define NEVER (0x7FFFFFFF)
281extern void cache_register(struct cache_detail *cd);
282extern int cache_unregister(struct cache_detail *cd);
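A hedged sketch of the server-side fast path for the hypothetical demo_map cache, assuming the usual contract of cache_check(): 0 means the entry is valid and usable (the caller still holds the reference taken by the lookup), while on -ENOENT (negative entry), -EAGAIN (upcall pending, request deferred or dropped) and other errors cache_check() drops that reference itself.

static int demo_lookup_value(struct demo_request *rqst, char *key, int *valuep)
{
	struct demo_map probe, *found;
	int err;

	memset(&probe, 0, sizeof(probe));
	strcpy(probe.key, key);			/* illustrative only; assumes the key fits */

	found = demo_map_lookup(&probe, 0);	/* set == 0: find, or create a non-VALID entry */
	if (found == NULL)
		return -ENOMEM;

	err = cache_check(&demo_map_cache, &found->h, &rqst->chandle);
	if (err == 0) {
		*valuep = found->value;		/* entry is CACHE_VALID and not expired */
		demo_map_put(&found->h, &demo_map_cache);
	}
	return err;
}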
283
284extern void qword_add(char **bpp, int *lp, char *str);
285extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
286extern int qword_get(char **bpp, char *dest, int bufsize);
287
288static inline int get_int(char **bpp, int *anint)
289{
290 char buf[50];
291 char *ep;
292 int rv;
293 int len = qword_get(bpp, buf, 50);
294 if (len < 0) return -EINVAL;
295 if (len ==0) return -ENOENT;
296 rv = simple_strtol(buf, &ep, 0);
297 if (*ep) return -EINVAL;
298 *anint = rv;
299 return 0;
300}
301
302static inline time_t get_expiry(char **bpp)
303{
304 int rv;
305 if (get_int(bpp, &rv))
306 return 0;
307 if (rv < 0)
308 return 0;
309 return rv;
310}
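Finally, a hedged sketch of a cache_parse method for the hypothetical demo_map cache, showing how qword_get(), get_expiry() and get_int() read back the line that user space writes to the channel file.

static int demo_map_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	struct demo_map item, *found;

	if (mlen == 0 || mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen - 1] = '\0';

	memset(&item, 0, sizeof(item));
	if (qword_get(&mesg, item.key, sizeof(item.key)) <= 0)
		return -EINVAL;				/* first word: the key */
	item.h.expiry_time = get_expiry(&mesg);		/* second word: expiry time */
	if (item.h.expiry_time == 0)
		return -EINVAL;
	if (get_int(&mesg, &item.value))		/* third word: the content... */
		set_bit(CACHE_NEGATIVE, &item.h.flags);	/* ...absent, so make a negative entry */

	found = demo_map_lookup(&item, 1);		/* set == 1: insert or update */
	if (found == NULL)
		return -ENOMEM;
	demo_map_put(&found->h, cd);			/* drop the reference taken by the lookup */
	return 0;
}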
311
312#endif /* _LINUX_SUNRPC_CACHE_H_ */