Diffstat (limited to 'include/linux/sunrpc/cache.h')
-rw-r--r-- | include/linux/sunrpc/cache.h | 145 |
1 file changed, 17 insertions, 128 deletions
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index c4e3ea7cf154..b5612c958cce 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -50,7 +50,7 @@ struct cache_head { | |||
50 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall | 50 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall |
51 | * was sent, else this is when update was received | 51 | * was sent, else this is when update was received |
52 | */ | 52 | */ |
53 | atomic_t refcnt; | 53 | struct kref ref; |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | }; | 55 | }; |
56 | #define CACHE_VALID 0 /* Entry contains valid data */ | 56 | #define CACHE_VALID 0 /* Entry contains valid data */ |
@@ -68,8 +68,7 @@ struct cache_detail { | |||
68 | atomic_t inuse; /* active user-space update or lookup */ | 68 | atomic_t inuse; /* active user-space update or lookup */ |
69 | 69 | ||
70 | char *name; | 70 | char *name; |
71 | void (*cache_put)(struct cache_head *, | 71 | void (*cache_put)(struct kref *); |
72 | struct cache_detail*); | ||
73 | 72 | ||
74 | void (*cache_request)(struct cache_detail *cd, | 73 | void (*cache_request)(struct cache_detail *cd, |
75 | struct cache_head *h, | 74 | struct cache_head *h, |
@@ -81,6 +80,11 @@ struct cache_detail { | |||
81 | struct cache_detail *cd, | 80 | struct cache_detail *cd, |
82 | struct cache_head *h); | 81 | struct cache_head *h); |
83 | 82 | ||
83 | struct cache_head * (*alloc)(void); | ||
84 | int (*match)(struct cache_head *orig, struct cache_head *new); | ||
85 | void (*init)(struct cache_head *orig, struct cache_head *new); | ||
86 | void (*update)(struct cache_head *orig, struct cache_head *new); | ||
87 | |||
84 | /* fields below this comment are for internal use | 88 | /* fields below this comment are for internal use |
85 | * and should not be touched by cache owners | 89 | * and should not be touched by cache owners |
86 | */ | 90 | */ |
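
With the hunk above, a cache owner now supplies alloc/match/init/update operations and a kref release function instead of relying on the lookup macro removed below. The following is a minimal sketch, not part of the patch: the demo_ent/demo_* names, DEMO_HASHMAX, and the argument order of init/update (destination first, source second, as net/sunrpc/cache.c is assumed to invoke them) are illustrative assumptions only.

#include <linux/slab.h>
#include <linux/sunrpc/cache.h>

/* Hypothetical cache entry; embedding cache_head as the first member lets
 * the generic code and container_of() convert between the two views. */
struct demo_ent {
	struct cache_head h;
	int key;
	int value;
};

static struct cache_head *demo_alloc(void)
{
	struct demo_ent *e = kmalloc(sizeof(*e), GFP_KERNEL);

	return e ? &e->h : NULL;
}

/* Do two entries refer to the same key? */
static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct demo_ent, h)->key ==
	       container_of(b, struct demo_ent, h)->key;
}

/* Copy the key fields into a freshly allocated entry
 * (destination first, source second -- an assumption here). */
static void demo_init(struct cache_head *cnew, struct cache_head *citem)
{
	container_of(cnew, struct demo_ent, h)->key =
		container_of(citem, struct demo_ent, h)->key;
}

/* Copy the content fields when an existing entry is updated. */
static void demo_update(struct cache_head *cnew, struct cache_head *citem)
{
	container_of(cnew, struct demo_ent, h)->value =
		container_of(citem, struct demo_ent, h)->value;
}

/* kref release callback, wired into cache_detail.cache_put below;
 * runs when the last reference is dropped. */
static void demo_put(struct kref *ref)
{
	kfree(container_of(ref, struct demo_ent, h.ref));
}

#define DEMO_HASHMAX 16				/* assumed table size */
static struct cache_head *demo_table[DEMO_HASHMAX];

static struct cache_detail demo_cache = {
	.hash_size	= DEMO_HASHMAX,
	.hash_table	= demo_table,
	.name		= "demo",
	.cache_put	= demo_put,
	.alloc		= demo_alloc,
	.match		= demo_match,
	.init		= demo_init,
	.update		= demo_update,
};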
@@ -123,126 +127,14 @@ struct cache_deferred_req { | |||
123 | int too_many); | 127 | int too_many); |
124 | }; | 128 | }; |
125 | 129 | ||
126 | /* | ||
127 | * just like a template in C++, this macro does cache lookup | ||
128 | * for us. | ||
129 | * The function is passed some sort of HANDLE from which a cache_detail | ||
130 | * structure can be determined (via SETUP, DETAIL), a template | ||
131 | * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the | ||
132 | * TEST, the function will try to find a matching cache entry in the cache. | ||
133 | * If "set" == 0 : | ||
134 | * If an entry is found, it is returned | ||
135 | * If no entry is found, a new non-VALID entry is created. | ||
136 | * If "set" == 1 and INPLACE == 0 : | ||
137 | * If no entry is found a new one is inserted with data from "template" | ||
138 | * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE | ||
139 | * If a CACHE_VALID entry is found, a new entry is swapped in with data | ||
140 | * from "template" | ||
141 | * If set == 1, and INPLACE == 1 : | ||
142 | * As above, except that if a CACHE_VALID entry is found, we UPDATE in place | ||
143 | * instead of swapping in a new entry. | ||
144 | * | ||
145 | * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not | ||
146 | * run but instead CACHE_NEGATIVE is set in any new item. | ||
147 | 130 | ||
148 | * In any case, the new entry is returned with a reference count. | 131 | extern struct cache_head * |
149 | * | 132 | sunrpc_cache_lookup(struct cache_detail *detail, |
150 | * | 133 | struct cache_head *key, int hash); |
151 | * RTN is a struct type for a cache entry | 134 | extern struct cache_head * |
152 | * MEMBER is the member of the cache which is cache_head, which must be first | 135 | sunrpc_cache_update(struct cache_detail *detail, |
153 | * FNAME is the name for the function | 136 | struct cache_head *new, struct cache_head *old, int hash); |
154 | * ARGS are arguments to function and must contain RTN *item, int set. May | ||
155 | * also contain something to be used by SETUP or DETAIL to find cache_detail. | ||
156 | * SETUP locates the cache detail and makes it available as... | ||
157 | * DETAIL identifies the cache detail, possibly set up by SETUP | ||
158 | * HASHFN returns a hash value of the cache entry "item" | ||
159 | * TEST tests if "tmp" matches "item" | ||
160 | * INIT copies key information from "item" to "new" | ||
161 | * UPDATE copies content information from "item" to "tmp" | ||
162 | * INPLACE is true if updates can happen inplace rather than allocating a new structure | ||
163 | * | ||
164 | * WARNING: any substantial changes to this must be reflected in | ||
165 | * net/sunrpc/svcauth.c(auth_domain_lookup) | ||
166 | * which is a similar routine that is open-coded. | ||
167 | */ | ||
168 | #define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \ | ||
169 | RTN *FNAME ARGS \ | ||
170 | { \ | ||
171 | RTN *tmp, *new=NULL; \ | ||
172 | struct cache_head **hp, **head; \ | ||
173 | SETUP; \ | ||
174 | head = &(DETAIL)->hash_table[HASHFN]; \ | ||
175 | retry: \ | ||
176 | if (set||new) write_lock(&(DETAIL)->hash_lock); \ | ||
177 | else read_lock(&(DETAIL)->hash_lock); \ | ||
178 | for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \ | ||
179 | tmp = container_of(*hp, RTN, MEMBER); \ | ||
180 | if (TEST) { /* found a match */ \ | ||
181 | \ | ||
182 | if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \ | ||
183 | break; \ | ||
184 | \ | ||
185 | if (new) \ | ||
186 | {INIT;} \ | ||
187 | if (set) { \ | ||
188 | if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\ | ||
189 | { /* need to swap in new */ \ | ||
190 | RTN *t2; \ | ||
191 | \ | ||
192 | new->MEMBER.next = tmp->MEMBER.next; \ | ||
193 | *hp = &new->MEMBER; \ | ||
194 | tmp->MEMBER.next = NULL; \ | ||
195 | t2 = tmp; tmp = new; new = t2; \ | ||
196 | } \ | ||
197 | if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ | ||
198 | set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
199 | else { \ | ||
200 | UPDATE; \ | ||
201 | clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
202 | } \ | ||
203 | } \ | ||
204 | cache_get(&tmp->MEMBER); \ | ||
205 | if (set||new) write_unlock(&(DETAIL)->hash_lock); \ | ||
206 | else read_unlock(&(DETAIL)->hash_lock); \ | ||
207 | if (set) \ | ||
208 | cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \ | ||
209 | if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \ | ||
210 | if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \ | ||
211 | return tmp; \ | ||
212 | } \ | ||
213 | } \ | ||
214 | /* Didn't find anything */ \ | ||
215 | if (new) { \ | ||
216 | INIT; \ | ||
217 | new->MEMBER.next = *head; \ | ||
218 | *head = &new->MEMBER; \ | ||
219 | (DETAIL)->entries ++; \ | ||
220 | cache_get(&new->MEMBER); \ | ||
221 | if (set) { \ | ||
222 | tmp = new; \ | ||
223 | if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ | ||
224 | set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
225 | else {UPDATE;} \ | ||
226 | } \ | ||
227 | } \ | ||
228 | if (set||new) write_unlock(&(DETAIL)->hash_lock); \ | ||
229 | else read_unlock(&(DETAIL)->hash_lock); \ | ||
230 | if (new && set) \ | ||
231 | cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \ | ||
232 | if (new) \ | ||
233 | return new; \ | ||
234 | new = kmalloc(sizeof(*new), GFP_KERNEL); \ | ||
235 | if (new) { \ | ||
236 | cache_init(&new->MEMBER); \ | ||
237 | goto retry; \ | ||
238 | } \ | ||
239 | return NULL; \ | ||
240 | } | ||
241 | 137 | ||
242 | #define DefineSimpleCacheLookup(STRUCT,INPLACE) \ | ||
243 | DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \ | ||
244 | & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\ | ||
245 | STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE) | ||
246 | 138 | ||
247 | #define cache_for_each(pos, detail, index, member) \ | 139 | #define cache_for_each(pos, detail, index, member) \ |
248 | for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ | 140 | for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ |
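
The hunk above drops the DefineCacheLookup/DefineSimpleCacheLookup macro templates in favour of the two exported helpers. A rough, hypothetical wrapper, reusing the demo_ent/demo_cache/DEMO_HASHMAX names assumed in the earlier sketch, might map the old "set == 0" and "set == 1" cases onto them like this:

static inline int demo_hash(struct demo_ent *e)
{
	return e->key & (DEMO_HASHMAX - 1);
}

/* Old "set == 0" behaviour: return a matching entry, or insert a new,
 * not-yet-CACHE_VALID one initialised from the key. */
static struct demo_ent *demo_lookup(struct demo_ent *key)
{
	struct cache_head *ch;

	ch = sunrpc_cache_lookup(&demo_cache, &key->h, demo_hash(key));
	return ch ? container_of(ch, struct demo_ent, h) : NULL;
}

/* Old "set == 1" behaviour: fold the content of "new" into "old", with the
 * generic code swapping in a replacement entry if "old" is already
 * CACHE_VALID (per the macro documentation removed above). */
static struct demo_ent *demo_commit(struct demo_ent *new, struct demo_ent *old)
{
	struct cache_head *ch;

	ch = sunrpc_cache_update(&demo_cache, &new->h, &old->h, demo_hash(new));
	return ch ? container_of(ch, struct demo_ent, h) : NULL;
}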
@@ -258,22 +150,19 @@ extern void cache_clean_deferred(void *owner); | |||
258 | 150 | ||
259 | static inline struct cache_head *cache_get(struct cache_head *h) | 151 | static inline struct cache_head *cache_get(struct cache_head *h) |
260 | { | 152 | { |
261 | atomic_inc(&h->refcnt); | 153 | kref_get(&h->ref); |
262 | return h; | 154 | return h; |
263 | } | 155 | } |
264 | 156 | ||
265 | 157 | ||
266 | static inline int cache_put(struct cache_head *h, struct cache_detail *cd) | 158 | static inline void cache_put(struct cache_head *h, struct cache_detail *cd) |
267 | { | 159 | { |
268 | if (atomic_read(&h->refcnt) <= 2 && | 160 | if (atomic_read(&h->ref.refcount) <= 2 && |
269 | h->expiry_time < cd->nextcheck) | 161 | h->expiry_time < cd->nextcheck) |
270 | cd->nextcheck = h->expiry_time; | 162 | cd->nextcheck = h->expiry_time; |
271 | return atomic_dec_and_test(&h->refcnt); | 163 | kref_put(&h->ref, cd->cache_put); |
272 | } | 164 | } |
273 | 165 | ||
274 | extern void cache_init(struct cache_head *h); | ||
275 | extern void cache_fresh(struct cache_detail *detail, | ||
276 | struct cache_head *head, time_t expiry); | ||
277 | extern int cache_check(struct cache_detail *detail, | 166 | extern int cache_check(struct cache_detail *detail, |
278 | struct cache_head *h, struct cache_req *rqstp); | 167 | struct cache_head *h, struct cache_req *rqstp); |
279 | extern void cache_flush(void); | 168 | extern void cache_flush(void); |
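
Since cache_head now embeds a struct kref, cache_get()/cache_put() reduce to kref_get()/kref_put() with the per-cache release callback. A minimal usage sketch, again with the assumed demo names:

static void demo_use(struct demo_ent *ent)
{
	cache_get(&ent->h);			/* kref_get(&ent->h.ref) */
	/* ... read ent->value while holding the reference ... */
	cache_put(&ent->h, &demo_cache);	/* kref_put(); demo_put() frees
						 * the entry when the count
						 * reaches zero */
}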