path: root/net/sunrpc/cache.c
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--  net/sunrpc/cache.c | 76
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 9e72223487fa..f02f24ae9468 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -275,7 +275,7 @@ int cache_check(struct cache_detail *detail,
  *
  * A table is then only scanned if the current time is at least
  * the nextcheck time.
  *
  */
 
 static LIST_HEAD(cache_list);
@@ -283,9 +283,9 @@ static DEFINE_SPINLOCK(cache_list_lock);
 static struct cache_detail *current_detail;
 static int current_index;
 
-static struct file_operations cache_file_operations;
-static struct file_operations content_file_operations;
-static struct file_operations cache_flush_operations;
+static const struct file_operations cache_file_operations;
+static const struct file_operations content_file_operations;
+static const struct file_operations cache_flush_operations;
 
 static void do_cache_clean(struct work_struct *work);
 static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
@@ -297,16 +297,16 @@ void cache_register(struct cache_detail *cd)
                 struct proc_dir_entry *p;
                 cd->proc_ent->owner = cd->owner;
                 cd->channel_ent = cd->content_ent = NULL;
 
                 p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
                                       cd->proc_ent);
                 cd->flush_ent = p;
                 if (p) {
                         p->proc_fops = &cache_flush_operations;
                         p->owner = cd->owner;
                         p->data = cd;
                 }
 
                 if (cd->cache_request || cd->cache_parse) {
                         p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                               cd->proc_ent);
@@ -317,16 +317,16 @@ void cache_register(struct cache_detail *cd)
                                 p->data = cd;
                         }
                 }
                 if (cd->cache_show) {
                         p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
                                               cd->proc_ent);
                         cd->content_ent = p;
                         if (p) {
                                 p->proc_fops = &content_file_operations;
                                 p->owner = cd->owner;
                                 p->data = cd;
                         }
                 }
         }
         rwlock_init(&cd->hash_lock);
         INIT_LIST_HEAD(&cd->queue);
@@ -418,15 +418,15 @@ static int cache_clean(void)
                 current_index++;
 
         /* find a cleanable entry in the bucket and clean it, or set to next bucket */
 
         if (current_detail && current_index < current_detail->hash_size) {
                 struct cache_head *ch, **cp;
                 struct cache_detail *d;
 
                 write_lock(&current_detail->hash_lock);
 
                 /* Ok, now to clean this strand */
 
                 cp = & current_detail->hash_table[current_index];
                 ch = *cp;
                 for (; ch; cp= & ch->next, ch= *cp) {
@@ -478,9 +478,9 @@ static void do_cache_clean(struct work_struct *work)
 }
 
 
 /*
  * Clean all caches promptly. This just calls cache_clean
  * repeatedly until we are sure that every cache has had a chance to
  * be fully cleaned
  */
 void cache_flush(void)
@@ -509,7 +509,7 @@ void cache_purge(struct cache_detail *detail)
  * All deferred requests are stored in a hash table,
  * indexed by "struct cache_head *".
  * As it may be wasteful to store a whole request
  * structure, we allow the request to provide a
  * deferred form, which must contain a
  * 'struct cache_deferred_req'
  * This cache_deferred_req contains a method to allow
@@ -585,7 +585,7 @@ static void cache_revisit_request(struct cache_head *item)
 
         INIT_LIST_HEAD(&pending);
         spin_lock(&cache_defer_lock);
 
         lp = cache_defer_hash[hash].next;
         if (lp) {
                 while (lp != &cache_defer_hash[hash]) {
@@ -615,7 +615,7 @@ void cache_clean_deferred(void *owner)
 
         INIT_LIST_HEAD(&pending);
         spin_lock(&cache_defer_lock);
 
         list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                 if (dreq->owner == owner) {
                         list_del(&dreq->hash);
@@ -640,7 +640,7 @@ void cache_clean_deferred(void *owner)
  * On write, an update request is processed
  * Poll works if anything to read, and always allows write
  *
  * Implemented by linked list of requests. Each open file has
  * a ->private that also exists in this list. New request are added
  * to the end and may wakeup and preceding readers.
  * New readers are added to the head. If, on read, an item is found with
@@ -888,7 +888,7 @@ cache_release(struct inode *inode, struct file *filp)
 
 
 
-static struct file_operations cache_file_operations = {
+static const struct file_operations cache_file_operations = {
         .owner = THIS_MODULE,
         .llseek = no_llseek,
         .read = cache_read,
@@ -1060,10 +1060,10 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
  * Messages are, like requests, separated into fields by
  * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
  *
  * Message is
  * reply cachename expiry key ... content....
  *
  * key and content are both parsed by cache
  */
 
 #define isodigit(c) (isdigit(c) && c <= '7')
@@ -1133,7 +1133,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
         unsigned hash, entry;
         struct cache_head *ch;
         struct cache_detail *cd = ((struct handle*)m->private)->cd;
 
 
         read_lock(&cd->hash_lock);
         if (!n--)
@@ -1148,7 +1148,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
         do {
                 hash++;
                 n += 1LL<<32;
         } while(hash < cd->hash_size &&
                 cd->hash_table[hash]==NULL);
         if (hash >= cd->hash_size)
                 return NULL;
@@ -1246,7 +1246,7 @@ static int content_release(struct inode *inode, struct file *file)
         return seq_release(inode, file);
 }
 
-static struct file_operations content_file_operations = {
+static const struct file_operations content_file_operations = {
         .open = content_open,
         .read = seq_read,
         .llseek = seq_lseek,
@@ -1298,7 +1298,7 @@ static ssize_t write_flush(struct file * file, const char __user * buf,
         return count;
 }
 
-static struct file_operations cache_flush_operations = {
+static const struct file_operations cache_flush_operations = {
         .open = nonseekable_open,
         .read = read_flush,
         .write = write_flush,
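
The substance of the patch is the constification of the three file_operations tables that cache_register() wires into /proc (the flush, channel and content entries); the remaining hunks appear to carry only whitespace adjustments. Below is a minimal sketch of the same pattern, assuming the procfs interface of this kernel era as used in cache_register() above (create_proc_entry(), ->proc_fops, ->owner, ->data); the example_* names are hypothetical and not part of the patch.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/* Hypothetical read handler; the names here are illustrative only. */
static ssize_t example_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
{
        return 0;               /* nothing to hand back in this sketch */
}

/*
 * const: the ops table is never modified after it is defined, so it can
 * live in read-only data; procfs only ever takes a pointer to it.
 */
static const struct file_operations example_file_operations = {
        .owner = THIS_MODULE,
        .read  = example_read,
};

static void example_register(struct proc_dir_entry *parent)
{
        struct proc_dir_entry *p;

        p = create_proc_entry("example", S_IFREG | S_IRUSR, parent);
        if (p) {
                p->proc_fops = &example_file_operations;  /* pointer to const */
                p->owner = THIS_MODULE;
        }
}

Marking the tables const costs nothing at the call sites, since cache_register() only stores their addresses in ->proc_fops, and it allows the compiler to place them in read-only memory; that is why the three forward declarations and the three definitions change in lockstep.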