/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#define	RPCDBG_FACILITY	RPCDBG_CACHE
static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
	return (h->expiry_time < get_seconds()) ||
	       (detail->flush_time > h->last_refresh);
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * discard it.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
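
/*
 * Example (illustrative only, not part of this file): a cache
 * implementation typically embeds a struct cache_head at the start of
 * its own entry type and wraps sunrpc_cache_lookup() with container_of.
 * The names my_ent, my_lookup, my_cache and my_hash below are
 * hypothetical.
 *
 *	struct my_ent {
 *		struct cache_head h;	// must be embedded first
 *		int key;
 *	};
 *
 *	static struct my_ent *my_lookup(struct cache_detail *my_cache,
 *					struct my_ent *key)
 *	{
 *		struct cache_head *ch;
 *		int hash = my_hash(key->key);
 *
 *		ch = sunrpc_cache_lookup(my_cache, &key->h, hash);
 *		if (ch == NULL)
 *			return NULL;	// detail->alloc() failed
 *		return container_of(ch, struct my_ent, h);
 *	}
 *
 * The returned entry carries a reference taken with cache_get(); the
 * caller must eventually drop it with cache_put().
 */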
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
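
/*
 * Example (illustrative, hypothetical names): callers usually fill a
 * temporary entry with the new content and then commit it:
 *
 *	static struct my_ent *my_update(struct cache_detail *my_cache,
 *					struct my_ent *new, struct my_ent *old)
 *	{
 *		struct cache_head *ch;
 *		int hash = my_hash(new->key);
 *
 *		ch = sunrpc_cache_update(my_cache, &new->h, &old->h, hash);
 *		if (ch == NULL)
 *			return NULL;	// allocation failed; 'old' was put
 *		return container_of(ch, struct my_ent, h);
 *	}
 *
 * sunrpc_cache_update() always consumes the caller's reference on
 * 'old' and hands back a referenced entry carrying the new content.
 */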
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else
			return 0;
	}
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *		   upcall completed but item is still invalid (implying that
 *		   the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
					cache_fresh_unlocked(h, detail);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (cache_defer_req(rqstp, h) < 0) {
			/* Request is not deferred */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
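
/*
 * Example (illustrative): a typical server-side caller, after looking
 * up and obtaining a referenced item.  The error codes shown are
 * placeholders; rq_chandle is the cache_req embedded in svc_rqst:
 *
 *	switch (cache_check(my_cache, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:		// valid: use ent, then cache_put() when done
 *		break;
 *	case -EAGAIN:	// upcall pending, request was deferred
 *		return drop_this_request;
 *	case -ENOENT:	// negative entry
 *		return not_found_error;
 *	default:	// -ETIMEDOUT and friends
 *		return io_error;
 *	}
 *
 * Note that on any non-zero return cache_check() has already dropped
 * the caller's reference with cache_put().
 */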
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for each entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);
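
/*
 * Example (illustrative): flushing everything from the kernel side,
 * e.g. in a module's exit path before unregistering:
 *
 *	cache_purge(my_cache);		// invalidate and reap all entries
 *	cache_unregister(my_cache);
 *
 * sunrpc_destroy_cache_detail() above already purges, so an explicit
 * purge is only needed when entries must die earlier than that.
 */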
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	discard = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		discard = list_entry(cache_defer_list.prev,
				     struct cache_deferred_req, recent);
		list_del_init(&discard->recent);
		list_del_init(&discard->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (discard)
		/* there was one too many */
		discard->revisit(discard, 1);

	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
		return -EAGAIN;
	}
	return 0;
}
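
/*
 * Example (illustrative, hypothetical names): a request provider
 * supplies ->defer(), which packages the request into something
 * embedding a struct cache_deferred_req, plus a revisit callback:
 *
 *	struct my_deferred {
 *		struct cache_deferred_req handle;	// embedded
 *		// ... saved request state ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *md =
 *			container_of(dreq, struct my_deferred, handle);
 *		if (too_many)
 *			my_drop(md);	// give up on the request
 *		else
 *			my_requeue(md);	// process it again
 *	}
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred *md = kmalloc(sizeof(*md), GFP_ATOMIC);
 *		if (!md)
 *			return NULL;
 *		md->handle.revisit = my_revisit;
 *		return &md->handle;
 *	}
 */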
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del_init(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del_init(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
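
/*
 * Example (user space, illustrative): the daemon side of the channel
 * protocol is typically a poll/read/write loop like this sketch;
 * error handling is elided and handle_request() is hypothetical.  The
 * path depends on where the cache directory was created:
 *
 *	int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_RDWR);
 *	char buf[8192];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		int n = read(fd, buf, sizeof(buf));	// one whole request
 *		if (n <= 0)
 *			continue;
 *		char reply[8192];
 *		int rlen = handle_request(buf, n, reply);
 *		write(fd, reply, rlen);			// one whole reply line
 *	}
 */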
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c=*str++) && len)
		switch(c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
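
/*
 * Example: quoting the key "a b\c" into an upcall buffer.  Space and
 * backslash are escaped as three-digit octal, and a field-terminating
 * space is appended:
 *
 *	char buf[64], *bp = buf;
 *	int len = sizeof(buf);
 *	qword_add(&bp, &len, "a b\\c");
 *	// buf now holds: a\040b\134c  (trailing space, no NUL added)
 */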
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
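
/*
 * Example: hexifying two raw bytes 0xde 0xad emits "\xdead " (lowercase
 * hex with a leading \x and a trailing field separator):
 *
 *	char buf[16], *bp = buf;
 *	int len = sizeof(buf);
 *	unsigned char raw[2] = { 0xde, 0xad };
 *	qword_addhex(&bp, &len, (char *)raw, 2);
 */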
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
			     void (*cache_request)(struct cache_detail *,
						   struct cache_head *,
						   char **,
						   int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
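
/*
 * Example (illustrative, hypothetical names): a cache_request callback
 * formats one request line with the qword helpers.  On overflow they
 * drive the remaining length negative and the upcall above is aborted:
 *
 *	static void my_request(struct cache_detail *cd,
 *			       struct cache_head *h,
 *			       char **bpp, int *blen)
 *	{
 *		struct my_ent *ent = container_of(h, struct my_ent, h);
 *
 *		qword_add(bpp, blen, ent->name);
 *		qword_addhex(bpp, blen, ent->raw, ent->rawlen);
 *		(*bpp)[-1] = '\n';	// replace final space with newline
 *	}
 */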
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *	reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
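
/*
 * Example: parsing the two fields produced by the qword_add and
 * qword_addhex examples above:
 *
 *	char *p = line;			// "a\040b\134c \xdead \n"
 *	char field[64];
 *	int n;
 *	n = qword_get(&p, field, sizeof(field));  // n=5, field is a b\c
 *	n = qword_get(&p, field, sizeof(field));  // n=2, raw bytes de ad
 */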
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};
static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}
static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}
static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}
*inode
, struct file
*file
,
1257 struct cache_detail
*cd
)
1259 int ret
= seq_release_private(inode
, file
);
1260 module_put(cd
->owner
);
static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}
static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void*)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}
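
/*
 * Example (user space, illustrative): writing a time_t to the flush
 * file invalidates every entry last refreshed before that time;
 * writing "now" flushes the whole cache:
 *
 *	char t[32];
 *	int fd = open("/proc/net/rpc/auth.unix.ip/flush", O_WRONLY);
 *	snprintf(t, sizeof(t), "%ld\n", (long)time(NULL));
 *	write(fd, t, strlen(t));
 *	close(fd);
 */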
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}
static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};
static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}
static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}
static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}
static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	remove_proc_entry(cd->name, proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
	struct proc_dir_entry *p;

	cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
	return 0;
}
#endif
void __init cache_initialize(void)
{
	INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}
int cache_register(struct cache_detail *cd)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register);
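
/*
 * Example (illustrative, hypothetical names): a minimal cache_detail
 * registered at module init; all my_* operations are stand-ins for a
 * real implementation:
 *
 *	static struct cache_head *my_table[64];
 *	static struct cache_detail my_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= 64,
 *		.hash_table	= my_table,
 *		.name		= "my_cache",
 *		.cache_put	= my_put,
 *		.cache_upcall	= my_upcall,
 *		.cache_parse	= my_parse,
 *		.cache_show	= my_show,
 *		.match		= my_match,
 *		.init		= my_init,
 *		.update		= my_update,
 *		.alloc		= my_alloc,
 *	};
 *
 *	ret = cache_register(&my_cache);  // creates /proc/net/rpc/my_cache
 */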
void cache_unregister(struct cache_detail *cd)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};
static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}
const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}
static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}
const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, mode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	sunrpc_init_cache_detail(cd);
	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else {
		sunrpc_destroy_cache_detail(cd);
		ret = PTR_ERR(dir);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);