aboutsummaryrefslogtreecommitdiffstats
path: root/fs/coda/upcall.c
diff options
context:
space:
mode:
authorPedro Cuadra <pjcuadra@gmail.com>2019-07-16 19:29:13 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-07-16 22:23:23 -0400
commita9fba24c6ac9b66c09dfc2a0e845ecace187e89c (patch)
treec0985325e935868b52697d0e0167e0023a0e5d9b /fs/coda/upcall.c
parent5bb44810f47a00b608ed2cb9f892ae7ce37b02bd (diff)
coda: add hinting support for partial file caching
This adds support for partial file caching in Coda. Every read, write and mmap informs the userspace cache manager about what part of a file is about to be accessed so that the cache manager can ensure the relevant parts are available before the operation is allowed to proceed. When a read or write operation completes, this is also reported to allow the cache manager to track when partially cached content can be released. If the cache manager does not support partial file caching, or when the entire file has been fetched into the local cache, the cache manager may return an EOPNOTSUPP error to indicate that intent upcalls are no longer necessary until the file is closed. [akpm@linux-foundation.org: little whitespace fixup] Link: http://lkml.kernel.org/r/20190618181301.6960-1-jaharkes@cs.cmu.edu Signed-off-by: Pedro Cuadra <pjcuadra@gmail.com> Signed-off-by: Jan Harkes <jaharkes@cs.cmu.edu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/coda/upcall.c')
-rw-r--r--fs/coda/upcall.c70
1 file changed, 58 insertions(+), 12 deletions(-)
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 15c0e4fdb0e3..eb3b1898da46 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -569,6 +569,47 @@ int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
569 return error; 569 return error;
570} 570}
571 571
/*
 * venus_access_intent - inform the userspace cache manager (Venus) about
 * an upcoming or completed access to a byte range of a file, so it can
 * fetch/release the relevant parts of a partially cached file.
 *
 * @sb:     superblock of the coda mount (used to find the venus_comm).
 * @fid:    Coda file identifier of the file being accessed.
 * @access_intent_supported: in/out flag; cleared when Venus answers
 *          -EOPNOTSUPP, meaning intent upcalls are unnecessary until the
 *          file is closed (old client, or file fully cached).
 * @count:  length of the accessed region in bytes.
 * @ppos:   starting offset of the accessed region.
 * @type:   CODA_ACCESS_TYPE_* value; the *_FINISH types report completion.
 *
 * Returns 0 on success (including the suppressed-EOPNOTSUPP case) or a
 * negative errno from the upcall.
 */
int venus_access_intent(struct super_block *sb, struct CodaFid *fid,
			bool *access_intent_supported,
			size_t count, loff_t ppos, int type)
{
	union inputArgs *inp;
	union outputArgs *outp;
	int insize, outsize, error;
	/*
	 * READ_FINISH/WRITE_FINISH notifications are fire-and-forget: they
	 * are sent asynchronously (no outsize below) and must go out even
	 * when intents are "unsupported", so Venus can balance an earlier
	 * READ/WRITE intent it did receive.
	 */
	bool finalizer =
		type == CODA_ACCESS_TYPE_READ_FINISH ||
		type == CODA_ACCESS_TYPE_WRITE_FINISH;

	if (!*access_intent_supported && !finalizer)
		return 0;

	insize = SIZE(access_intent);
	/* NOTE: UPARG() allocates inp/outp and returns early on failure. */
	UPARG(CODA_ACCESS_INTENT);

	inp->coda_access_intent.VFid = *fid;
	inp->coda_access_intent.count = count;
	inp->coda_access_intent.pos = ppos;
	inp->coda_access_intent.type = type;

	/* A NULL outsize makes coda_upcall treat this as asynchronous. */
	error = coda_upcall(coda_vcp(sb), insize,
			    finalizer ? NULL : &outsize, inp);

	/*
	 * we have to free the request buffer for synchronous upcalls
	 * or when asynchronous upcalls fail, but not when asynchronous
	 * upcalls succeed
	 */
	if (!finalizer || error)
		kvfree(inp);

	/* Chunked access is not supported or an old Coda client */
	if (error == -EOPNOTSUPP) {
		*access_intent_supported = false;
		error = 0;
	}
	return error;
}
612
572/* 613/*
573 * coda_upcall and coda_downcall routines. 614 * coda_upcall and coda_downcall routines.
574 */ 615 */
@@ -598,10 +639,12 @@ static void coda_unblock_signals(sigset_t *old)
598 * has seen them, 639 * has seen them,
599 * - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems) 640 * - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems)
600 * - CODA_STORE (to avoid data loss) 641 * - CODA_STORE (to avoid data loss)
642 * - CODA_ACCESS_INTENT (to avoid reference count problems)
601 */ 643 */
602#define CODA_INTERRUPTIBLE(r) (!coda_hard && \ 644#define CODA_INTERRUPTIBLE(r) (!coda_hard && \
603 (((r)->uc_opcode != CODA_CLOSE && \ 645 (((r)->uc_opcode != CODA_CLOSE && \
604 (r)->uc_opcode != CODA_STORE && \ 646 (r)->uc_opcode != CODA_STORE && \
647 (r)->uc_opcode != CODA_ACCESS_INTENT && \
605 (r)->uc_opcode != CODA_RELEASE) || \ 648 (r)->uc_opcode != CODA_RELEASE) || \
606 (r)->uc_flags & CODA_REQ_READ)) 649 (r)->uc_flags & CODA_REQ_READ))
607 650
@@ -687,21 +730,25 @@ static int coda_upcall(struct venus_comm *vcp,
687 goto exit; 730 goto exit;
688 } 731 }
689 732
733 buffer->ih.unique = ++vcp->vc_seq;
734
690 req->uc_data = (void *)buffer; 735 req->uc_data = (void *)buffer;
691 req->uc_flags = 0; 736 req->uc_flags = outSize ? 0 : CODA_REQ_ASYNC;
692 req->uc_inSize = inSize; 737 req->uc_inSize = inSize;
693 req->uc_outSize = *outSize ? *outSize : inSize; 738 req->uc_outSize = (outSize && *outSize) ? *outSize : inSize;
694 req->uc_opcode = ((union inputArgs *)buffer)->ih.opcode; 739 req->uc_opcode = buffer->ih.opcode;
695 req->uc_unique = ++vcp->vc_seq; 740 req->uc_unique = buffer->ih.unique;
696 init_waitqueue_head(&req->uc_sleep); 741 init_waitqueue_head(&req->uc_sleep);
697 742
698 /* Fill in the common input args. */
699 ((union inputArgs *)buffer)->ih.unique = req->uc_unique;
700
701 /* Append msg to pending queue and poke Venus. */ 743 /* Append msg to pending queue and poke Venus. */
702 list_add_tail(&req->uc_chain, &vcp->vc_pending); 744 list_add_tail(&req->uc_chain, &vcp->vc_pending);
703
704 wake_up_interruptible(&vcp->vc_waitq); 745 wake_up_interruptible(&vcp->vc_waitq);
746
747 if (req->uc_flags & CODA_REQ_ASYNC) {
748 mutex_unlock(&vcp->vc_mutex);
749 return 0;
750 }
751
705 /* We can be interrupted while we wait for Venus to process 752 /* We can be interrupted while we wait for Venus to process
706 * our request. If the interrupt occurs before Venus has read 753 * our request. If the interrupt occurs before Venus has read
707 * the request, we dequeue and return. If it occurs after the 754 * the request, we dequeue and return. If it occurs after the
@@ -743,20 +790,20 @@ static int coda_upcall(struct venus_comm *vcp,
743 sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); 790 sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
744 if (!sig_req) goto exit; 791 if (!sig_req) goto exit;
745 792
746 sig_req->uc_data = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL); 793 sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
747 if (!sig_req->uc_data) { 794 if (!sig_inputArgs) {
748 kfree(sig_req); 795 kfree(sig_req);
749 goto exit; 796 goto exit;
750 } 797 }
751 798
752 error = -EINTR; 799 error = -EINTR;
753 sig_inputArgs = (union inputArgs *)sig_req->uc_data;
754 sig_inputArgs->ih.opcode = CODA_SIGNAL; 800 sig_inputArgs->ih.opcode = CODA_SIGNAL;
755 sig_inputArgs->ih.unique = req->uc_unique; 801 sig_inputArgs->ih.unique = req->uc_unique;
756 802
757 sig_req->uc_flags = CODA_REQ_ASYNC; 803 sig_req->uc_flags = CODA_REQ_ASYNC;
758 sig_req->uc_opcode = sig_inputArgs->ih.opcode; 804 sig_req->uc_opcode = sig_inputArgs->ih.opcode;
759 sig_req->uc_unique = sig_inputArgs->ih.unique; 805 sig_req->uc_unique = sig_inputArgs->ih.unique;
806 sig_req->uc_data = (void *)sig_inputArgs;
760 sig_req->uc_inSize = sizeof(struct coda_in_hdr); 807 sig_req->uc_inSize = sizeof(struct coda_in_hdr);
761 sig_req->uc_outSize = sizeof(struct coda_in_hdr); 808 sig_req->uc_outSize = sizeof(struct coda_in_hdr);
762 809
@@ -911,4 +958,3 @@ unlock_out:
911 iput(inode); 958 iput(inode);
912 return 0; 959 return 0;
913} 960}
914