diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-08-31 15:09:19 -0400 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-12-06 10:46:32 -0500 |
commit | 1e78957e0a8f882df6a3660b62f9aae441f54891 (patch) | |
tree | 6746e88e851053867bf49f68403c7b4da991ca85 /net/sunrpc | |
parent | eb5f8545ffff98a11c6656d4d2106341ab69c57d (diff) |
SUNRPC: Clean up argument types in xdr.c
Converts various integer buffer offsets and sizes to unsigned integer.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/xdr.c | 86 |
1 files changed, 43 insertions, 43 deletions
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 9022eb8b37ed..b474edbab3f1 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -640,41 +640,30 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) | |||
640 | buf->buflen = buf->len = iov->iov_len; | 640 | buf->buflen = buf->len = iov->iov_len; |
641 | } | 641 | } |
642 | 642 | ||
643 | /* Sets subiov to the intersection of iov with the buffer of length len | ||
644 | * starting base bytes after iov. Indicates empty intersection by setting | ||
645 | * length of subiov to zero. Decrements len by length of subiov, sets base | ||
646 | * to zero (or decrements it by length of iov if subiov is empty). */ | ||
647 | static void | ||
648 | iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len) | ||
649 | { | ||
650 | if (*base > iov->iov_len) { | ||
651 | subiov->iov_base = NULL; | ||
652 | subiov->iov_len = 0; | ||
653 | *base -= iov->iov_len; | ||
654 | } else { | ||
655 | subiov->iov_base = iov->iov_base + *base; | ||
656 | subiov->iov_len = min(*len, (int)iov->iov_len - *base); | ||
657 | *base = 0; | ||
658 | } | ||
659 | *len -= subiov->iov_len; | ||
660 | } | ||
661 | |||
662 | /* Sets subbuf to the portion of buf of length len beginning base bytes | 643 | /* Sets subbuf to the portion of buf of length len beginning base bytes |
663 | * from the start of buf. Returns -1 if base or length are out of bounds. */ | 644 | * from the start of buf. Returns -1 if base or length are out of bounds. */ |
664 | int | 645 | int |
665 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | 646 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, |
666 | int base, int len) | 647 | unsigned int base, unsigned int len) |
667 | { | 648 | { |
668 | int i; | ||
669 | |||
670 | subbuf->buflen = subbuf->len = len; | 649 | subbuf->buflen = subbuf->len = len; |
671 | iov_subsegment(buf->head, subbuf->head, &base, &len); | 650 | if (base < buf->head[0].iov_len) { |
651 | subbuf->head[0].iov_base = buf->head[0].iov_base + base; | ||
652 | subbuf->head[0].iov_len = min_t(unsigned int, len, | ||
653 | buf->head[0].iov_len - base); | ||
654 | len -= subbuf->head[0].iov_len; | ||
655 | base = 0; | ||
656 | } else { | ||
657 | subbuf->head[0].iov_base = NULL; | ||
658 | subbuf->head[0].iov_len = 0; | ||
659 | base -= buf->head[0].iov_len; | ||
660 | } | ||
672 | 661 | ||
673 | if (base < buf->page_len) { | 662 | if (base < buf->page_len) { |
674 | i = (base + buf->page_base) >> PAGE_CACHE_SHIFT; | 663 | subbuf->page_len = min(buf->page_len - base, len); |
675 | subbuf->pages = &buf->pages[i]; | 664 | base += buf->page_base; |
676 | subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK; | 665 | subbuf->page_base = base & ~PAGE_CACHE_MASK; |
677 | subbuf->page_len = min((int)buf->page_len - base, len); | 666 | subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; |
678 | len -= subbuf->page_len; | 667 | len -= subbuf->page_len; |
679 | base = 0; | 668 | base = 0; |
680 | } else { | 669 | } else { |
@@ -682,7 +671,18 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
682 | subbuf->page_len = 0; | 671 | subbuf->page_len = 0; |
683 | } | 672 | } |
684 | 673 | ||
685 | iov_subsegment(buf->tail, subbuf->tail, &base, &len); | 674 | if (base < buf->tail[0].iov_len) { |
675 | subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; | ||
676 | subbuf->tail[0].iov_len = min_t(unsigned int, len, | ||
677 | buf->tail[0].iov_len - base); | ||
678 | len -= subbuf->tail[0].iov_len; | ||
679 | base = 0; | ||
680 | } else { | ||
681 | subbuf->tail[0].iov_base = NULL; | ||
682 | subbuf->tail[0].iov_len = 0; | ||
683 | base -= buf->tail[0].iov_len; | ||
684 | } | ||
685 | |||
686 | if (base || len) | 686 | if (base || len) |
687 | return -1; | 687 | return -1; |
688 | return 0; | 688 | return 0; |
@@ -690,25 +690,25 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
690 | 690 | ||
691 | /* obj is assumed to point to allocated memory of size at least len: */ | 691 | /* obj is assumed to point to allocated memory of size at least len: */ |
692 | int | 692 | int |
693 | read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len) | 693 | read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) |
694 | { | 694 | { |
695 | struct xdr_buf subbuf; | 695 | struct xdr_buf subbuf; |
696 | int this_len; | 696 | unsigned int this_len; |
697 | int status; | 697 | int status; |
698 | 698 | ||
699 | status = xdr_buf_subsegment(buf, &subbuf, base, len); | 699 | status = xdr_buf_subsegment(buf, &subbuf, base, len); |
700 | if (status) | 700 | if (status) |
701 | goto out; | 701 | goto out; |
702 | this_len = min(len, (int)subbuf.head[0].iov_len); | 702 | this_len = min_t(unsigned int, len, subbuf.head[0].iov_len); |
703 | memcpy(obj, subbuf.head[0].iov_base, this_len); | 703 | memcpy(obj, subbuf.head[0].iov_base, this_len); |
704 | len -= this_len; | 704 | len -= this_len; |
705 | obj += this_len; | 705 | obj += this_len; |
706 | this_len = min(len, (int)subbuf.page_len); | 706 | this_len = min_t(unsigned int, len, subbuf.page_len); |
707 | if (this_len) | 707 | if (this_len) |
708 | _copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len); | 708 | _copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len); |
709 | len -= this_len; | 709 | len -= this_len; |
710 | obj += this_len; | 710 | obj += this_len; |
711 | this_len = min(len, (int)subbuf.tail[0].iov_len); | 711 | this_len = min_t(unsigned int, len, subbuf.tail[0].iov_len); |
712 | memcpy(obj, subbuf.tail[0].iov_base, this_len); | 712 | memcpy(obj, subbuf.tail[0].iov_base, this_len); |
713 | out: | 713 | out: |
714 | return status; | 714 | return status; |
@@ -716,32 +716,32 @@ out: | |||
716 | 716 | ||
717 | /* obj is assumed to point to allocated memory of size at least len: */ | 717 | /* obj is assumed to point to allocated memory of size at least len: */ |
718 | int | 718 | int |
719 | write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len) | 719 | write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) |
720 | { | 720 | { |
721 | struct xdr_buf subbuf; | 721 | struct xdr_buf subbuf; |
722 | int this_len; | 722 | unsigned int this_len; |
723 | int status; | 723 | int status; |
724 | 724 | ||
725 | status = xdr_buf_subsegment(buf, &subbuf, base, len); | 725 | status = xdr_buf_subsegment(buf, &subbuf, base, len); |
726 | if (status) | 726 | if (status) |
727 | goto out; | 727 | goto out; |
728 | this_len = min(len, (int)subbuf.head[0].iov_len); | 728 | this_len = min_t(unsigned int, len, subbuf.head[0].iov_len); |
729 | memcpy(subbuf.head[0].iov_base, obj, this_len); | 729 | memcpy(subbuf.head[0].iov_base, obj, this_len); |
730 | len -= this_len; | 730 | len -= this_len; |
731 | obj += this_len; | 731 | obj += this_len; |
732 | this_len = min(len, (int)subbuf.page_len); | 732 | this_len = min_t(unsigned int, len, subbuf.page_len); |
733 | if (this_len) | 733 | if (this_len) |
734 | _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len); | 734 | _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len); |
735 | len -= this_len; | 735 | len -= this_len; |
736 | obj += this_len; | 736 | obj += this_len; |
737 | this_len = min(len, (int)subbuf.tail[0].iov_len); | 737 | this_len = min_t(unsigned int, len, subbuf.tail[0].iov_len); |
738 | memcpy(subbuf.tail[0].iov_base, obj, this_len); | 738 | memcpy(subbuf.tail[0].iov_base, obj, this_len); |
739 | out: | 739 | out: |
740 | return status; | 740 | return status; |
741 | } | 741 | } |
742 | 742 | ||
743 | int | 743 | int |
744 | xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) | 744 | xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) |
745 | { | 745 | { |
746 | __be32 raw; | 746 | __be32 raw; |
747 | int status; | 747 | int status; |
@@ -754,7 +754,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) | |||
754 | } | 754 | } |
755 | 755 | ||
756 | int | 756 | int |
757 | xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) | 757 | xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) |
758 | { | 758 | { |
759 | __be32 raw = htonl(obj); | 759 | __be32 raw = htonl(obj); |
760 | 760 | ||
@@ -766,10 +766,10 @@ xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) | |||
766 | * try to find space for it at the end of the tail, copy it there, and | 766 | * try to find space for it at the end of the tail, copy it there, and |
767 | * set obj to point to it. */ | 767 | * set obj to point to it. */ |
768 | int | 768 | int |
769 | xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset) | 769 | xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset) |
770 | { | 770 | { |
771 | u32 tail_offset = buf->head[0].iov_len + buf->page_len; | 771 | unsigned int tail_offset = buf->head[0].iov_len + buf->page_len; |
772 | u32 obj_end_offset; | 772 | unsigned int obj_end_offset; |
773 | 773 | ||
774 | if (xdr_decode_word(buf, offset, &obj->len)) | 774 | if (xdr_decode_word(buf, offset, &obj->len)) |
775 | goto out; | 775 | goto out; |