author		Roger Pau Monné <roger.pau@citrix.com>		2013-04-17 14:19:00 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-04-18 09:29:25 -0400
commit		31552ee32df89f97a61766cee51b8dabb1ae3f4f
tree		9cfc1218886ecc35180d3327a9b9a2fa3e385cf4	/drivers/block/xen-blkback
parent		bf0720c48c7cefd127ed2329e6d0e40b39fa4d0e
xen-blkback: expand map/unmap functions
Preparatory change for implementing indirect descriptors. Change
xen_blkbk_{map/unmap} so they can map/unmap an arbitrary number of
grants (previously they were limited to
BLKIF_MAX_SEGMENTS_PER_REQUEST). Also, remove the use of pending_req
in the map/unmap functions, so grants can be mapped and unmapped
without passing a pending_req.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: xen-devel@lists.xen.org
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	141
1 file changed, 86 insertions(+), 55 deletions(-)
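
For orientation before reading the diff: the patch replaces the pending_req-based
prototypes with ones that take plain arrays plus an explicit count. The signatures
below are condensed directly from the hunks that follow (nothing here is invented):

    /* Before: hard-wired to one pending_req, and therefore to at most
     * BLKIF_MAX_SEGMENTS_PER_REQUEST grants. */
    static void xen_blkbk_unmap(struct pending_req *req);
    static int xen_blkbk_map(struct blkif_request *req,
                             struct pending_req *pending_req,
                             struct seg_buf seg[],
                             struct page *pages[]);

    /* After: raw arrays plus a count; 'num' may exceed
     * BLKIF_MAX_SEGMENTS_PER_REQUEST because both functions now batch
     * the grant-table hypercalls internally. */
    static void xen_blkbk_unmap(struct xen_blkif *blkif,
                                grant_handle_t handles[],
                                struct page *pages[],
                                struct persistent_gnt *persistent_gnts[],
                                int num);
    static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
                             struct persistent_gnt *persistent_gnts[],
                             grant_handle_t handles[],
                             struct page *pages[],
                             int num, bool ro);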
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 90a57552f4b7..356722f65f88 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -163,10 +163,6 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
-#define pending_handle(_req, _seg) \
-	(_req->grant_handles[_seg])
-
-
 static int do_block_io_op(struct xen_blkif *blkif);
 static int dispatch_rw_block_io(struct xen_blkif *blkif,
 				struct blkif_request *req,
@@ -648,50 +644,57 @@ struct seg_buf {
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
  */
-static void xen_blkbk_unmap(struct pending_req *req)
+static void xen_blkbk_unmap(struct xen_blkif *blkif,
+                            grant_handle_t handles[],
+                            struct page *pages[],
+                            struct persistent_gnt *persistent_gnts[],
+                            int num)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int i, invcount = 0;
-	grant_handle_t handle;
-	struct xen_blkif *blkif = req->blkif;
 	int ret;
 
-	for (i = 0; i < req->nr_pages; i++) {
-		if (req->persistent_gnts[i] != NULL) {
-			put_persistent_gnt(blkif, req->persistent_gnts[i]);
+	for (i = 0; i < num; i++) {
+		if (persistent_gnts[i] != NULL) {
+			put_persistent_gnt(blkif, persistent_gnts[i]);
 			continue;
 		}
-		handle = pending_handle(req, i);
-		pages[invcount] = req->pages[i];
-		if (handle == BLKBACK_INVALID_HANDLE)
+		if (handles[i] == BLKBACK_INVALID_HANDLE)
 			continue;
-		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[invcount]),
-				    GNTMAP_host_map, handle);
-		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-		invcount++;
+		unmap_pages[invcount] = pages[i];
+		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
+				    GNTMAP_host_map, handles[i]);
+		handles[i] = BLKBACK_INVALID_HANDLE;
+		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+						invcount);
+			BUG_ON(ret);
+			put_free_pages(blkif, unmap_pages, invcount);
+			invcount = 0;
+		}
+	}
+	if (invcount) {
+		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+		BUG_ON(ret);
+		put_free_pages(blkif, unmap_pages, invcount);
 	}
-
-	ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
-	BUG_ON(ret);
-	put_free_pages(blkif, pages, invcount);
 }
 
-static int xen_blkbk_map(struct blkif_request *req,
-			 struct pending_req *pending_req,
-			 struct seg_buf seg[],
-			 struct page *pages[])
+static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
+			 struct persistent_gnt *persistent_gnts[],
+			 grant_handle_t handles[],
+			 struct page *pages[],
+			 int num, bool ro)
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-	struct persistent_gnt **persistent_gnts = pending_req->persistent_gnts;
 	struct persistent_gnt *persistent_gnt = NULL;
-	struct xen_blkif *blkif = pending_req->blkif;
 	phys_addr_t addr = 0;
 	int i, seg_idx, new_map_idx;
-	int nseg = req->u.rw.nr_segments;
 	int segs_to_map = 0;
 	int ret = 0;
+	int last_map = 0, map_until = 0;
 	int use_persistent_gnts;
 
 	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
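
The rewritten xen_blkbk_unmap() above is the simpler of the patch's two batching
schemes: the scratch arrays keep their fixed BLKIF_MAX_SEGMENTS_PER_REQUEST size,
and the unmap hypercall is issued whenever the batch fills, plus once more for the
tail. A minimal standalone sketch of that idiom, in generic C with no Xen headers
(flush_batch() is a hypothetical stand-in for the gnttab_unmap_refs() plus
put_free_pages() pair):

    #include <stdio.h>

    #define BATCH_MAX 32    /* plays the role of BLKIF_MAX_SEGMENTS_PER_REQUEST */

    /* Hypothetical stand-in for "issue the hypercall, then recycle the pages". */
    static void flush_batch(const int *batch, int count)
    {
        printf("flushing %d entries (first=%d)\n", count, batch[0]);
    }

    static void process_all(const int *items, int num)
    {
        int batch[BATCH_MAX];
        int invcount = 0;

        for (int i = 0; i < num; i++) {
            batch[invcount] = items[i];
            if (++invcount == BATCH_MAX) {  /* scratch array full: flush now */
                flush_batch(batch, invcount);
                invcount = 0;
            }
        }
        if (invcount)                       /* flush the final partial batch */
            flush_batch(batch, invcount);
    }

    int main(void)
    {
        int items[70];

        for (int i = 0; i < 70; i++)
            items[i] = i;
        process_all(items, 70);     /* 70 > BATCH_MAX: flushes 32 + 32 + 6 */
        return 0;
    }

Because the flush happens inside the loop, 'num' is no longer bounded by the size
of the on-stack arrays, which is exactly what the new 'int num' parameter relies on.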
@@ -701,13 +704,14 @@ static int xen_blkbk_map(struct blkif_request *req,
 	 * assign map[..] with the PFN of the page in our domain with the
 	 * corresponding grant reference for each page.
 	 */
-	for (i = 0; i < nseg; i++) {
+again:
+	for (i = map_until; i < num; i++) {
 		uint32_t flags;
 
 		if (use_persistent_gnts)
 			persistent_gnt = get_persistent_gnt(
 				blkif,
-				req->u.rw.seg[i].gref);
+				grefs[i]);
 
 		if (persistent_gnt) {
 			/*
@@ -723,13 +727,15 @@ static int xen_blkbk_map(struct blkif_request *req,
 			pages_to_gnt[segs_to_map] = pages[i];
 			persistent_gnts[i] = NULL;
 			flags = GNTMAP_host_map;
-			if (!use_persistent_gnts &&
-			    (pending_req->operation != BLKIF_OP_READ))
+			if (!use_persistent_gnts && ro)
 				flags |= GNTMAP_readonly;
 			gnttab_set_map_op(&map[segs_to_map++], addr,
-					  flags, req->u.rw.seg[i].gref,
+					  flags, grefs[i],
 					  blkif->domid);
 		}
+		map_until = i + 1;
+		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
+			break;
 	}
 
 	if (segs_to_map) {
@@ -742,26 +748,19 @@ static int xen_blkbk_map(struct blkif_request *req,
 	 * so that when we access vaddr(pending_req,i) it has the contents of
 	 * the page from the other domain.
 	 */
-	for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) {
+	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
 		if (!persistent_gnts[seg_idx]) {
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
-				pending_handle(pending_req, seg_idx) = BLKBACK_INVALID_HANDLE;
+				handles[seg_idx] = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
-				new_map_idx++;
-				/*
-				 * No need to set unmap_seg bit, since
-				 * we can not unmap this grant because
-				 * the handle is invalid.
-				 */
-				continue;
+				goto next;
 			}
-			pending_handle(pending_req, seg_idx) = map[new_map_idx].handle;
+			handles[seg_idx] = map[new_map_idx].handle;
 		} else {
-			/* This grant is persistent and already mapped */
-			goto next;
+			continue;
 		}
 		if (use_persistent_gnts &&
 		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
@@ -777,7 +776,7 @@ static int xen_blkbk_map(struct blkif_request *req,
 				 * allocate the persistent_gnt struct
 				 * map this grant non-persistenly
 				 */
-				goto next_unmap;
+				goto next;
 			}
 			persistent_gnt->gnt = map[new_map_idx].ref;
 			persistent_gnt->handle = map[new_map_idx].handle;
@@ -786,13 +785,12 @@ static int xen_blkbk_map(struct blkif_request *req,
 					persistent_gnt)) {
 				kfree(persistent_gnt);
 				persistent_gnt = NULL;
-				goto next_unmap;
+				goto next;
 			}
 			persistent_gnts[seg_idx] = persistent_gnt;
 			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
-			new_map_idx++;
 			goto next;
 		}
 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
@@ -800,15 +798,18 @@ static int xen_blkbk_map(struct blkif_request *req,
 			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
 				 blkif->domid, blkif->vbd.handle);
 		}
-next_unmap:
 		/*
 		 * We could not map this grant persistently, so use it as
 		 * a non-persistent grant.
 		 */
-		new_map_idx++;
next:
-		seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9);
+		new_map_idx++;
 	}
+	segs_to_map = 0;
+	last_map = map_until;
+	if (map_until != num)
+		goto again;
+
 	return ret;
 
 out_of_memory:
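
The map side, completed in the hunk above, cannot flush inline because map results
have to be post-processed (status checks, persistent-grant bookkeeping), so it uses
a second idiom: gather up to one batch of grants while advancing map_until, process
only the window [last_map, map_until), then jump back to 'again:' until the whole
request is covered. A rough standalone sketch of that control flow in generic C
(map_batch() is a hypothetical placeholder for gnttab_map_refs() and its
per-entry status handling):

    #define BATCH_MAX 32

    /* Hypothetical stand-in for the map hypercall: returns a fake handle. */
    static int map_batch(int gref)
    {
        return gref + 1000;
    }

    static int map_all(const int *grefs, int *handles, int num)
    {
        int batch[BATCH_MAX];
        int last_map = 0, map_until = 0;
        int segs_to_map = 0;
        int ret = 0;

    again:
        /* Gather up to BATCH_MAX grants, remembering how far we got. */
        for (int i = map_until; i < num; i++) {
            batch[segs_to_map++] = grefs[i];
            map_until = i + 1;
            if (segs_to_map == BATCH_MAX)
                break;
        }

        /* Post-process only the window that was just gathered. */
        for (int seg = last_map, j = 0; seg < map_until; seg++, j++)
            handles[seg] = map_batch(batch[j]);

        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)       /* more segments left: next window */
            goto again;

        return ret;
    }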
@@ -817,6 +818,31 @@ out_of_memory:
 	return -ENOMEM;
 }
 
+static int xen_blkbk_map_seg(struct blkif_request *req,
+			     struct pending_req *pending_req,
+			     struct seg_buf seg[],
+			     struct page *pages[])
+{
+	int i, rc;
+	grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+
+	for (i = 0; i < req->u.rw.nr_segments; i++)
+		grefs[i] = req->u.rw.seg[i].gref;
+
+	rc = xen_blkbk_map(pending_req->blkif, grefs,
+			   pending_req->persistent_gnts,
+			   pending_req->grant_handles, pending_req->pages,
+			   req->u.rw.nr_segments,
+			   (pending_req->operation != BLKIF_OP_READ));
+	if (rc)
+		return rc;
+
+	for (i = 0; i < req->u.rw.nr_segments; i++)
+		seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
+
+	return 0;
+}
+
 static int dispatch_discard_io(struct xen_blkif *blkif,
 			       struct blkif_request *req)
 {
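
The new xen_blkbk_map_seg() wrapper keeps the existing ring path unchanged in shape
while leaving the generic mapper free for reuse. The payoff comes with the
indirect-descriptor work this patch prepares for: a caller will be able to pass more
grants than BLKIF_MAX_SEGMENTS_PER_REQUEST in a single call. A purely illustrative
sketch of such a future caller (INDIRECT_GREFS and all names here are invented for
the example; none of this is part of the patch):

    /* Invented for illustration: a grant count above the old per-request cap. */
    #define INDIRECT_GREFS 256

    static int map_indirect_segments(struct xen_blkif *blkif,
                                     grant_ref_t grefs[INDIRECT_GREFS],
                                     struct persistent_gnt *persistent_gnts[INDIRECT_GREFS],
                                     grant_handle_t handles[INDIRECT_GREFS],
                                     struct page *pages[INDIRECT_GREFS])
    {
        /*
         * xen_blkbk_map() now loops internally in batches of
         * BLKIF_MAX_SEGMENTS_PER_REQUEST, so a count larger than the old
         * limit is acceptable. 'true' here would mean a write request,
         * whose frontend pages are mapped read-only.
         */
        return xen_blkbk_map(blkif, grefs, persistent_gnts, handles,
                             pages, INDIRECT_GREFS, true);
    }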
@@ -903,7 +929,10 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req);
+		xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
+				pending_req->pages,
+				pending_req->persistent_gnts,
+				pending_req->nr_pages);
 		make_response(pending_req->blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
 		xen_blkif_put(pending_req->blkif);
@@ -1125,7 +1154,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (xen_blkbk_map(req, pending_req, seg, pages))
+	if (xen_blkbk_map_seg(req, pending_req, seg, pages))
 		goto fail_flush;
 
 	/*
@@ -1186,7 +1215,9 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	return 0;
 
 fail_flush:
-	xen_blkbk_unmap(pending_req);
+	xen_blkbk_unmap(blkif, pending_req->grant_handles,
+			pending_req->pages, pending_req->persistent_gnts,
+			pending_req->nr_pages);
 fail_response:
 	/* Haven't submitted any bio's yet. */
 	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);