author		Roger Pau Monne <roger.pau@citrix.com>		2013-05-02 04:21:17 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-05-07 09:42:17 -0400
commit		bb642e8315fd573795e8b6fa9b9629064d73add1 (patch)
tree		47ee834fb5a80a77129cb8bf0607ce4db9c04a55 /drivers/block/xen-blkback/blkback.c
parent		402b27f9f2c22309d5bb285628765bc27b82fcf5 (diff)
xen-blkback: allocate list of pending reqs in small chunks
Allocate pending requests in smaller chunks instead of allocating them
all at the same time.

This change also removes the global array of pending_reqs, which is no
longer necessary.

Variables related to the grant mapping have been grouped into a struct
called "grant_page", which allows them to be allocated in smaller
chunks and also improves memory locality.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
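
The consolidation the message describes is visible in the field accesses
throughout the diff below (pages[i]->gref, pages[i]->page, pages[i]->handle,
pages[i]->persistent_gnt). A minimal sketch of the resulting structure,
inferred from those accesses (the actual definition lands in the driver's
common.h, outside this file-limited view, so field order and comments here
are illustrative):

/*
 * Sketch of the consolidated per-segment state, inferred from the
 * accesses in the diff below. The real definition is not part of
 * this file-limited view, so layout here is illustrative only.
 */
struct grant_page {
	struct page		*page;		/* page backing this segment */
	struct persistent_gnt	*persistent_gnt; /* non-NULL when persistently mapped */
	grant_handle_t		handle;		/* grant map handle, BLKBACK_INVALID_HANDLE if unmapped */
	grant_ref_t		gref;		/* grant reference provided by the frontend */
};

Keeping these four values in one struct means a single pointer can be passed
through map/unmap instead of four parallel arrays, which is what shrinks the
xen_blkbk_map and xen_blkbk_unmap signatures below.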
Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	92
1 file changed, 40 insertions(+), 52 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1ebc0aa0f0e4..e79ab4559233 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -641,9 +641,7 @@ purge_gnt_list:
  * used in the 'pending_req'.
  */
 static void xen_blkbk_unmap(struct xen_blkif *blkif,
-			    grant_handle_t handles[],
-			    struct page *pages[],
-			    struct persistent_gnt *persistent_gnts[],
+			    struct grant_page *pages[],
 			    int num)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -652,16 +650,16 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 	int ret;
 
 	for (i = 0; i < num; i++) {
-		if (persistent_gnts[i] != NULL) {
-			put_persistent_gnt(blkif, persistent_gnts[i]);
+		if (pages[i]->persistent_gnt != NULL) {
+			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
 			continue;
 		}
-		if (handles[i] == BLKBACK_INVALID_HANDLE)
+		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
 			continue;
-		unmap_pages[invcount] = pages[i];
-		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
-				    GNTMAP_host_map, handles[i]);
-		handles[i] = BLKBACK_INVALID_HANDLE;
+		unmap_pages[invcount] = pages[i]->page;
+		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
+				    GNTMAP_host_map, pages[i]->handle);
+		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
 						invcount);
@@ -677,10 +675,8 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 	}
 }
 
-static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
-			 struct persistent_gnt *persistent_gnts[],
-			 grant_handle_t handles[],
-			 struct page *pages[],
+static int xen_blkbk_map(struct xen_blkif *blkif,
+			 struct grant_page *pages[],
 			 int num, bool ro)
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -707,26 +703,26 @@ again:
 		if (use_persistent_gnts)
 			persistent_gnt = get_persistent_gnt(
 				blkif,
-				grefs[i]);
+				pages[i]->gref);
 
 		if (persistent_gnt) {
 			/*
 			 * We are using persistent grants and
 			 * the grant is already mapped
 			 */
-			pages[i] = persistent_gnt->page;
-			persistent_gnts[i] = persistent_gnt;
+			pages[i]->page = persistent_gnt->page;
+			pages[i]->persistent_gnt = persistent_gnt;
 		} else {
-			if (get_free_page(blkif, &pages[i]))
+			if (get_free_page(blkif, &pages[i]->page))
 				goto out_of_memory;
-			addr = vaddr(pages[i]);
-			pages_to_gnt[segs_to_map] = pages[i];
-			persistent_gnts[i] = NULL;
+			addr = vaddr(pages[i]->page);
+			pages_to_gnt[segs_to_map] = pages[i]->page;
+			pages[i]->persistent_gnt = NULL;
 			flags = GNTMAP_host_map;
 			if (!use_persistent_gnts && ro)
 				flags |= GNTMAP_readonly;
 			gnttab_set_map_op(&map[segs_to_map++], addr,
-					  flags, grefs[i],
+					  flags, pages[i]->gref,
 					  blkif->domid);
 		}
 		map_until = i + 1;
@@ -745,16 +741,16 @@ again:
 	 * the page from the other domain.
 	 */
 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
-		if (!persistent_gnts[seg_idx]) {
+		if (!pages[seg_idx]->persistent_gnt) {
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
-				handles[seg_idx] = BLKBACK_INVALID_HANDLE;
+				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
 				goto next;
 			}
-			handles[seg_idx] = map[new_map_idx].handle;
+			pages[seg_idx]->handle = map[new_map_idx].handle;
 		} else {
 			continue;
 		}
@@ -776,14 +772,14 @@ again:
 			}
 			persistent_gnt->gnt = map[new_map_idx].ref;
 			persistent_gnt->handle = map[new_map_idx].handle;
-			persistent_gnt->page = pages[seg_idx];
+			persistent_gnt->page = pages[seg_idx]->page;
 			if (add_persistent_gnt(blkif,
 					       persistent_gnt)) {
 				kfree(persistent_gnt);
 				persistent_gnt = NULL;
 				goto next;
 			}
-			persistent_gnts[seg_idx] = persistent_gnt;
+			pages[seg_idx]->persistent_gnt = persistent_gnt;
 			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
@@ -814,15 +810,11 @@ out_of_memory:
 	return -ENOMEM;
 }
 
-static int xen_blkbk_map_seg(struct pending_req *pending_req,
-			     struct seg_buf seg[],
-			     struct page *pages[])
+static int xen_blkbk_map_seg(struct pending_req *pending_req)
 {
 	int rc;
 
-	rc = xen_blkbk_map(pending_req->blkif, pending_req->grefs,
-			   pending_req->persistent_gnts,
-			   pending_req->grant_handles, pending_req->pages,
+	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
 			   pending_req->nr_pages,
 			   (pending_req->operation != BLKIF_OP_READ));
 
@@ -834,9 +826,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 				    struct seg_buf seg[],
 				    struct phys_req *preq)
 {
-	struct persistent_gnt **persistent =
-		pending_req->indirect_persistent_gnts;
-	struct page **pages = pending_req->indirect_pages;
+	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
 	struct blkif_request_segment_aligned *segments = NULL;
@@ -845,9 +835,10 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	indirect_grefs = INDIRECT_PAGES(nseg);
 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
-	rc = xen_blkbk_map(blkif, req->u.indirect.indirect_grefs,
-			   persistent, pending_req->indirect_handles,
-			   pages, indirect_grefs, true);
+	for (i = 0; i < indirect_grefs; i++)
+		pages[i]->gref = req->u.indirect.indirect_grefs[i];
+
+	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
 	if (rc)
 		goto unmap;
 
@@ -856,10 +847,10 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			/* Map indirect segments */
 			if (segments)
 				kunmap_atomic(segments);
-			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]);
+			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 		}
 		i = n % SEGS_PER_INDIRECT_FRAME;
-		pending_req->grefs[n] = segments[i].gref;
+		pending_req->segments[n]->gref = segments[i].gref;
 		seg[n].nsec = segments[i].last_sect -
 			segments[i].first_sect + 1;
 		seg[n].offset = (segments[i].first_sect << 9);
@@ -874,8 +865,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 unmap:
 	if (segments)
 		kunmap_atomic(segments);
-	xen_blkbk_unmap(blkif, pending_req->indirect_handles,
-			pages, persistent, indirect_grefs);
+	xen_blkbk_unmap(blkif, pages, indirect_grefs);
 	return rc;
 }
 
@@ -965,9 +955,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
-				pending_req->pages,
-				pending_req->persistent_gnts,
+		xen_blkbk_unmap(pending_req->blkif,
+				pending_req->segments,
 				pending_req->nr_pages);
 		make_response(pending_req->blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
@@ -1104,7 +1093,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	int operation;
 	struct blk_plug plug;
 	bool drain = false;
-	struct page **pages = pending_req->pages;
+	struct grant_page **pages = pending_req->segments;
 	unsigned short req_operation;
 
 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
@@ -1165,7 +1154,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	preq.dev           = req->u.rw.handle;
 	preq.sector_number = req->u.rw.sector_number;
 	for (i = 0; i < nseg; i++) {
-		pending_req->grefs[i] = req->u.rw.seg[i].gref;
+		pages[i]->gref = req->u.rw.seg[i].gref;
 		seg[i].nsec = req->u.rw.seg[i].last_sect -
 			req->u.rw.seg[i].first_sect + 1;
 		seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
@@ -1216,7 +1205,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (xen_blkbk_map_seg(pending_req, seg, pages))
+	if (xen_blkbk_map_seg(pending_req))
 		goto fail_flush;
 
 	/*
@@ -1228,7 +1217,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
 		       (bio_add_page(bio,
-				     pages[i],
+				     pages[i]->page,
 				     seg[i].nsec << 9,
 				     seg[i].offset) == 0)) {
 
@@ -1277,8 +1266,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	return 0;
 
 fail_flush:
-	xen_blkbk_unmap(blkif, pending_req->grant_handles,
-			pending_req->pages, pending_req->persistent_gnts,
+	xen_blkbk_unmap(blkif, pending_req->segments,
 			pending_req->nr_pages);
 fail_response:
 	/* Haven't submitted any bio's yet. */
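
The allocation-side half of the change (replacing the single up-front
allocation of all pending_reqs with per-request allocations) lives in
xenbus.c and is therefore not part of this file-limited diff. A rough
sketch of the idea, with hypothetical naming and simplified error
handling since that file is not shown here; the segments array on
pending_req is confirmed by the diff above, while pending_free,
free_list, and MAX_INDIRECT_SEGMENTS are assumptions from the
surrounding driver context:

/*
 * Hypothetical sketch: allocate each pending_req and its grant_page
 * slots individually instead of carving them out of one large
 * contiguous global array. Names and the unwind path are illustrative.
 */
static int sketch_alloc_pending_reqs(struct xen_blkif *blkif, int nr_reqs)
{
	struct pending_req *req;
	int i, j;

	INIT_LIST_HEAD(&blkif->pending_free);

	for (i = 0; i < nr_reqs; i++) {
		/* one small allocation per request */
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		/* small per-segment chunks, improving memory locality
		 * and avoiding one huge allocation at setup time */
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[j]),
						   GFP_KERNEL);
			if (!req->segments[j])
				return -ENOMEM; /* caller would unwind */
		}
		list_add_tail(&req->free_list, &blkif->pending_free);
	}
	return 0;
}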