-rw-r--r--  drivers/block/xen-blkback/blkback.c | 92
-rw-r--r--  drivers/block/xen-blkback/common.h  | 18
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  | 74
3 files changed, 106 insertions, 78 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1ebc0aa0f0e4..e79ab4559233 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -641,9 +641,7 @@ purge_gnt_list:
  * used in the 'pending_req'.
  */
 static void xen_blkbk_unmap(struct xen_blkif *blkif,
-                            grant_handle_t handles[],
-                            struct page *pages[],
-                            struct persistent_gnt *persistent_gnts[],
+                            struct grant_page *pages[],
                             int num)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -652,16 +650,16 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 	int ret;

 	for (i = 0; i < num; i++) {
-		if (persistent_gnts[i] != NULL) {
-			put_persistent_gnt(blkif, persistent_gnts[i]);
+		if (pages[i]->persistent_gnt != NULL) {
+			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
 			continue;
 		}
-		if (handles[i] == BLKBACK_INVALID_HANDLE)
+		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
 			continue;
-		unmap_pages[invcount] = pages[i];
-		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
-				    GNTMAP_host_map, handles[i]);
-		handles[i] = BLKBACK_INVALID_HANDLE;
+		unmap_pages[invcount] = pages[i]->page;
+		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
+				    GNTMAP_host_map, pages[i]->handle);
+		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
 						invcount);
@@ -677,10 +675,8 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 	}
 }

-static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
-			 struct persistent_gnt *persistent_gnts[],
-			 grant_handle_t handles[],
-			 struct page *pages[],
+static int xen_blkbk_map(struct xen_blkif *blkif,
+			 struct grant_page *pages[],
 			 int num, bool ro)
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -707,26 +703,26 @@ again:
 		if (use_persistent_gnts)
 			persistent_gnt = get_persistent_gnt(
 				blkif,
-				grefs[i]);
+				pages[i]->gref);

 		if (persistent_gnt) {
 			/*
 			 * We are using persistent grants and
 			 * the grant is already mapped
 			 */
-			pages[i] = persistent_gnt->page;
-			persistent_gnts[i] = persistent_gnt;
+			pages[i]->page = persistent_gnt->page;
+			pages[i]->persistent_gnt = persistent_gnt;
 		} else {
-			if (get_free_page(blkif, &pages[i]))
+			if (get_free_page(blkif, &pages[i]->page))
 				goto out_of_memory;
-			addr = vaddr(pages[i]);
-			pages_to_gnt[segs_to_map] = pages[i];
-			persistent_gnts[i] = NULL;
+			addr = vaddr(pages[i]->page);
+			pages_to_gnt[segs_to_map] = pages[i]->page;
+			pages[i]->persistent_gnt = NULL;
 			flags = GNTMAP_host_map;
 			if (!use_persistent_gnts && ro)
 				flags |= GNTMAP_readonly;
 			gnttab_set_map_op(&map[segs_to_map++], addr,
-					  flags, grefs[i],
+					  flags, pages[i]->gref,
 					  blkif->domid);
 		}
 		map_until = i + 1;
@@ -745,16 +741,16 @@ again:
 	 * the page from the other domain.
 	 */
 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
-		if (!persistent_gnts[seg_idx]) {
+		if (!pages[seg_idx]->persistent_gnt) {
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
-				handles[seg_idx] = BLKBACK_INVALID_HANDLE;
+				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
 				goto next;
 			}
-			handles[seg_idx] = map[new_map_idx].handle;
+			pages[seg_idx]->handle = map[new_map_idx].handle;
 		} else {
 			continue;
 		}
@@ -776,14 +772,14 @@ again:
 			}
 			persistent_gnt->gnt = map[new_map_idx].ref;
 			persistent_gnt->handle = map[new_map_idx].handle;
-			persistent_gnt->page = pages[seg_idx];
+			persistent_gnt->page = pages[seg_idx]->page;
 			if (add_persistent_gnt(blkif,
 					       persistent_gnt)) {
 				kfree(persistent_gnt);
 				persistent_gnt = NULL;
 				goto next;
 			}
-			persistent_gnts[seg_idx] = persistent_gnt;
+			pages[seg_idx]->persistent_gnt = persistent_gnt;
 			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
@@ -814,15 +810,11 @@ out_of_memory:
 	return -ENOMEM;
 }

-static int xen_blkbk_map_seg(struct pending_req *pending_req,
-			     struct seg_buf seg[],
-			     struct page *pages[])
+static int xen_blkbk_map_seg(struct pending_req *pending_req)
 {
 	int rc;

-	rc = xen_blkbk_map(pending_req->blkif, pending_req->grefs,
-			   pending_req->persistent_gnts,
-			   pending_req->grant_handles, pending_req->pages,
+	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
 			   pending_req->nr_pages,
 			   (pending_req->operation != BLKIF_OP_READ));

@@ -834,9 +826,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			     struct seg_buf seg[],
 			     struct phys_req *preq)
 {
-	struct persistent_gnt **persistent =
-		pending_req->indirect_persistent_gnts;
-	struct page **pages = pending_req->indirect_pages;
+	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
 	struct blkif_request_segment_aligned *segments = NULL;
@@ -845,9 +835,10 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	indirect_grefs = INDIRECT_PAGES(nseg);
 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

-	rc = xen_blkbk_map(blkif, req->u.indirect.indirect_grefs,
-			   persistent, pending_req->indirect_handles,
-			   pages, indirect_grefs, true);
+	for (i = 0; i < indirect_grefs; i++)
+		pages[i]->gref = req->u.indirect.indirect_grefs[i];
+
+	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
 	if (rc)
 		goto unmap;

@@ -856,10 +847,10 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 			/* Map indirect segments */
 			if (segments)
 				kunmap_atomic(segments);
-			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]);
+			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 		}
 		i = n % SEGS_PER_INDIRECT_FRAME;
-		pending_req->grefs[n] = segments[i].gref;
+		pending_req->segments[n]->gref = segments[i].gref;
 		seg[n].nsec = segments[i].last_sect -
 			segments[i].first_sect + 1;
 		seg[n].offset = (segments[i].first_sect << 9);
@@ -874,8 +865,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 unmap:
 	if (segments)
 		kunmap_atomic(segments);
-	xen_blkbk_unmap(blkif, pending_req->indirect_handles,
-			pages, persistent, indirect_grefs);
+	xen_blkbk_unmap(blkif, pages, indirect_grefs);
 	return rc;
 }

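The parse_indirect hunks above preserve the driver's indexing scheme for indirect descriptors: segment n lives in indirect page n/SEGS_PER_INDIRECT_FRAME at slot n%SEGS_PER_INDIRECT_FRAME. A standalone sketch of that arithmetic (the frame-size constant below is illustrative, not the driver's actual value):

#include <stdio.h>

#define SEGS_PER_INDIRECT_FRAME 512	/* illustrative stand-in, not the driver's value */

int main(void)
{
	int examples[] = { 0, 511, 512, 1000 };
	for (int k = 0; k < 4; k++) {
		int n = examples[k];
		int frame = n / SEGS_PER_INDIRECT_FRAME;	/* which indirect page to kmap */
		int slot  = n % SEGS_PER_INDIRECT_FRAME;	/* descriptor within that page */
		printf("segment %4d -> indirect page %d, slot %3d\n", n, frame, slot);
	}
	return 0;
}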
@@ -965,9 +955,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
-				pending_req->pages,
-				pending_req->persistent_gnts,
+		xen_blkbk_unmap(pending_req->blkif,
+				pending_req->segments,
 				pending_req->nr_pages);
 		make_response(pending_req->blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
@@ -1104,7 +1093,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	int operation;
 	struct blk_plug plug;
 	bool drain = false;
-	struct page **pages = pending_req->pages;
+	struct grant_page **pages = pending_req->segments;
 	unsigned short req_operation;

 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
@@ -1165,7 +1154,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	preq.dev = req->u.rw.handle;
 	preq.sector_number = req->u.rw.sector_number;
 	for (i = 0; i < nseg; i++) {
-		pending_req->grefs[i] = req->u.rw.seg[i].gref;
+		pages[i]->gref = req->u.rw.seg[i].gref;
 		seg[i].nsec = req->u.rw.seg[i].last_sect -
 			req->u.rw.seg[i].first_sect + 1;
 		seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
@@ -1216,7 +1205,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (xen_blkbk_map_seg(pending_req, seg, pages))
+	if (xen_blkbk_map_seg(pending_req))
 		goto fail_flush;

 	/*
@@ -1228,7 +1217,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
 		       (bio_add_page(bio,
-				     pages[i],
+				     pages[i]->page,
 				     seg[i].nsec << 9,
 				     seg[i].offset) == 0)) {

@@ -1277,8 +1266,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	return 0;

 fail_flush:
-	xen_blkbk_unmap(blkif, pending_req->grant_handles,
-			pending_req->pages, pending_req->persistent_gnts,
+	xen_blkbk_unmap(blkif, pending_req->segments,
 			pending_req->nr_pages);
 fail_response:
 	/* Haven't submitted any bio's yet. */
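Net effect of the blkback.c hunks: four parallel per-segment arrays (page pointer, persistent grant, grant handle, grant reference) collapse into one array of struct grant_page pointers, so every helper now takes just (pages, num). A minimal userspace sketch of the resulting shape, with toy types standing in for the Xen ones:

#include <stdio.h>

/* Toy stand-ins for struct page, struct persistent_gnt, grant_handle_t
 * and grant_ref_t; only the grouping is the point. */
struct toy_grant_page {
	void *page;
	void *persistent_gnt;
	unsigned int handle;
	unsigned int gref;
};

/* All four attributes of a segment now travel behind one pointer,
 * where the old signatures carried one array per attribute. */
static void unmap_grouped(struct toy_grant_page *pages[], int num)
{
	for (int i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt)	/* persistent: keep it mapped */
			continue;
		printf("unmap gref %u via handle %u\n",
		       pages[i]->gref, pages[i]->handle);
	}
}

int main(void)
{
	struct toy_grant_page seg = { NULL, NULL, 7, 42 };
	struct toy_grant_page *segs[] = { &seg };
	unmap_grouped(segs, 1);
	return 0;
}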
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 1ac53da8410f..c6b4cb9af6c2 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -297,8 +297,6 @@ struct xen_blkif {
 	int free_pages_num;
 	struct list_head free_pages;

-	/* Allocation of pending_reqs */
-	struct pending_req *pending_reqs;
 	/* List of all 'pending_req' available */
 	struct list_head pending_free;
 	/* And its spinlock. */
@@ -323,6 +321,13 @@ struct seg_buf {
 	unsigned int nsec;
 };

+struct grant_page {
+	struct page *page;
+	struct persistent_gnt *persistent_gnt;
+	grant_handle_t handle;
+	grant_ref_t gref;
+};
+
 /*
  * Each outstanding request that we've passed to the lower device layers has a
  * 'pending_req' allocated to it. Each buffer_head that completes decrements
@@ -337,14 +342,9 @@ struct pending_req {
 	unsigned short operation;
 	int status;
 	struct list_head free_list;
-	struct page *pages[MAX_INDIRECT_SEGMENTS];
-	struct persistent_gnt *persistent_gnts[MAX_INDIRECT_SEGMENTS];
-	grant_handle_t grant_handles[MAX_INDIRECT_SEGMENTS];
-	grant_ref_t grefs[MAX_INDIRECT_SEGMENTS];
+	struct grant_page *segments[MAX_INDIRECT_SEGMENTS];
 	/* Indirect descriptors */
-	struct persistent_gnt *indirect_persistent_gnts[MAX_INDIRECT_PAGES];
-	struct page *indirect_pages[MAX_INDIRECT_PAGES];
-	grant_handle_t indirect_handles[MAX_INDIRECT_PAGES];
+	struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
 	struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
 	struct bio *biolist[MAX_INDIRECT_SEGMENTS];
 };
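With the common.h change, pending_req embeds only pointer arrays; the grant_page structs behind them are allocated one by one (see the xenbus.c hunks below). A back-of-envelope sketch of why that matters for allocation size; both constants are illustrative stand-ins, not the driver's values:

#include <stdio.h>

#define MAX_INDIRECT_SEGMENTS 2048	/* illustrative stand-in */
#define XEN_BLKIF_REQS 64		/* illustrative stand-in */

int main(void)
{
	/* Old layout: four parallel arrays embedded in every pending_req. */
	size_t per_req = MAX_INDIRECT_SEGMENTS *
			 (2 * sizeof(void *) + 2 * sizeof(unsigned int));
	printf("old embedded payload per request: %zu bytes\n", per_req);
	printf("whole pool as one contiguous allocation: %zu bytes\n",
	       per_req * (size_t)XEN_BLKIF_REQS);

	/* New layout: the request embeds only pointers; each grant_page
	 * behind them is a small independent allocation. */
	printf("new embedded pointers per request: %zu bytes\n",
	       MAX_INDIRECT_SEGMENTS * sizeof(void *));
	return 0;
}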
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index afab208c54e3..4a4749c78942 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -105,7 +105,8 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
-	int i;
+	struct pending_req *req, *n;
+	int i, j;

 	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

@@ -127,22 +128,51 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->free_pages_num = 0;
 	atomic_set(&blkif->persistent_gnt_in_use, 0);

-	blkif->pending_reqs = kcalloc(XEN_BLKIF_REQS,
-				      sizeof(blkif->pending_reqs[0]),
-				      GFP_KERNEL);
-	if (!blkif->pending_reqs) {
-		kmem_cache_free(xen_blkif_cachep, blkif);
-		return ERR_PTR(-ENOMEM);
-	}
 	INIT_LIST_HEAD(&blkif->pending_free);
+
+	for (i = 0; i < XEN_BLKIF_REQS; i++) {
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			goto fail;
+		list_add_tail(&req->free_list,
+			      &blkif->pending_free);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
+						   GFP_KERNEL);
+			if (!req->segments[j])
+				goto fail;
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
+							 GFP_KERNEL);
+			if (!req->indirect_pages[j])
+				goto fail;
+		}
+	}
 	spin_lock_init(&blkif->pending_free_lock);
 	init_waitqueue_head(&blkif->pending_free_wq);

-	for (i = 0; i < XEN_BLKIF_REQS; i++)
-		list_add_tail(&blkif->pending_reqs[i].free_list,
-			      &blkif->pending_free);
-
 	return blkif;
+
+fail:
+	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			if (!req->segments[j])
+				break;
+			kfree(req->segments[j]);
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			if (!req->indirect_pages[j])
+				break;
+			kfree(req->indirect_pages[j]);
+		}
+		kfree(req);
+	}
+
+	kmem_cache_free(xen_blkif_cachep, blkif);
+
+	return ERR_PTR(-ENOMEM);
 }

 static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
@@ -221,18 +251,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)

 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-	struct pending_req *req;
-	int i = 0;
+	struct pending_req *req, *n;
+	int i = 0, j;

 	if (!atomic_dec_and_test(&blkif->refcnt))
 		BUG();

 	/* Check that there is no request in use */
-	list_for_each_entry(req, &blkif->pending_free, free_list)
+	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+			kfree(req->segments[j]);
+
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+			kfree(req->indirect_pages[j]);
+
+		kfree(req);
 		i++;
-	BUG_ON(i != XEN_BLKIF_REQS);
+	}
+
+	WARN_ON(i != XEN_BLKIF_REQS);

-	kfree(blkif->pending_reqs);
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }

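The xenbus.c allocation and teardown hunks follow the familiar chunked-allocation idiom: zero-allocate the container, fill in the sub-allocations, and on failure free whatever is already non-NULL, relying on the zeroed pointers to mark the not-yet-allocated tail. A minimal userspace rendition of the same pattern, with illustrative names and sizes:

#include <stdlib.h>

#define NSEGS 4	/* illustrative; stands in for MAX_INDIRECT_SEGMENTS */

struct req {
	void *segments[NSEGS];
};

/* Returns NULL on failure; a partially built request is torn down by
 * the fail path, which stops at the first NULL left by calloc. */
static struct req *req_alloc(void)
{
	struct req *r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	for (int j = 0; j < NSEGS; j++) {
		r->segments[j] = calloc(1, 64);
		if (!r->segments[j])
			goto fail;
	}
	return r;
fail:
	for (int j = 0; j < NSEGS && r->segments[j]; j++)
		free(r->segments[j]);
	free(r);
	return NULL;
}

int main(void)
{
	struct req *r = req_alloc();
	if (r) {
		for (int j = 0; j < NSEGS; j++)
			free(r->segments[j]);
		free(r);
	}
	return 0;
}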