author		Bob Liu <bob.liu@oracle.com>	2015-06-03 01:40:01 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2015-06-05 21:14:05 -0400
commit		69b91ede5cab843dcf345c28bd1f4b5a99dacd9b (patch)
tree		542bccc08bb0281d7a2474dc48021c5b4917e038
parent		a5768aa887fb636f0cc4c83a2f1242506aaf50f6 (diff)
drivers: xen-blkback: delay pending_req allocation to connect_ring
This is a preparatory patch for the multi-page ring feature. In
connect_ring we know exactly how many pages are used for the shared
ring, so delay the pending_req allocation to that point and avoid
wasting memory.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
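For readers skimming the diff, the pattern the patch moves into
connect_ring() is sketched below: size the request pool only once the
ring geometry is known, and unwind every partial allocation on failure,
mirroring the fail: label added further down. This is a minimal
userspace sketch, not kernel code; struct req, pool_alloc() and
REQS_PER_PAGE are invented stand-ins for the patch's pending_req
machinery.

/*
 * Minimal sketch of deferred pool allocation with full unwind on
 * failure.  Illustrative only: pool_alloc() and REQS_PER_PAGE are
 * invented names, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

#define REQS_PER_PAGE 32		/* mirrors XEN_BLKIF_REQS_PER_PAGE */

struct req { int id; };

/* Allocate one request per slot the (now known) ring can hold. */
static struct req **pool_alloc(unsigned int nr_ring_pages, size_t *nr_out)
{
	size_t n = (size_t)nr_ring_pages * REQS_PER_PAGE, i;
	struct req **pool = calloc(n, sizeof(*pool));

	if (!pool)
		return NULL;
	for (i = 0; i < n; i++) {
		pool[i] = calloc(1, sizeof(**pool));
		if (!pool[i])
			goto fail;
		pool[i]->id = (int)i;
	}
	*nr_out = n;
	return pool;

fail:	/* free everything allocated so far, like the patch's fail: path */
	while (i--)
		free(pool[i]);
	free(pool);
	return NULL;
}

int main(void)
{
	size_t n;
	struct req **pool = pool_alloc(1, &n);	/* single-page ring */

	if (!pool)
		return 1;
	printf("allocated %zu pending requests\n", n);
	while (n--)
		free(pool[n]);
	free(pool);
	return 0;
}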
-rw-r--r--	drivers/block/xen-blkback/common.h	 2
-rw-r--r--	drivers/block/xen-blkback/xenbus.c	82
2 files changed, 39 insertions, 45 deletions
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index f620b5d3f77c..043f13b7b7b0 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -248,7 +248,7 @@ struct backend_info;
 #define PERSISTENT_GNT_WAS_ACTIVE 1
 
 /* Number of requests that we can fit in a ring */
-#define XEN_BLKIF_REQS 32
+#define XEN_BLKIF_REQS_PER_PAGE 32
 
 struct persistent_gnt {
 	struct page *page;
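The rename above is what lets the constant scale: per the commit
message, a multi-page ring follow-up would presumably size the request
pool as a multiple of the ring page count rather than a fixed 32. A
hedged sketch of that sizing follows; nr_ring_pages and
xen_blkif_reqs() are assumed names, not part of this patch.

/* Hypothetical sizing once a ring spans nr_ring_pages shared pages;
 * this patch itself still allocates for the single-page case only. */
static inline unsigned int xen_blkif_reqs(unsigned int nr_ring_pages)
{
	return XEN_BLKIF_REQS_PER_PAGE * nr_ring_pages;
}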
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 6ab69ad61ee1..c212d41fd5bd 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -124,8 +124,6 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
-	struct pending_req *req, *n;
-	int i, j;
 
 	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -151,51 +149,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 
 	INIT_LIST_HEAD(&blkif->pending_free);
 	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
-
-	for (i = 0; i < XEN_BLKIF_REQS; i++) {
-		req = kzalloc(sizeof(*req), GFP_KERNEL);
-		if (!req)
-			goto fail;
-		list_add_tail(&req->free_list,
-			      &blkif->pending_free);
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
-						   GFP_KERNEL);
-			if (!req->segments[j])
-				goto fail;
-		}
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
-							 GFP_KERNEL);
-			if (!req->indirect_pages[j])
-				goto fail;
-		}
-	}
 	spin_lock_init(&blkif->pending_free_lock);
 	init_waitqueue_head(&blkif->pending_free_wq);
 	init_waitqueue_head(&blkif->shutdown_wq);
 
 	return blkif;
-
-fail:
-	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-		list_del(&req->free_list);
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-			if (!req->segments[j])
-				break;
-			kfree(req->segments[j]);
-		}
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-			if (!req->indirect_pages[j])
-				break;
-			kfree(req->indirect_pages[j]);
-		}
-		kfree(req);
-	}
-
-	kmem_cache_free(xen_blkif_cachep, blkif);
-
-	return ERR_PTR(-ENOMEM);
 }
 
 static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
@@ -312,7 +270,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 		i++;
 	}
 
-	WARN_ON(i != XEN_BLKIF_REQS);
+	WARN_ON(i != XEN_BLKIF_REQS_PER_PAGE);
 
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
@@ -864,7 +822,8 @@ static int connect_ring(struct backend_info *be)
 	unsigned int evtchn;
 	unsigned int pers_grants;
 	char protocol[64] = "";
-	int err;
+	struct pending_req *req, *n;
+	int err, i, j;
 
 	pr_debug("%s %s\n", __func__, dev->otherend);
 
@@ -905,6 +864,24 @@ static int connect_ring(struct backend_info *be)
 		ring_ref, evtchn, be->blkif->blk_protocol, protocol,
 		pers_grants ? "persistent grants" : "");
 
+	for (i = 0; i < XEN_BLKIF_REQS_PER_PAGE; i++) {
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			goto fail;
+		list_add_tail(&req->free_list, &be->blkif->pending_free);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
+			if (!req->segments[j])
+				goto fail;
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
+							 GFP_KERNEL);
+			if (!req->indirect_pages[j])
+				goto fail;
+		}
+	}
+
 	/* Map the shared frame, irq etc. */
 	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
 	if (err) {
@@ -914,6 +891,23 @@ static int connect_ring(struct backend_info *be)
 	}
 
 	return 0;
+
+fail:
+	list_for_each_entry_safe(req, n, &be->blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			if (!req->segments[j])
+				break;
+			kfree(req->segments[j]);
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			if (!req->indirect_pages[j])
+				break;
+			kfree(req->indirect_pages[j]);
+		}
+		kfree(req);
+	}
+	return -ENOMEM;
 }
 
 static const struct xenbus_device_id xen_blkbk_ids[] = {