author    Roger Pau Monne <roger.pau@citrix.com>    2013-05-02 04:21:17 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-05-07 09:42:17 -0400
commit    bb642e8315fd573795e8b6fa9b9629064d73add1 (patch)
tree      47ee834fb5a80a77129cb8bf0607ce4db9c04a55 /drivers/block/xen-blkback/xenbus.c
parent    402b27f9f2c22309d5bb285628765bc27b82fcf5 (diff)
xen-blkback: allocate list of pending reqs in small chunks
Allocate pending requests in smaller chunks instead of allocating them
all at the same time. This change also removes the global array of
pending_reqs; it is no longer necessary.

Variables related to the grant mapping have been grouped into a struct
called "grant_page", which allows allocating them in smaller chunks and
also improves memory locality.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
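The allocation strategy described above can be sketched as a standalone
userspace C program. This is a minimal illustration, not the driver code:
NR_REQS and MAX_SEGS are stand-ins for the kernel's XEN_BLKIF_REQS and
MAX_INDIRECT_SEGMENTS, a hand-rolled singly linked list stands in for the
kernel's list_head free list, and calloc/free stand in for kzalloc/kfree.

/* Minimal userspace sketch (assumed names, not the driver's) of the
 * patch's strategy: allocate each pending request and its per-segment
 * chunks individually, and unwind request by request on failure. */
#include <stdio.h>
#include <stdlib.h>

#define NR_REQS  8   /* stand-in for XEN_BLKIF_REQS */
#define MAX_SEGS 4   /* stand-in for MAX_INDIRECT_SEGMENTS */

struct pending_req {
	void *segments[MAX_SEGS];
	struct pending_req *next;   /* simplified free list */
};

static struct pending_req *pending_free;

static void free_all(void)
{
	struct pending_req *req;

	while ((req = pending_free) != NULL) {
		pending_free = req->next;
		for (int j = 0; j < MAX_SEGS; j++)
			free(req->segments[j]);   /* free(NULL) is a no-op */
		free(req);
	}
}

static int alloc_all(void)
{
	for (int i = 0; i < NR_REQS; i++) {
		struct pending_req *req = calloc(1, sizeof(*req));

		if (!req)
			goto fail;
		/* Link the request first, as the patch does with
		 * list_add_tail(), so the failure path can find and
		 * free it along with any segments already allocated. */
		req->next = pending_free;
		pending_free = req;
		for (int j = 0; j < MAX_SEGS; j++) {
			req->segments[j] = calloc(1, 64);
			if (!req->segments[j])
				goto fail;
		}
	}
	return 0;
fail:
	free_all();
	return -1;
}

int main(void)
{
	if (alloc_all() < 0) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	puts("allocated all requests in small chunks");
	free_all();
	return 0;
}

The point mirrored from the patch is that each request joins the free list
before its members are allocated, so a single unwind path can walk the list
and release whatever was completed, exactly as the fail: label below does.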
Diffstat (limited to 'drivers/block/xen-blkback/xenbus.c')
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 74 +++++++++++++++++-----
1 file changed, 57 insertions(+), 17 deletions(-)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index afab208c54e3..4a4749c78942 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -105,7 +105,8 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
-	int i;
+	struct pending_req *req, *n;
+	int i, j;
 
 	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -127,22 +128,51 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->free_pages_num = 0;
 	atomic_set(&blkif->persistent_gnt_in_use, 0);
 
-	blkif->pending_reqs = kcalloc(XEN_BLKIF_REQS,
-				      sizeof(blkif->pending_reqs[0]),
-				      GFP_KERNEL);
-	if (!blkif->pending_reqs) {
-		kmem_cache_free(xen_blkif_cachep, blkif);
-		return ERR_PTR(-ENOMEM);
-	}
 	INIT_LIST_HEAD(&blkif->pending_free);
+
+	for (i = 0; i < XEN_BLKIF_REQS; i++) {
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			goto fail;
+		list_add_tail(&req->free_list,
+			      &blkif->pending_free);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
+						   GFP_KERNEL);
+			if (!req->segments[j])
+				goto fail;
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
+							 GFP_KERNEL);
+			if (!req->indirect_pages[j])
+				goto fail;
+		}
+	}
 	spin_lock_init(&blkif->pending_free_lock);
 	init_waitqueue_head(&blkif->pending_free_wq);
 
-	for (i = 0; i < XEN_BLKIF_REQS; i++)
-		list_add_tail(&blkif->pending_reqs[i].free_list,
-			      &blkif->pending_free);
-
 	return blkif;
+
+fail:
+	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			if (!req->segments[j])
+				break;
+			kfree(req->segments[j]);
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			if (!req->indirect_pages[j])
+				break;
+			kfree(req->indirect_pages[j]);
+		}
+		kfree(req);
+	}
+
+	kmem_cache_free(xen_blkif_cachep, blkif);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
@@ -221,18 +251,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-	struct pending_req *req;
-	int i = 0;
+	struct pending_req *req, *n;
+	int i = 0, j;
 
 	if (!atomic_dec_and_test(&blkif->refcnt))
 		BUG();
 
 	/* Check that there is no request in use */
-	list_for_each_entry(req, &blkif->pending_free, free_list)
+	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+			kfree(req->segments[j]);
+
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+			kfree(req->indirect_pages[j]);
+
+		kfree(req);
 		i++;
-	BUG_ON(i != XEN_BLKIF_REQS);
+	}
+
+	WARN_ON(i != XEN_BLKIF_REQS);
 
-	kfree(blkif->pending_reqs);
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
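The commit message also mentions grouping grant-mapping state into a struct
called "grant_page". That struct is introduced in common.h and does not
appear in this file's diff, so the sketch below only approximates the idea
and its field names are illustrative: per-segment fields move from parallel
full-size arrays embedded in the request into one small struct allocated
per segment, which is what makes the per-chunk kzalloc() calls above
possible and keeps each segment's fields adjacent in memory.

/* Illustrative only: the real struct grant_page is defined in
 * common.h by this patch and its exact fields are not shown here. */
#include <stdio.h>

#define MAX_SEGS 256  /* stand-in for the per-request segment limit */

/* Before: each attribute of a segment lived in its own full-size
 * array embedded in the request, forcing one large allocation. */
struct pending_req_old {
	void *pages[MAX_SEGS];
	unsigned int handles[MAX_SEGS];
	unsigned int grefs[MAX_SEGS];
};

/* After: a segment's fields travel together in one small struct,
 * allocated one chunk at a time and adjacent in memory. */
struct grant_page {
	void *page;
	unsigned int handle;
	unsigned int gref;
};

struct pending_req_new {
	struct grant_page *segments[MAX_SEGS];
};

int main(void)
{
	printf("embedded parallel arrays: %zu bytes in one allocation\n",
	       sizeof(struct pending_req_old));
	printf("grouped grant_page: %zu bytes of pointers + %zu per segment\n",
	       sizeof(struct pending_req_new), sizeof(struct grant_page));
	return 0;
}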