about summary refs log tree commit diff stats
path: root/drivers/block/xen-blkback/xenbus.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/block/xen-blkback/xenbus.c')
-rw-r--r--drivers/block/xen-blkback/xenbus.c151
1 files changed, 151 insertions, 0 deletions
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index b41ed65db2d3..0c263a248007 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -20,6 +20,8 @@
20#include <stdarg.h> 20#include <stdarg.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <xen/events.h>
24#include <xen/grant_table.h>
23#include "common.h" 25#include "common.h"
24 26
25#undef DPRINTK 27#undef DPRINTK
@@ -36,6 +38,7 @@ struct backend_info {
36 char *mode; 38 char *mode;
37}; 39};
38 40
41static struct kmem_cache *blkif_cachep;
39static void connect(struct backend_info *); 42static void connect(struct backend_info *);
40static int connect_ring(struct backend_info *); 43static int connect_ring(struct backend_info *);
41static void backend_changed(struct xenbus_watch *, const char **, 44static void backend_changed(struct xenbus_watch *, const char **,
@@ -106,6 +109,154 @@ static void update_blkif_status(struct blkif_st *blkif)
106 } 109 }
107} 110}
108 111
112struct blkif_st *blkif_alloc(domid_t domid)
113{
114 struct blkif_st *blkif;
115
116 blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
117 if (!blkif)
118 return ERR_PTR(-ENOMEM);
119
120 memset(blkif, 0, sizeof(*blkif));
121 blkif->domid = domid;
122 spin_lock_init(&blkif->blk_ring_lock);
123 atomic_set(&blkif->refcnt, 1);
124 init_waitqueue_head(&blkif->wq);
125 blkif->st_print = jiffies;
126 init_waitqueue_head(&blkif->waiting_to_free);
127
128 return blkif;
129}
130
/*
 * Map the frontend's shared ring page (grant reference @shared_page)
 * into the backend's pre-allocated VM area, blkif->blk_ring_area.
 *
 * On success, records the grant reference and the returned handle in
 * @blkif (both are needed later by unmap_frontend_page()) and returns 0.
 *
 * NOTE(review): on grant failure this returns op.status, a (positive)
 * GNTST_* code rather than a negative errno.  Callers only test for
 * non-zero, so this works, but the value must not be interpreted as an
 * errno — confirm before propagating it further.
 */
static int map_frontend_page(struct blkif_st *blkif, unsigned long shared_page)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			  GNTMAP_host_map, shared_page, blkif->domid);

	/* Failure of the hypercall itself (as opposed to a grant error
	 * reported in op.status) indicates bad arguments — a bug. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK(" Grant table operation failure !\n");
		return op.status;
	}

	blkif->shmem_ref = shared_page;
	blkif->shmem_handle = op.handle;

	return 0;
}
151
152static void unmap_frontend_page(struct blkif_st *blkif)
153{
154 struct gnttab_unmap_grant_ref op;
155
156 gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
157 GNTMAP_host_map, blkif->shmem_handle);
158
159 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
160 BUG();
161}
162
/*
 * Connect the backend to the frontend's shared ring and event channel.
 *
 * Maps grant @shared_page into a freshly allocated VM area, initialises
 * the back ring in whichever ABI (native / x86_32 / x86_64) the
 * frontend negotiated, and binds @evtchn to blkif_be_int.
 *
 * Idempotent: returns 0 immediately if already connected (blkif->irq
 * non-zero).  Returns 0 on success, or a non-zero error (negative errno,
 * or the grant status from map_frontend_page()) on failure; every
 * partially acquired resource is released on the error paths.
 */
int blkif_map(struct blkif_st *blkif, unsigned long shared_page,
	      unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	/* Initialise the back ring using the ABI the frontend advertised. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		/* blk_protocol is set elsewhere; any other value here is a
		 * programming error, not a runtime condition. */
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(
		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
	if (err < 0) {
		/* Unwind the ring mapping and leave the interface
		 * fully disconnected. */
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	/* On success the helper returns the bound irq number (>= 0). */
	blkif->irq = err;

	return 0;
}
220
/*
 * Quiesce and disconnect @blkif from its frontend: stop the per-device
 * kthread, wait for all in-flight requests to drain, then release the
 * irq and the shared-ring mapping.  The blkif_st itself survives and
 * is finally released by blkif_free().
 */
void blkif_disconnect(struct blkif_st *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	/* Drop our reference and wait until every outstanding user has
	 * dropped theirs (refcnt hits 0), then take the reference back
	 * so that blkif_free() can perform the final decrement. */
	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
	}
}
243
244void blkif_free(struct blkif_st *blkif)
245{
246 if (!atomic_dec_and_test(&blkif->refcnt))
247 BUG();
248 kmem_cache_free(blkif_cachep, blkif);
249}
250
251int __init blkif_interface_init(void)
252{
253 blkif_cachep = kmem_cache_create("blkif_cache", sizeof(struct blkif_st),
254 0, 0, NULL);
255 if (!blkif_cachep)
256 return -ENOMEM;
257
258 return 0;
259}
109 260
110/* 261/*
111 * sysfs interface for VBD I/O requests 262 * sysfs interface for VBD I/O requests