Diffstat (limited to 'drivers/lguest/io.c')
-rw-r--r-- drivers/lguest/io.c | 265
1 file changed, 246 insertions(+), 19 deletions(-)
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c
index c8eb79266991..ea68613b43f6 100644
--- a/drivers/lguest/io.c
+++ b/drivers/lguest/io.c
@@ -1,5 +1,9 @@
1/* Simple I/O model for guests, based on shared memory. 1/*P:300 The I/O mechanism in lguest is simple yet flexible, allowing the Guest
2 * Copyright (C) 2006 Rusty Russell IBM Corporation 2 * to talk to the Launcher or directly to another Guest. It uses familiar
3 * concepts of DMA and interrupts, plus some neat code stolen from
4 * futexes... :*/
5
6/* Copyright (C) 2006 Rusty Russell IBM Corporation
3 * 7 *
4 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -23,8 +27,36 @@
23#include <linux/uaccess.h> 27#include <linux/uaccess.h>
24#include "lg.h" 28#include "lg.h"
25 29
30/*L:300
31 * I/O
32 *
33 * Getting data in and out of the Guest is quite an art. There are numerous
34 * ways to do it, and they all suck differently. We try to keep things fairly
35 * close to "real" hardware so our Guest's drivers don't look like an alien
36 * visitation in the middle of the Linux code, and yet make sure that Guests
37 * can talk directly to other Guests, not just the Launcher.
38 *
39 * To do this, the Guest gives us a key when it binds or sends DMA buffers.
40 * The key corresponds to a "physical" address inside the Guest (ie. a virtual
41 * address inside the Launcher process). We don't, however, use this key
42 * directly.
43 *
44 * We want Guests which share memory to be able to DMA to each other: two
45 * Launchers can mmap the same file, and then the Guests can communicate.
46 * Fortunately, the futex code provides us with a way to get a "union
47 * futex_key" corresponding to the memory lying at a virtual address: if the
48 * two processes share memory, the "union futex_key" for that memory will match
49 * even if the memory is mapped at different addresses in each. So we always
50 * convert the keys to "union futex_key"s to compare them.
51 *
52 * Before we dive into this though, we need to look at another set of helper
53 * routines used throughout the Host kernel code to access Guest memory.
54 :*/
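For reference, the "union futex_key" discussed above looks roughly like this in
kernels of this vintage (a sketch of the definition from <linux/futex.h> of the
period; check field order and names against your own tree):

	union futex_key {
		struct {			/* shared, file-backed mapping */
			unsigned long pgoff;	/* page offset within the file */
			struct inode *inode;	/* which file */
			int offset;		/* offset in page, low bit set */
		} shared;
		struct {			/* private, anonymous mapping */
			unsigned long address;	/* virtual address */
			struct mm_struct *mm;	/* which address space */
			int offset;		/* offset within the page */
		} private;
		struct {			/* common view for comparisons */
			unsigned long word;
			void *ptr;
			int offset;
		} both;
	};

The "both" view is what the hash() and key_eq() routines below operate on.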
26static struct list_head dma_hash[61]; 55static struct list_head dma_hash[61];
27 56
57/* An unfortunate side effect of the Linux doubly-linked list implementation is
58 * that there's no good way to statically initialize an array of linked
59 * lists. */
28void lguest_io_init(void) 60void lguest_io_init(void)
29{ 61{
30 unsigned int i; 62 unsigned int i;
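The loop body of lguest_io_init() falls outside this hunk; pieced together from
context it is just the obvious initialization (a sketch, assuming the usual
INIT_LIST_HEAD helper, not the verbatim lines):

	void lguest_io_init(void)
	{
		unsigned int i;

		/* Turn each hash bucket into an empty list. */
		for (i = 0; i < ARRAY_SIZE(dma_hash); i++)
			INIT_LIST_HEAD(&dma_hash[i]);
	}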
@@ -56,6 +88,19 @@ kill:
56 return 0; 88 return 0;
57} 89}
58 90
91/*L:330 This is our hash function, using the wonderful Jenkins hash.
92 *
93 * The futex key is a union with three parts: an unsigned long word, a pointer,
94 * and an int "offset". We could use jhash_2words() which takes three u32s.
95 * (Ok, the hash functions are great: the naming sucks though).
96 *
97 * It's nice to be portable to 64-bit platforms, so we use the more generic
98 * jhash2(), which takes an array of u32, the number of u32s, and an initial
99 * u32 to roll in. This is uglier, but breaks down to almost the same code on
100 * 32-bit platforms like this one.
101 *
102 * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61).
103 */
59static unsigned int hash(const union futex_key *key) 104static unsigned int hash(const union futex_key *key)
60{ 105{
61 return jhash2((u32*)&key->both.word, 106 return jhash2((u32*)&key->both.word,
@@ -64,6 +109,9 @@ static unsigned int hash(const union futex_key *key)
64 % ARRAY_SIZE(dma_hash); 109 % ARRAY_SIZE(dma_hash);
65} 110}
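The hunk boundary splits hash() in two; read as one piece, the whole function is
roughly this (a reconstruction across the boundary, not verbatim):

	static unsigned int hash(const union futex_key *key)
	{
		/* Treat the word and pointer parts of the key as an array of
		 * u32s, roll in the offset, then reduce to a bucket index. */
		return jhash2((u32 *)&key->both.word,
			      (sizeof(key->both.word) + sizeof(key->both.ptr)) / 4,
			      key->both.offset)
			% ARRAY_SIZE(dma_hash);
	}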
66 111
112/* This is a convenience routine to compare two keys. It's a much bemoaned C
113 * weakness that it doesn't allow '==' on structures or unions, so we have to
114 * open-code it like this. */
67static inline int key_eq(const union futex_key *a, const union futex_key *b) 115static inline int key_eq(const union futex_key *a, const union futex_key *b)
68{ 116{
69 return (a->both.word == b->both.word 117 return (a->both.word == b->both.word
@@ -71,22 +119,36 @@ static inline int key_eq(const union futex_key *a, const union futex_key *b)
71 && a->both.offset == b->both.offset); 119 && a->both.offset == b->both.offset);
72} 120}
73 121
74/* Must hold read lock on dmainfo owner's current->mm->mmap_sem */ 122/*L:360 OK, when we need to actually free up a Guest's DMA array we do several
123 * things, so we have a convenient function to do it.
124 *
125 * The caller must hold a read lock on dmainfo owner's current->mm->mmap_sem
126 * for the drop_futex_key_refs(). */
75static void unlink_dma(struct lguest_dma_info *dmainfo) 127static void unlink_dma(struct lguest_dma_info *dmainfo)
76{ 128{
129 /* You locked this too, right? */
77 BUG_ON(!mutex_is_locked(&lguest_lock)); 130 BUG_ON(!mutex_is_locked(&lguest_lock));
131 /* This is how we know that the entry is free. */
78 dmainfo->interrupt = 0; 132 dmainfo->interrupt = 0;
133 /* Remove it from the hash table. */
79 list_del(&dmainfo->list); 134 list_del(&dmainfo->list);
135 /* Drop the references we were holding (to the inode or mm). */
80 drop_futex_key_refs(&dmainfo->key); 136 drop_futex_key_refs(&dmainfo->key);
81} 137}
82 138
139/*L:350 This is the routine which we call when the Guest asks to unregister a
140 * DMA array attached to a given key. Returns true if the array was found. */
83static int unbind_dma(struct lguest *lg, 141static int unbind_dma(struct lguest *lg,
84 const union futex_key *key, 142 const union futex_key *key,
85 unsigned long dmas) 143 unsigned long dmas)
86{ 144{
87 int i, ret = 0; 145 int i, ret = 0;
88 146
147 /* We don't bother with the hash table, just look through all this
148 * Guest's DMA arrays. */
89 for (i = 0; i < LGUEST_MAX_DMA; i++) { 149 for (i = 0; i < LGUEST_MAX_DMA; i++) {
150 /* In theory a Guest could have more than one array on the same key,
151 * or one array on multiple keys, so we check both. */
90 if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) { 152 if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
91 unlink_dma(&lg->dma[i]); 153 unlink_dma(&lg->dma[i]);
92 ret = 1; 154 ret = 1;
@@ -96,51 +158,91 @@ static int unbind_dma(struct lguest *lg,
96 return ret; 158 return ret;
97} 159}
98 160
161/*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct
162 * lguest_dma" for receiving I/O.
163 *
164 * The Guest wants to bind an array of "struct lguest_dma"s to a particular key
165 * to receive input. This only happens when the Guest is setting up a new
166 * device, so it doesn't have to be very fast.
167 *
168 * It returns 1 on a successful registration (it can fail if we hit the limit
169 * of registrations for this Guest).
170 */
99int bind_dma(struct lguest *lg, 171int bind_dma(struct lguest *lg,
100 unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt) 172 unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
101{ 173{
102 unsigned int i; 174 unsigned int i;
103 int ret = 0; 175 int ret = 0;
104 union futex_key key; 176 union futex_key key;
177 /* Futex code needs the mmap_sem. */
105 struct rw_semaphore *fshared = &current->mm->mmap_sem; 178 struct rw_semaphore *fshared = &current->mm->mmap_sem;
106 179
180 /* Invalid interrupt? (We could kill the guest here). */
107 if (interrupt >= LGUEST_IRQS) 181 if (interrupt >= LGUEST_IRQS)
108 return 0; 182 return 0;
109 183
184 /* We need to grab the Big Lguest Lock, because other Guests may be
185 * trying to look through this Guest's DMAs to send something while
186 * we're doing this. */
110 mutex_lock(&lguest_lock); 187 mutex_lock(&lguest_lock);
111 down_read(fshared); 188 down_read(fshared);
112 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { 189 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
113 kill_guest(lg, "bad dma key %#lx", ukey); 190 kill_guest(lg, "bad dma key %#lx", ukey);
114 goto unlock; 191 goto unlock;
115 } 192 }
193
194 /* We want to keep this key valid once we drop mmap_sem, so we have to
195 * hold a reference. */
116 get_futex_key_refs(&key); 196 get_futex_key_refs(&key);
117 197
198 /* If the Guest specified an interrupt of 0, that means they want to
199 * unregister this array of "struct lguest_dma"s. */
118 if (interrupt == 0) 200 if (interrupt == 0)
119 ret = unbind_dma(lg, &key, dmas); 201 ret = unbind_dma(lg, &key, dmas);
120 else { 202 else {
203 /* Look through this Guest's dma array for an unused entry. */
121 for (i = 0; i < LGUEST_MAX_DMA; i++) { 204 for (i = 0; i < LGUEST_MAX_DMA; i++) {
205 /* If the interrupt is non-zero, the entry is already
206 * used. */
122 if (lg->dma[i].interrupt) 207 if (lg->dma[i].interrupt)
123 continue; 208 continue;
124 209
210 /* OK, a free one! Fill in our details. */
125 lg->dma[i].dmas = dmas; 211 lg->dma[i].dmas = dmas;
126 lg->dma[i].num_dmas = numdmas; 212 lg->dma[i].num_dmas = numdmas;
127 lg->dma[i].next_dma = 0; 213 lg->dma[i].next_dma = 0;
128 lg->dma[i].key = key; 214 lg->dma[i].key = key;
129 lg->dma[i].guestid = lg->guestid; 215 lg->dma[i].guestid = lg->guestid;
130 lg->dma[i].interrupt = interrupt; 216 lg->dma[i].interrupt = interrupt;
217
218 /* Now we add it to the hash table: the position
219 * depends on the futex key that we got. */
131 list_add(&lg->dma[i].list, &dma_hash[hash(&key)]); 220 list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);
221 /* Success! */
132 ret = 1; 222 ret = 1;
133 goto unlock; 223 goto unlock;
134 } 224 }
135 } 225 }
226 /* If we didn't find a slot to put the key in, drop the reference
227 * again. */
136 drop_futex_key_refs(&key); 228 drop_futex_key_refs(&key);
137unlock: 229unlock:
230 /* Unlock and out. */
138 up_read(fshared); 231 up_read(fshared);
139 mutex_unlock(&lguest_lock); 232 mutex_unlock(&lguest_lock);
140 return ret; 233 return ret;
141} 234}
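For context, a Guest driver reaches bind_dma() through a hypercall; a
hypothetical Guest-side wrapper might look like this (the helper name and the
packing of num and interrupt into one argument are assumptions for
illustration, not an API this diff defines):

	/* Hypothetical Guest-side sketch: bind "num" struct lguest_dmas at
	 * "dmas" to "key", raising "irq" when one is filled.  An irq of 0
	 * would mean "unbind", per bind_dma() above. */
	static int example_bind_dma(unsigned long key, struct lguest_dma *dmas,
				    unsigned int num, u8 irq)
	{
		return hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq);
	}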
142 235
143/* lgread from another guest */ 236/*L:385 Note that our routines to access a different Guest's memory are called
237 * lgread_other() and lgwrite_other(): these names emphasize that they are only
238 * used when the Guest is *not* the current Guest.
239 *
240 * The interface for copying from another process's memory is called
241 * access_process_vm(), with a final argument of 0 for a read, and 1 for a
242 * write.
243 *
244 * We need lgread_other() to read the destination Guest's "struct lguest_dma"
245 * array. */
144static int lgread_other(struct lguest *lg, 246static int lgread_other(struct lguest *lg,
145 void *buf, u32 addr, unsigned bytes) 247 void *buf, u32 addr, unsigned bytes)
146{ 248{
@@ -153,7 +255,8 @@ static int lgread_other(struct lguest *lg,
153 return 1; 255 return 1;
154} 256}
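The middle of lgread_other() is elided by the hunk; from the surrounding
description, the missing lines amount to a bounds check plus the
access_process_vm() call, roughly as follows (a reconstruction, so treat the
exact calls as assumptions):

	static int lgread_other(struct lguest *lg,
				void *buf, u32 addr, unsigned bytes)
	{
		/* Check the range lies inside the Guest, then read via the
		 * Guest's task: a final argument of 0 means "read". */
		if (!lguest_address_ok(lg, addr, bytes)
		    || access_process_vm(lg->tsk, addr, buf, bytes, 0) != bytes) {
			memset(buf, 0, bytes);
			kill_guest(lg, "bad address in registered DMA struct");
			return 0;
		}
		return 1;
	}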
155 257
156/* lgwrite to another guest */ 258/* "lgwrite()" to another Guest: used to update the destination "used_len" once
259 * we've transferred data into the buffer. */
157static int lgwrite_other(struct lguest *lg, u32 addr, 260static int lgwrite_other(struct lguest *lg, u32 addr,
158 const void *buf, unsigned bytes) 261 const void *buf, unsigned bytes)
159{ 262{
@@ -166,6 +269,15 @@ static int lgwrite_other(struct lguest *lg, u32 addr,
166 return 1; 269 return 1;
167} 270}
168 271
272/*L:400 This is the generic engine which copies from a source "struct
273 * lguest_dma" from this Guest into another Guest's "struct lguest_dma". The
274 * destination Guest's pages have already been mapped, as contained in the
275 * pages array.
276 *
277 * If you're wondering if there's a nice "copy from one process to another"
278 * routine, so was I. But Linux isn't really set up to copy between two
279 * unrelated processes, so we have to write it ourselves.
280 */
169static u32 copy_data(struct lguest *srclg, 281static u32 copy_data(struct lguest *srclg,
170 const struct lguest_dma *src, 282 const struct lguest_dma *src,
171 const struct lguest_dma *dst, 283 const struct lguest_dma *dst,
@@ -174,33 +286,59 @@ static u32 copy_data(struct lguest *srclg,
174 unsigned int totlen, si, di, srcoff, dstoff; 286 unsigned int totlen, si, di, srcoff, dstoff;
175 void *maddr = NULL; 287 void *maddr = NULL;
176 288
289 /* We return the total length transferred. */
177 totlen = 0; 290 totlen = 0;
291
292 /* We keep indexes into the source and destination "struct lguest_dma",
293 * and an offset within each region. */
178 si = di = 0; 294 si = di = 0;
179 srcoff = dstoff = 0; 295 srcoff = dstoff = 0;
296
297 /* We loop until the source or destination is exhausted. */
180 while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si] 298 while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
181 && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) { 299 && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
300 /* We can only transfer the rest of the src buffer, or as much
301 * as will fit into the destination buffer. */
182 u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff); 302 u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);
183 303
304 /* For systems using "highmem" we need to use kmap() to access
305 * the page we want. We often use the same page over and over,
306 * so rather than kmap() it on every loop, we set the maddr
307 * pointer to NULL when we need to move to the next
308 * destination page. */
184 if (!maddr) 309 if (!maddr)
185 maddr = kmap(pages[di]); 310 maddr = kmap(pages[di]);
186 311
187 /* FIXME: This is not completely portable, since 312 /* Copy directly from (this Guest's) source address to the
188 archs do different things for copy_to_user_page. */ 313 * destination Guest's kmap()ed buffer. Note that maddr points
314 * to the start of the page: we need to add the offset of the
315 * destination address and offset within the buffer. */
316
317 /* FIXME: This is not completely portable. I looked at
318 * copy_to_user_page(), and some archs seem to need special
319 * flushes. x86 is fine. */
189 if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE, 320 if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
190 (void __user *)src->addr[si], len) != 0) { 321 (void __user *)src->addr[si], len) != 0) {
322 /* If a copy failed, it's the source's fault. */
191 kill_guest(srclg, "bad address in sending DMA"); 323 kill_guest(srclg, "bad address in sending DMA");
192 totlen = 0; 324 totlen = 0;
193 break; 325 break;
194 } 326 }
195 327
328 /* Increment the total and src & dst offsets */
196 totlen += len; 329 totlen += len;
197 srcoff += len; 330 srcoff += len;
198 dstoff += len; 331 dstoff += len;
332
333 /* Presumably we reached the end of the src or dest buffers: */
199 if (srcoff == src->len[si]) { 334 if (srcoff == src->len[si]) {
335 /* Move to the next buffer at offset 0 */
200 si++; 336 si++;
201 srcoff = 0; 337 srcoff = 0;
202 } 338 }
203 if (dstoff == dst->len[di]) { 339 if (dstoff == dst->len[di]) {
340 /* We need to unmap that destination page and reset
341 * maddr ready for the next one. */
204 kunmap(pages[di]); 342 kunmap(pages[di]);
205 maddr = NULL; 343 maddr = NULL;
206 di++; 344 di++;
@@ -208,13 +346,15 @@ static u32 copy_data(struct lguest *srclg,
208 } 346 }
209 } 347 }
210 348
349 /* If we still had a page mapped at the end, unmap now. */
211 if (maddr) 350 if (maddr)
212 kunmap(pages[di]); 351 kunmap(pages[di]);
213 352
214 return totlen; 353 return totlen;
215} 354}
216 355
217/* Src is us, ie. current. */ 356/*L:390 This is how we transfer a "struct lguest_dma" from the source Guest
357 * (the current Guest which called SEND_DMA) to another Guest. */
218static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src, 358static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
219 struct lguest *dstlg, const struct lguest_dma *dst) 359 struct lguest *dstlg, const struct lguest_dma *dst)
220{ 360{
@@ -222,23 +362,31 @@ static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
222 u32 ret; 362 u32 ret;
223 struct page *pages[LGUEST_MAX_DMA_SECTIONS]; 363 struct page *pages[LGUEST_MAX_DMA_SECTIONS];
224 364
365 /* We check that both source and destination "struct lguest_dma"s are
366 * within the bounds of the source and destination Guests. */
225 if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src)) 367 if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
226 return 0; 368 return 0;
227 369
228 /* First get the destination pages */ 370 /* We need to map the pages which correspond to each part of the
371 * destination buffer. */
229 for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { 372 for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
230 if (dst->len[i] == 0) 373 if (dst->len[i] == 0)
231 break; 374 break;
375 /* get_user_pages() is a complicated function, especially since
376 * we only want a single page. But it works, and returns the
377 * number of pages. Note that we're holding the destination's
378 * mmap_sem, as get_user_pages() requires. */
232 if (get_user_pages(dstlg->tsk, dstlg->mm, 379 if (get_user_pages(dstlg->tsk, dstlg->mm,
233 dst->addr[i], 1, 1, 1, pages+i, NULL) 380 dst->addr[i], 1, 1, 1, pages+i, NULL)
234 != 1) { 381 != 1) {
382 /* This means the destination gave us a bogus buffer */
235 kill_guest(dstlg, "Error mapping DMA pages"); 383 kill_guest(dstlg, "Error mapping DMA pages");
236 ret = 0; 384 ret = 0;
237 goto drop_pages; 385 goto drop_pages;
238 } 386 }
239 } 387 }
240 388
241 /* Now copy until we run out of src or dst. */ 389 /* Now copy the data until we run out of src or dst. */
242 ret = copy_data(srclg, src, dst, pages); 390 ret = copy_data(srclg, src, dst, pages);
243 391
244drop_pages: 392drop_pages:
@@ -247,6 +395,11 @@ drop_pages:
247 return ret; 395 return ret;
248} 396}
249 397
398/*L:380 Transferring data from one Guest to another is not as simple as I'd
399 * like. Once we've found the "struct lguest_dma_info" bound to the same
400 * address as the send, we need to copy into it.
401 *
402 * This function returns true if the destination array was empty. */
250static int dma_transfer(struct lguest *srclg, 403static int dma_transfer(struct lguest *srclg,
251 unsigned long udma, 404 unsigned long udma,
252 struct lguest_dma_info *dst) 405 struct lguest_dma_info *dst)
@@ -255,15 +408,23 @@ static int dma_transfer(struct lguest *srclg,
255 struct lguest *dstlg; 408 struct lguest *dstlg;
256 u32 i, dma = 0; 409 u32 i, dma = 0;
257 410
411 /* From the "struct lguest_dma_info" we found in the hash, grab the
412 * Guest. */
258 dstlg = &lguests[dst->guestid]; 413 dstlg = &lguests[dst->guestid];
259 /* Get our dma list. */ 414 /* Read in the source "struct lguest_dma" handed to SEND_DMA. */
260 lgread(srclg, &src_dma, udma, sizeof(src_dma)); 415 lgread(srclg, &src_dma, udma, sizeof(src_dma));
261 416
262 /* We can't deadlock against them dmaing to us, because this 417 /* We need the destination's mmap_sem, and we already hold the source's
263 * is all under the lguest_lock. */ 418 * mmap_sem for the futex key lookup. Normally this would suggest that
419 * we could deadlock if the destination Guest was trying to send to
420 * this source Guest at the same time, which is another reason that all
421 * I/O is done under the big lguest_lock. */
264 down_read(&dstlg->mm->mmap_sem); 422 down_read(&dstlg->mm->mmap_sem);
265 423
424 /* Look through the destination DMA array for an available buffer. */
266 for (i = 0; i < dst->num_dmas; i++) { 425 for (i = 0; i < dst->num_dmas; i++) {
426 /* We keep a "next_dma" pointer which often helps us avoid
427 * looking at lots of previously-filled entries. */
267 dma = (dst->next_dma + i) % dst->num_dmas; 428 dma = (dst->next_dma + i) % dst->num_dmas;
268 if (!lgread_other(dstlg, &dst_dma, 429 if (!lgread_other(dstlg, &dst_dma,
269 dst->dmas + dma * sizeof(struct lguest_dma), 430 dst->dmas + dma * sizeof(struct lguest_dma),
@@ -273,30 +434,46 @@ static int dma_transfer(struct lguest *srclg,
273 if (!dst_dma.used_len) 434 if (!dst_dma.used_len)
274 break; 435 break;
275 } 436 }
437
438 /* If we found a buffer, we do the actual data copy. */
276 if (i != dst->num_dmas) { 439 if (i != dst->num_dmas) {
277 unsigned long used_lenp; 440 unsigned long used_lenp;
278 unsigned int ret; 441 unsigned int ret;
279 442
280 ret = do_dma(srclg, &src_dma, dstlg, &dst_dma); 443 ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
281 /* Put used length in src. */ 444 /* Put used length in the source "struct lguest_dma"'s used_len
445 * field. It's a little tricky to figure out where that is,
446 * though. */
282 lgwrite_u32(srclg, 447 lgwrite_u32(srclg,
283 udma+offsetof(struct lguest_dma, used_len), ret); 448 udma+offsetof(struct lguest_dma, used_len), ret);
449 /* Transferring 0 bytes is OK if the source buffer was empty. */
284 if (ret == 0 && src_dma.len[0] != 0) 450 if (ret == 0 && src_dma.len[0] != 0)
285 goto fail; 451 goto fail;
286 452
287 /* Make sure destination sees contents before length. */ 453 /* The destination Guest might be running on a different CPU:
454 * we have to make sure that it will see the "used_len" field
455 * change to non-zero *after* it sees the data we copied into
456 * the buffer. Hence a write memory barrier. */
288 wmb(); 457 wmb();
458 /* Figuring out where this "struct lguest_dma"'s used_len field lives
459 * in the destination's array is also a little ugly. */
289 used_lenp = dst->dmas 460 used_lenp = dst->dmas
290 + dma * sizeof(struct lguest_dma) 461 + dma * sizeof(struct lguest_dma)
291 + offsetof(struct lguest_dma, used_len); 462 + offsetof(struct lguest_dma, used_len);
292 lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret)); 463 lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
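The Guest-side receive path must pair that wmb() with a read barrier, so it
only looks at the buffer contents after it has seen used_len change;
schematically (hypothetical Guest code, for illustration only):

	/* Hypothetical receiving-Guest logic: consume() is a made-up handler. */
	if (dma->used_len != 0) {
		rmb();		/* pairs with the Host's wmb() above */
		consume(dma, dma->used_len);
	}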
464 /* Move the cursor for next time. */
293 dst->next_dma++; 465 dst->next_dma++;
294 } 466 }
295 up_read(&dstlg->mm->mmap_sem); 467 up_read(&dstlg->mm->mmap_sem);
296 468
297 /* Do this last so dst doesn't simply sleep on lock. */ 469 /* We trigger the destination interrupt, even if the destination was
470 * empty and we didn't transfer anything: this gives them a chance to
471 * wake up and refill. */
298 set_bit(dst->interrupt, dstlg->irqs_pending); 472 set_bit(dst->interrupt, dstlg->irqs_pending);
473 /* Wake up the destination process. */
299 wake_up_process(dstlg->tsk); 474 wake_up_process(dstlg->tsk);
475 /* If we passed the last "struct lguest_dma", the receiver had no
476 * buffers left. */
300 return i == dst->num_dmas; 477 return i == dst->num_dmas;
301 478
302fail: 479fail:
@@ -304,6 +481,8 @@ fail:
304 return 0; 481 return 0;
305} 482}
306 483
484/*L:370 This is the counterpart to the BIND_DMA hypercall: the SEND_DMA
485 * hypercall. We find out who's listening, and send to them. */
307void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma) 486void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
308{ 487{
309 union futex_key key; 488 union futex_key key;
@@ -313,31 +492,43 @@ void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
313again: 492again:
314 mutex_lock(&lguest_lock); 493 mutex_lock(&lguest_lock);
315 down_read(fshared); 494 down_read(fshared);
495 /* Get the futex key for the address the Guest gave us */
316 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { 496 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
317 kill_guest(lg, "bad sending DMA key"); 497 kill_guest(lg, "bad sending DMA key");
318 goto unlock; 498 goto unlock;
319 } 499 }
320 /* Shared mapping? Look for other guests... */ 500 /* Since the key address must be a multiple of 4, the futex code uses the
501 * lower bit of the "offset" field (which would otherwise always be 0) to
502 * indicate a mapping which is shared with other processes (ie. Guests). */
321 if (key.shared.offset & 1) { 503 if (key.shared.offset & 1) {
322 struct lguest_dma_info *i; 504 struct lguest_dma_info *i;
505 /* Look through the hash for other Guests. */
323 list_for_each_entry(i, &dma_hash[hash(&key)], list) { 506 list_for_each_entry(i, &dma_hash[hash(&key)], list) {
507 /* Don't send to ourselves. */
324 if (i->guestid == lg->guestid) 508 if (i->guestid == lg->guestid)
325 continue; 509 continue;
326 if (!key_eq(&key, &i->key)) 510 if (!key_eq(&key, &i->key))
327 continue; 511 continue;
328 512
513 /* If dma_transfer() tells us the destination has no
514 * available buffers, we increment "empty". */
329 empty += dma_transfer(lg, udma, i); 515 empty += dma_transfer(lg, udma, i);
330 break; 516 break;
331 } 517 }
518 /* If the destination is empty, we release our locks and
519 * give the destination Guest a brief chance to restock. */
332 if (empty == 1) { 520 if (empty == 1) {
333 /* Give any recipients one chance to restock. */ 521 /* Give any recipients one chance to restock. */
334 up_read(&current->mm->mmap_sem); 522 up_read(&current->mm->mmap_sem);
335 mutex_unlock(&lguest_lock); 523 mutex_unlock(&lguest_lock);
524 /* Bump "empty" past 1, so next time round we won't wait again. */
336 empty++; 525 empty++;
337 goto again; 526 goto again;
338 } 527 }
339 } else { 528 } else {
340 /* Private mapping: tell our userspace. */ 529 /* Private mapping: Guest is sending to its Launcher. We set
530 * the "dma_is_pending" flag so that the main loop will exit
531 * and the Launcher's read() from /dev/lguest will return. */
341 lg->dma_is_pending = 1; 532 lg->dma_is_pending = 1;
342 lg->pending_dma = udma; 533 lg->pending_dma = udma;
343 lg->pending_key = ukey; 534 lg->pending_key = ukey;
@@ -346,6 +537,7 @@ unlock:
346 up_read(fshared); 537 up_read(fshared);
347 mutex_unlock(&lguest_lock); 538 mutex_unlock(&lguest_lock);
348} 539}
540/*:*/
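For symmetry with the BIND_DMA sketch earlier, the Guest side of SEND_DMA would
be something like this (again hypothetical; the real helper lives in the
Guest's bus code, which this diff doesn't touch):

	/* Hypothetical Guest-side sketch: hand one filled "struct lguest_dma"
	 * to whoever is bound to "key".  The Host writes the result into
	 * used_len, so we zero it first. */
	static void example_send_dma(unsigned long key, struct lguest_dma *dma)
	{
		dma->used_len = 0;
		hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
	}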
349 541
350void release_all_dma(struct lguest *lg) 542void release_all_dma(struct lguest *lg)
351{ 543{
@@ -361,7 +553,18 @@ void release_all_dma(struct lguest *lg)
361 up_read(&lg->mm->mmap_sem); 553 up_read(&lg->mm->mmap_sem);
362} 554}
363 555
364/* Userspace wants a dma buffer from this guest. */ 556/*M:007 We only return a single DMA buffer to the Launcher, but it would be
557 * more efficient to return a pointer to the entire array of DMA buffers, which
558 * it could cache and choose from whenever it wants.
559 *
560 * Currently the Launcher uses a write to /dev/lguest, and the return value is
561 * the address of the DMA structure with the interrupt number placed in
562 * dma->used_len. If we wanted to return the entire array, we would need to
563 * return the address, array size and interrupt number: this seems to require
564 * an ioctl(). :*/
565
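If someone took up that suggestion, the ioctl() payload might carry the three
values the comment names, something like this (purely hypothetical; none of
these names exist in the tree):

	/* Hypothetical ioctl argument for returning a whole DMA array at once. */
	struct lguest_dma_array_info {
		unsigned long dmas;	/* Guest address of the array */
		u16 num_dmas;		/* number of entries in it */
		u8 interrupt;		/* interrupt bound to the array */
	};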
566/*L:320 This routine looks for a DMA buffer registered by the Guest on the
567 * given key (using the BIND_DMA hypercall). */
365unsigned long get_dma_buffer(struct lguest *lg, 568unsigned long get_dma_buffer(struct lguest *lg,
366 unsigned long ukey, unsigned long *interrupt) 569 unsigned long ukey, unsigned long *interrupt)
367{ 570{
@@ -370,15 +573,29 @@ unsigned long get_dma_buffer(struct lguest *lg,
370 struct lguest_dma_info *i; 573 struct lguest_dma_info *i;
371 struct rw_semaphore *fshared = &current->mm->mmap_sem; 574 struct rw_semaphore *fshared = &current->mm->mmap_sem;
372 575
576 /* Take the Big Lguest Lock to stop other Guests sending this Guest DMA
577 * at the same time. */
373 mutex_lock(&lguest_lock); 578 mutex_lock(&lguest_lock);
579 /* To match between Guests sharing the same underlying memory we steal
580 * code from the futex infrastructure. This requires that we hold the
581 * "mmap_sem" for our process (the Launcher), and pass it to the futex
582 * code. */
374 down_read(fshared); 583 down_read(fshared);
584
585 /* This can fail if it's not a valid address, or if the address is not
586 * divisible by 4 (the futex code needs that, we don't really). */
375 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { 587 if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
376 kill_guest(lg, "bad registered DMA buffer"); 588 kill_guest(lg, "bad registered DMA buffer");
377 goto unlock; 589 goto unlock;
378 } 590 }
591 /* Search the hash table for matching entries (the Launcher can only
592 * send to its own Guest for the moment, so the entry must be for this
593 * Guest). */
379 list_for_each_entry(i, &dma_hash[hash(&key)], list) { 594 list_for_each_entry(i, &dma_hash[hash(&key)], list) {
380 if (key_eq(&key, &i->key) && i->guestid == lg->guestid) { 595 if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
381 unsigned int j; 596 unsigned int j;
597 /* Look through the registered DMA array for an
598 * available buffer. */
382 for (j = 0; j < i->num_dmas; j++) { 599 for (j = 0; j < i->num_dmas; j++) {
383 struct lguest_dma dma; 600 struct lguest_dma dma;
384 601
@@ -387,6 +604,8 @@ unsigned long get_dma_buffer(struct lguest *lg,
387 if (dma.used_len == 0) 604 if (dma.used_len == 0)
388 break; 605 break;
389 } 606 }
607 /* Store the interrupt the Guest wants when the buffer
608 * is used. */
390 *interrupt = i->interrupt; 609 *interrupt = i->interrupt;
391 break; 610 break;
392 } 611 }
@@ -396,4 +615,12 @@ unlock:
396 mutex_unlock(&lguest_lock); 615 mutex_unlock(&lguest_lock);
397 return ret; 616 return ret;
398} 617}
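The inner loop of get_dma_buffer() is partly elided by the hunk; from context,
the missing lines compute each entry's address and read it in, roughly like
this (a reconstruction, not verbatim):

	for (j = 0; j < i->num_dmas; j++) {
		struct lguest_dma dma;

		/* Address of the j'th "struct lguest_dma" in the Guest. */
		ret = i->dmas + j * sizeof(struct lguest_dma);
		lgread(lg, &dma, ret, sizeof(dma));
		/* An unfilled buffer has used_len 0: return its address. */
		if (dma.used_len == 0)
			break;
	}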
618/*:*/
399 619
620/*L:410 This really completes the Launcher. Not only have we now finished
621 * the longest chapter in our journey, but this also means we are over halfway
622 * through!
623 *
624 * Enough prevaricating around the bush: it is time for us to dive into the
625 * core of the Host, in "make Host".
626 */