-rw-r--r-- | Documentation/lguest/lguest.c | 599
-rw-r--r-- | drivers/lguest/core.c         |  24
-rw-r--r-- | drivers/lguest/io.c           | 247
-rw-r--r-- | drivers/lguest/lg.h           |  25
-rw-r--r-- | drivers/lguest/lguest_user.c  | 159
5 files changed, 982 insertions, 72 deletions
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index fc1bf70abfb1..d7e26f025959 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,12 +34,20 @@ | |||
34 | #include <termios.h> | 34 | #include <termios.h> |
35 | #include <getopt.h> | 35 | #include <getopt.h> |
36 | #include <zlib.h> | 36 | #include <zlib.h> |
37 | /*L:110 We can ignore the 28 include files we need for this program, but I do | ||
38 | * want to draw attention to the use of kernel-style types. | ||
39 | * | ||
40 | * As Linus said, "C is a Spartan language, and so should your naming be." I | ||
41 | * like these abbreviations and the header we need uses them, so we define them | ||
42 | * here. | ||
43 | */ | ||
37 | typedef unsigned long long u64; | 44 | typedef unsigned long long u64; |
38 | typedef uint32_t u32; | 45 | typedef uint32_t u32; |
39 | typedef uint16_t u16; | 46 | typedef uint16_t u16; |
40 | typedef uint8_t u8; | 47 | typedef uint8_t u8; |
41 | #include "../../include/linux/lguest_launcher.h" | 48 | #include "../../include/linux/lguest_launcher.h" |
42 | #include "../../include/asm-i386/e820.h" | 49 | #include "../../include/asm-i386/e820.h" |
50 | /*:*/ | ||
43 | 51 | ||
44 | #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ | 52 | #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ |
45 | #define NET_PEERNUM 1 | 53 | #define NET_PEERNUM 1 |
@@ -48,33 +56,52 @@ typedef uint8_t u8; | |||
48 | #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ | 56 | #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ |
49 | #endif | 57 | #endif |
50 | 58 | ||
59 | /*L:120 verbose is both a global flag and a macro. The C preprocessor allows | ||
60 | * this, and although I wouldn't recommend it, it works quite nicely here. */ | ||
51 | static bool verbose; | 61 | static bool verbose; |
52 | #define verbose(args...) \ | 62 | #define verbose(args...) \ |
53 | do { if (verbose) printf(args); } while(0) | 63 | do { if (verbose) printf(args); } while(0) |
64 | /*:*/ | ||
65 | |||
66 | /* The pipe to send commands to the waker process */ | ||
54 | static int waker_fd; | 67 | static int waker_fd; |
68 | /* The top of guest physical memory. */ | ||
55 | static u32 top; | 69 | static u32 top; |
56 | 70 | ||
71 | /* This is our list of devices. */ | ||
57 | struct device_list | 72 | struct device_list |
58 | { | 73 | { |
74 | /* Summary information about the devices in our list: ready to pass to | ||
75 | * select() to ask which need servicing.*/ | ||
59 | fd_set infds; | 76 | fd_set infds; |
60 | int max_infd; | 77 | int max_infd; |
61 | 78 | ||
79 | /* The descriptor page for the devices. */ | ||
62 | struct lguest_device_desc *descs; | 80 | struct lguest_device_desc *descs; |
81 | |||
82 | /* A single linked list of devices. */ | ||
63 | struct device *dev; | 83 | struct device *dev; |
84 | /* ... And an end pointer so we can easily append new devices */ | ||
64 | struct device **lastdev; | 85 | struct device **lastdev; |
65 | }; | 86 | }; |
66 | 87 | ||
88 | /* The device structure describes a single device. */ | ||
67 | struct device | 89 | struct device |
68 | { | 90 | { |
91 | /* The linked-list pointer. */ | ||
69 | struct device *next; | 92 | struct device *next; |
93 | /* The descriptor for this device, as mapped into the Guest. */ | ||
70 | struct lguest_device_desc *desc; | 94 | struct lguest_device_desc *desc; |
95 | /* The memory page(s) of this device, if any. Also mapped in Guest. */ | ||
71 | void *mem; | 96 | void *mem; |
72 | 97 | ||
73 | /* Watch this fd if handle_input non-NULL. */ | 98 | /* If handle_input is set, it wants to be called when this file |
99 | * descriptor is ready. */ | ||
74 | int fd; | 100 | int fd; |
75 | bool (*handle_input)(int fd, struct device *me); | 101 | bool (*handle_input)(int fd, struct device *me); |
76 | 102 | ||
77 | /* Watch DMA to this key if handle_input non-NULL. */ | 103 | /* If handle_output is set, it wants to be called when the Guest sends |
104 | * DMA to this key. */ | ||
78 | unsigned long watch_key; | 105 | unsigned long watch_key; |
79 | u32 (*handle_output)(int fd, const struct iovec *iov, | 106 | u32 (*handle_output)(int fd, const struct iovec *iov, |
80 | unsigned int num, struct device *me); | 107 | unsigned int num, struct device *me); |
@@ -83,6 +110,11 @@ struct device | |||
83 | void *priv; | 110 | void *priv; |
84 | }; | 111 | }; |
85 | 112 | ||
113 | /*L:130 | ||
114 | * Loading the Kernel. | ||
115 | * | ||
116 | * We start with a couple of simple helper routines. open_or_die() avoids | ||
117 | * error-checking code cluttering the callers: */ | ||
86 | static int open_or_die(const char *name, int flags) | 118 | static int open_or_die(const char *name, int flags) |
87 | { | 119 | { |
88 | int fd = open(name, flags); | 120 | int fd = open(name, flags); |
@@ -91,26 +123,38 @@ static int open_or_die(const char *name, int flags) | |||
91 | return fd; | 123 | return fd; |
92 | } | 124 | } |
93 | 125 | ||
126 | /* map_zeroed_pages() takes a (page-aligned) address and a number of pages. */ | ||
94 | static void *map_zeroed_pages(unsigned long addr, unsigned int num) | 127 | static void *map_zeroed_pages(unsigned long addr, unsigned int num) |
95 | { | 128 | { |
129 | /* We cache the /dev/zero file-descriptor so we only open it once. */ | ||
96 | static int fd = -1; | 130 | static int fd = -1; |
97 | 131 | ||
98 | if (fd == -1) | 132 | if (fd == -1) |
99 | fd = open_or_die("/dev/zero", O_RDONLY); | 133 | fd = open_or_die("/dev/zero", O_RDONLY); |
100 | 134 | ||
135 | /* We use a private mapping (ie. if we write to the page, it will be | ||
136 | * copied), and obviously we insist that it be mapped where we ask. */ | ||
101 | if (mmap((void *)addr, getpagesize() * num, | 137 | if (mmap((void *)addr, getpagesize() * num, |
102 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0) | 138 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0) |
103 | != (void *)addr) | 139 | != (void *)addr) |
104 | err(1, "Mmaping %u pages of /dev/zero @%p", num, (void *)addr); | 140 | err(1, "Mmaping %u pages of /dev/zero @%p", num, (void *)addr); |
141 | |||
142 | /* Returning the address is just a courtesy: it can simplify callers. */ | ||
105 | return (void *)addr; | 143 | return (void *)addr; |
106 | } | 144 | } |
107 | 145 | ||
108 | /* Find magic string marking entry point, return entry point. */ | 146 | /* To find out where to start we look for the magic Guest string, which marks |
147 | * the code we see in lguest_asm.S. This is a hack which we are currently | ||
148 | * plotting to replace with the normal Linux entry point. */ | ||
109 | static unsigned long entry_point(void *start, void *end, | 149 | static unsigned long entry_point(void *start, void *end, |
110 | unsigned long page_offset) | 150 | unsigned long page_offset) |
111 | { | 151 | { |
112 | void *p; | 152 | void *p; |
113 | 153 | ||
154 | /* The scan gives us the physical starting address. We want the | ||
155 | * virtual address in this case, and fortunately, we already figured | ||
156 | * out the physical-virtual difference and passed it here in | ||
157 | * "page_offset". */ | ||
114 | for (p = start; p < end; p++) | 158 | for (p = start; p < end; p++) |
115 | if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0) | 159 | if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0) |
116 | return (long)p + strlen("GenuineLguest") + page_offset; | 160 | return (long)p + strlen("GenuineLguest") + page_offset; |
@@ -118,7 +162,17 @@ static unsigned long entry_point(void *start, void *end, | |||
118 | err(1, "Is this image a genuine lguest?"); | 162 | err(1, "Is this image a genuine lguest?"); |
119 | } | 163 | } |
120 | 164 | ||
121 | /* Returns the entry point */ | 165 | /* This routine takes an open vmlinux image, which is in ELF, and maps it into |
166 | * the Guest memory. ELF = Executable and Linkable Format, which is the format used | ||
167 | * by all modern binaries on Linux including the kernel. | ||
168 | * | ||
169 | * The ELF headers give *two* addresses: a physical address, and a virtual | ||
170 | * address. The Guest kernel expects to be placed in memory at the physical | ||
171 | * address, and the page tables set up so it will correspond to that virtual | ||
172 | * address. We return the difference between the virtual and physical | ||
173 | * addresses in the "page_offset" pointer. | ||
174 | * | ||
175 | * We return the starting address. */ | ||
122 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr, | 176 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr, |
123 | unsigned long *page_offset) | 177 | unsigned long *page_offset) |
124 | { | 178 | { |
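
To make the page_offset idea above concrete (illustrative numbers only, using the typical values mentioned elsewhere in this file): a loadable segment with p_paddr = 0x00100000 and p_vaddr = 0xC0100000 gives page_offset = 0xC0100000 - 0x00100000 = 0xC0000000, the usual CONFIG_PAGE_OFFSET value, and every other loadable segment must show that same difference or map_elf() aborts.
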
@@ -127,40 +181,61 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr, | |||
127 | unsigned int i; | 181 | unsigned int i; |
128 | unsigned long start = -1UL, end = 0; | 182 | unsigned long start = -1UL, end = 0; |
129 | 183 | ||
130 | /* Sanity checks. */ | 184 | /* Sanity checks on the main ELF header: an x86 executable with a |
185 | * reasonable number of correctly-sized program headers. */ | ||
131 | if (ehdr->e_type != ET_EXEC | 186 | if (ehdr->e_type != ET_EXEC |
132 | || ehdr->e_machine != EM_386 | 187 | || ehdr->e_machine != EM_386 |
133 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) | 188 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) |
134 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) | 189 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) |
135 | errx(1, "Malformed elf header"); | 190 | errx(1, "Malformed elf header"); |
136 | 191 | ||
192 | /* An ELF executable contains an ELF header and a number of "program" | ||
193 | * headers which indicate which parts ("segments") of the program to | ||
194 | * load where. */ | ||
195 | |||
196 | /* We read in all the program headers at once: */ | ||
137 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) | 197 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) |
138 | err(1, "Seeking to program headers"); | 198 | err(1, "Seeking to program headers"); |
139 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) | 199 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) |
140 | err(1, "Reading program headers"); | 200 | err(1, "Reading program headers"); |
141 | 201 | ||
202 | /* We don't know page_offset yet. */ | ||
142 | *page_offset = 0; | 203 | *page_offset = 0; |
143 | /* We map the loadable segments at virtual addresses corresponding | 204 | |
144 | * to their physical addresses (our virtual == guest physical). */ | 205 | /* Try all the headers: there are usually only three. A read-only one, |
206 | * a read-write one, and a "note" section which isn't loadable. */ | ||
145 | for (i = 0; i < ehdr->e_phnum; i++) { | 207 | for (i = 0; i < ehdr->e_phnum; i++) { |
208 | /* If this isn't a loadable segment, we ignore it */ | ||
146 | if (phdr[i].p_type != PT_LOAD) | 209 | if (phdr[i].p_type != PT_LOAD) |
147 | continue; | 210 | continue; |
148 | 211 | ||
149 | verbose("Section %i: size %i addr %p\n", | 212 | verbose("Section %i: size %i addr %p\n", |
150 | i, phdr[i].p_memsz, (void *)phdr[i].p_paddr); | 213 | i, phdr[i].p_memsz, (void *)phdr[i].p_paddr); |
151 | 214 | ||
152 | /* We expect linear address space. */ | 215 | /* We expect a simple linear address space: every segment must |
216 | * have the same difference between virtual (p_vaddr) and | ||
217 | * physical (p_paddr) address. */ | ||
153 | if (!*page_offset) | 218 | if (!*page_offset) |
154 | *page_offset = phdr[i].p_vaddr - phdr[i].p_paddr; | 219 | *page_offset = phdr[i].p_vaddr - phdr[i].p_paddr; |
155 | else if (*page_offset != phdr[i].p_vaddr - phdr[i].p_paddr) | 220 | else if (*page_offset != phdr[i].p_vaddr - phdr[i].p_paddr) |
156 | errx(1, "Page offset of section %i different", i); | 221 | errx(1, "Page offset of section %i different", i); |
157 | 222 | ||
223 | /* We track the first and last address we mapped, so we can | ||
224 | * tell entry_point() where to scan. */ | ||
158 | if (phdr[i].p_paddr < start) | 225 | if (phdr[i].p_paddr < start) |
159 | start = phdr[i].p_paddr; | 226 | start = phdr[i].p_paddr; |
160 | if (phdr[i].p_paddr + phdr[i].p_filesz > end) | 227 | if (phdr[i].p_paddr + phdr[i].p_filesz > end) |
161 | end = phdr[i].p_paddr + phdr[i].p_filesz; | 228 | end = phdr[i].p_paddr + phdr[i].p_filesz; |
162 | 229 | ||
163 | /* We map everything private, writable. */ | 230 | /* We map this section of the file at its physical address. We |
231 | * map it read & write even if the header says this segment is | ||
232 | * read-only. The kernel really wants to be writable: it | ||
233 | * patches its own instructions which would normally be | ||
234 | * read-only. | ||
235 | * | ||
236 | * MAP_PRIVATE means that the page won't be copied until a | ||
237 | * write is done to it. This allows us to share much of the | ||
238 | * kernel memory between Guests. */ | ||
164 | addr = mmap((void *)phdr[i].p_paddr, | 239 | addr = mmap((void *)phdr[i].p_paddr, |
165 | phdr[i].p_filesz, | 240 | phdr[i].p_filesz, |
166 | PROT_READ|PROT_WRITE|PROT_EXEC, | 241 | PROT_READ|PROT_WRITE|PROT_EXEC, |
@@ -174,7 +249,31 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr, | |||
174 | return entry_point((void *)start, (void *)end, *page_offset); | 249 | return entry_point((void *)start, (void *)end, *page_offset); |
175 | } | 250 | } |
176 | 251 | ||
177 | /* This is amazingly reliable. */ | 252 | /*L:170 Prepare to be SHOCKED and AMAZED. And possibly a trifle nauseated. |
253 | * | ||
254 | * We know that CONFIG_PAGE_OFFSET sets what virtual address the kernel expects | ||
255 | * to be. We don't know what that option was, but we can figure it out | ||
256 | * approximately by looking at the addresses in the code. I chose the common | ||
257 | * case of reading a memory location into the %eax register: | ||
258 | * | ||
259 | * movl <some-address>, %eax | ||
260 | * | ||
261 | * This gets encoded as five bytes: "0xA1 <4-byte-address>". For example, | ||
262 | * "0xA1 0x18 0x60 0x47 0xC0" reads the address 0xC0476018 into %eax. | ||
263 | * | ||
264 | * In this example we can guess that the kernel was compiled with | ||
265 | * CONFIG_PAGE_OFFSET set to 0xC0000000 (it's always a round number). If the | ||
266 | * kernel were larger than 16MB, we might see 0xC1 addresses show up, but our | ||
267 | * kernel isn't that bloated yet. | ||
268 | * | ||
269 | * Unfortunately, x86 has variable-length instructions, so finding this | ||
270 | * particular instruction properly involves writing a disassembler. Instead, | ||
271 | * we rely on statistics. We look for "0xA1" and tally the different bytes | ||
272 | * which occur 4 bytes later (the "0xC0" in our example above). When one of | ||
273 | * those bytes appears three times, we can be reasonably confident that it | ||
274 | * forms the start of CONFIG_PAGE_OFFSET. | ||
275 | * | ||
276 | * This is amazingly reliable. */ | ||
178 | static unsigned long intuit_page_offset(unsigned char *img, unsigned long len) | 277 | static unsigned long intuit_page_offset(unsigned char *img, unsigned long len) |
179 | { | 278 | { |
180 | unsigned int i, possibilities[256] = { 0 }; | 279 | unsigned int i, possibilities[256] = { 0 }; |
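
The body of intuit_page_offset() lies outside the context shown above. A minimal sketch of the statistical scan as described, assuming only the top address byte matters (page offsets are "always a round number") and that three hits are enough, could look like the following; the real function may differ in its details:

static unsigned long intuit_page_offset_sketch(unsigned char *img,
					       unsigned long len)
{
	unsigned int i, possibilities[256] = { 0 };

	for (i = 0; i + 4 < len; i++) {
		/* 0xA1 starts "movl <4-byte-address>, %eax"; the byte four
		 * positions later is the top byte of that address. */
		if (img[i] == 0xA1 && ++possibilities[img[i+4]] >= 3)
			/* Assume a round page offset such as 0xC0000000, so
			 * only the top byte is significant. */
			return (unsigned long)img[i+4] << 24;
	}
	errx(1, "could not determine page offset");
}
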
@@ -187,30 +286,52 @@ static unsigned long intuit_page_offset(unsigned char *img, unsigned long len) | |||
187 | errx(1, "could not determine page offset"); | 286 | errx(1, "could not determine page offset"); |
188 | } | 287 | } |
189 | 288 | ||
289 | /*L:160 Unfortunately the entire ELF image isn't compressed: the segments | ||
290 | * which need loading are extracted and compressed raw. This denies us the | ||
291 | * information we need to make a fully-general loader. */ | ||
190 | static unsigned long unpack_bzimage(int fd, unsigned long *page_offset) | 292 | static unsigned long unpack_bzimage(int fd, unsigned long *page_offset) |
191 | { | 293 | { |
192 | gzFile f; | 294 | gzFile f; |
193 | int ret, len = 0; | 295 | int ret, len = 0; |
296 | /* A bzImage always gets loaded at physical address 1M. This is | ||
297 | * actually configurable as CONFIG_PHYSICAL_START, but as the comment | ||
298 | * there says, "Don't change this unless you know what you are doing". | ||
299 | * Indeed. */ | ||
194 | void *img = (void *)0x100000; | 300 | void *img = (void *)0x100000; |
195 | 301 | ||
302 | /* gzdopen takes our file descriptor (carefully placed at the start of | ||
303 | * the GZIP header we found) and returns a gzFile. */ | ||
196 | f = gzdopen(fd, "rb"); | 304 | f = gzdopen(fd, "rb"); |
305 | /* We read it into memory in 64k chunks until we hit the end. */ | ||
197 | while ((ret = gzread(f, img + len, 65536)) > 0) | 306 | while ((ret = gzread(f, img + len, 65536)) > 0) |
198 | len += ret; | 307 | len += ret; |
199 | if (ret < 0) | 308 | if (ret < 0) |
200 | err(1, "reading image from bzImage"); | 309 | err(1, "reading image from bzImage"); |
201 | 310 | ||
202 | verbose("Unpacked size %i addr %p\n", len, img); | 311 | verbose("Unpacked size %i addr %p\n", len, img); |
312 | |||
313 | /* Without the ELF header, we can't tell the virtual-physical gap. This is | ||
314 | * CONFIG_PAGE_OFFSET, and people do actually change it. Fortunately, | ||
315 | * I have a clever way of figuring it out from the code itself. */ | ||
203 | *page_offset = intuit_page_offset(img, len); | 316 | *page_offset = intuit_page_offset(img, len); |
204 | 317 | ||
205 | return entry_point(img, img + len, *page_offset); | 318 | return entry_point(img, img + len, *page_offset); |
206 | } | 319 | } |
207 | 320 | ||
321 | /*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're | ||
322 | * supposed to jump into it and it will unpack itself. We can't do that | ||
323 | * because the Guest can't run the unpacking code, and adding features to | ||
324 | * lguest kills puppies, so we don't want to. | ||
325 | * | ||
326 | * The bzImage is formed by putting the decompressing code in front of the | ||
327 | * compressed kernel code. So we can simply scan through it looking for the | ||
328 | * first "gzip" header, and start decompressing from there. */ | ||
208 | static unsigned long load_bzimage(int fd, unsigned long *page_offset) | 329 | static unsigned long load_bzimage(int fd, unsigned long *page_offset) |
209 | { | 330 | { |
210 | unsigned char c; | 331 | unsigned char c; |
211 | int state = 0; | 332 | int state = 0; |
212 | 333 | ||
213 | /* Ugly brute force search for gzip header. */ | 334 | /* GZIP header is 0x1F 0x8B <method> <flags>... <compressed-by>. */ |
214 | while (read(fd, &c, 1) == 1) { | 335 | while (read(fd, &c, 1) == 1) { |
215 | switch (state) { | 336 | switch (state) { |
216 | case 0: | 337 | case 0: |
@@ -227,8 +348,10 @@ static unsigned long load_bzimage(int fd, unsigned long *page_offset) | |||
227 | state++; | 348 | state++; |
228 | break; | 349 | break; |
229 | case 9: | 350 | case 9: |
351 | /* Seek back to the start of the gzip header. */ | ||
230 | lseek(fd, -10, SEEK_CUR); | 352 | lseek(fd, -10, SEEK_CUR); |
231 | if (c != 0x03) /* Compressed under UNIX. */ | 353 | /* One final check: "compressed under UNIX". */ |
354 | if (c != 0x03) | ||
232 | state = -1; | 355 | state = -1; |
233 | else | 356 | else |
234 | return unpack_bzimage(fd, page_offset); | 357 | return unpack_bzimage(fd, page_offset); |
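
Most of the byte-by-byte state machine is elided by this hunk. A simplified sketch of the same idea, scanning for the 0x1F 0x8B gzip magic and rewinding to it before calling unpack_bzimage(), is shown below; unlike the real code it skips the remaining header checks such as the "compressed under UNIX" byte:

static unsigned long find_gzip_header(int fd, unsigned long *page_offset)
{
	unsigned char prev = 0, c;

	while (read(fd, &c, 1) == 1) {
		if (prev == 0x1F && c == 0x8B) {
			/* Rewind over the two magic bytes just consumed so the
			 * decompressor sees the start of the gzip stream. */
			lseek(fd, -2, SEEK_CUR);
			return unpack_bzimage(fd, page_offset);
		}
		prev = c;
	}
	errx(1, "Could not find kernel in bzImage");
}
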
@@ -237,25 +360,43 @@ static unsigned long load_bzimage(int fd, unsigned long *page_offset) | |||
237 | errx(1, "Could not find kernel in bzImage"); | 360 | errx(1, "Could not find kernel in bzImage"); |
238 | } | 361 | } |
239 | 362 | ||
363 | /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels | ||
364 | * come wrapped up in the self-decompressing "bzImage" format. With some funky | ||
365 | * coding, we can load those, too. */ | ||
240 | static unsigned long load_kernel(int fd, unsigned long *page_offset) | 366 | static unsigned long load_kernel(int fd, unsigned long *page_offset) |
241 | { | 367 | { |
242 | Elf32_Ehdr hdr; | 368 | Elf32_Ehdr hdr; |
243 | 369 | ||
370 | /* Read in the first few bytes. */ | ||
244 | if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) | 371 | if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) |
245 | err(1, "Reading kernel"); | 372 | err(1, "Reading kernel"); |
246 | 373 | ||
374 | /* If it's an ELF file, it starts with "\177ELF" */ | ||
247 | if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0) | 375 | if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0) |
248 | return map_elf(fd, &hdr, page_offset); | 376 | return map_elf(fd, &hdr, page_offset); |
249 | 377 | ||
378 | /* Otherwise we assume it's a bzImage, and try to unpack it */ | ||
250 | return load_bzimage(fd, page_offset); | 379 | return load_bzimage(fd, page_offset); |
251 | } | 380 | } |
252 | 381 | ||
382 | /* This is a trivial little helper to align pages. Andi Kleen hated it because | ||
383 | * it calls getpagesize() twice: "it's dumb code." | ||
384 | * | ||
385 | * Kernel guys get really het up about optimization, even when it's not | ||
386 | * necessary. I leave this code as a reaction against that. */ | ||
253 | static inline unsigned long page_align(unsigned long addr) | 387 | static inline unsigned long page_align(unsigned long addr) |
254 | { | 388 | { |
389 | /* Add upwards and truncate downwards. */ | ||
255 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); | 390 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); |
256 | } | 391 | } |
257 | 392 | ||
258 | /* initrd gets loaded at top of memory: return length. */ | 393 | /*L:180 An "initial ram disk" is a disk image loaded into memory along with |
394 | * the kernel which the kernel can use to boot from without needing any | ||
395 | * drivers. Most distributions now use this as standard: the initrd contains | ||
396 | * the code to load the appropriate driver modules for the current machine. | ||
397 | * | ||
398 | * Importantly, James Morris works for Red Hat, and Fedora uses initrds for its | ||
399 | * kernels. He sent me this (and tells me when I break it). */ | ||
259 | static unsigned long load_initrd(const char *name, unsigned long mem) | 400 | static unsigned long load_initrd(const char *name, unsigned long mem) |
260 | { | 401 | { |
261 | int ifd; | 402 | int ifd; |
@@ -264,21 +405,35 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
264 | void *iaddr; | 405 | void *iaddr; |
265 | 406 | ||
266 | ifd = open_or_die(name, O_RDONLY); | 407 | ifd = open_or_die(name, O_RDONLY); |
408 | /* fstat() is needed to get the file size. */ | ||
267 | if (fstat(ifd, &st) < 0) | 409 | if (fstat(ifd, &st) < 0) |
268 | err(1, "fstat() on initrd '%s'", name); | 410 | err(1, "fstat() on initrd '%s'", name); |
269 | 411 | ||
412 | /* The length needs to be rounded up to a page size: mmap needs the | ||
413 | * address to be page aligned. */ | ||
270 | len = page_align(st.st_size); | 414 | len = page_align(st.st_size); |
415 | /* We map the initrd at the top of memory. */ | ||
271 | iaddr = mmap((void *)mem - len, st.st_size, | 416 | iaddr = mmap((void *)mem - len, st.st_size, |
272 | PROT_READ|PROT_EXEC|PROT_WRITE, | 417 | PROT_READ|PROT_EXEC|PROT_WRITE, |
273 | MAP_FIXED|MAP_PRIVATE, ifd, 0); | 418 | MAP_FIXED|MAP_PRIVATE, ifd, 0); |
274 | if (iaddr != (void *)mem - len) | 419 | if (iaddr != (void *)mem - len) |
275 | err(1, "Mmaping initrd '%s' returned %p not %p", | 420 | err(1, "Mmaping initrd '%s' returned %p not %p", |
276 | name, iaddr, (void *)mem - len); | 421 | name, iaddr, (void *)mem - len); |
422 | /* Once a file is mapped, you can close the file descriptor. It's a | ||
423 | * little odd, but quite useful. */ | ||
277 | close(ifd); | 424 | close(ifd); |
278 | verbose("mapped initrd %s size=%lu @ %p\n", name, st.st_size, iaddr); | 425 | verbose("mapped initrd %s size=%lu @ %p\n", name, st.st_size, iaddr); |
426 | |||
427 | /* We return the initrd size. */ | ||
279 | return len; | 428 | return len; |
280 | } | 429 | } |
281 | 430 | ||
431 | /* Once we know how much memory we have, and the address the Guest kernel | ||
432 | * expects, we can construct simple linear page tables which will get the Guest | ||
433 | * far enough into the boot to create its own. | ||
434 | * | ||
435 | * We lay them out of the way, just below the initrd (which is why we need to | ||
436 | * know its size). */ | ||
282 | static unsigned long setup_pagetables(unsigned long mem, | 437 | static unsigned long setup_pagetables(unsigned long mem, |
283 | unsigned long initrd_size, | 438 | unsigned long initrd_size, |
284 | unsigned long page_offset) | 439 | unsigned long page_offset) |
@@ -287,23 +442,32 @@ static unsigned long setup_pagetables(unsigned long mem, | |||
287 | unsigned int mapped_pages, i, linear_pages; | 442 | unsigned int mapped_pages, i, linear_pages; |
288 | unsigned int ptes_per_page = getpagesize()/sizeof(u32); | 443 | unsigned int ptes_per_page = getpagesize()/sizeof(u32); |
289 | 444 | ||
290 | /* If we can map all of memory above page_offset, we do so. */ | 445 | /* Ideally we map all physical memory starting at page_offset. |
446 | * However, if page_offset is 0xC0000000 we can only map 1G of physical | ||
447 | * (0xC0000000 + 1G overflows). */ | ||
291 | if (mem <= -page_offset) | 448 | if (mem <= -page_offset) |
292 | mapped_pages = mem/getpagesize(); | 449 | mapped_pages = mem/getpagesize(); |
293 | else | 450 | else |
294 | mapped_pages = -page_offset/getpagesize(); | 451 | mapped_pages = -page_offset/getpagesize(); |
295 | 452 | ||
296 | /* Each linear PTE page can map ptes_per_page pages. */ | 453 | /* Each PTE page can map ptes_per_page pages: how many do we need? */ |
297 | linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page; | 454 | linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page; |
298 | 455 | ||
299 | /* We lay out top-level then linear mapping immediately below initrd */ | 456 | /* We put the toplevel page directory page at the top of memory. */ |
300 | pgdir = (void *)mem - initrd_size - getpagesize(); | 457 | pgdir = (void *)mem - initrd_size - getpagesize(); |
458 | |||
459 | /* Now we use the next linear_pages pages as pte pages */ | ||
301 | linear = (void *)pgdir - linear_pages*getpagesize(); | 460 | linear = (void *)pgdir - linear_pages*getpagesize(); |
302 | 461 | ||
462 | /* Linear mapping is easy: put every page's address into the mapping in | ||
463 | * order. PAGE_PRESENT contains the flags Present, Writable and | ||
464 | * Executable. */ | ||
303 | for (i = 0; i < mapped_pages; i++) | 465 | for (i = 0; i < mapped_pages; i++) |
304 | linear[i] = ((i * getpagesize()) | PAGE_PRESENT); | 466 | linear[i] = ((i * getpagesize()) | PAGE_PRESENT); |
305 | 467 | ||
306 | /* Now set up pgd so that this memory is at page_offset */ | 468 | /* The top level points to the linear page table pages above. The |
469 | * entry representing page_offset points to the first one, and they | ||
470 | * continue from there. */ | ||
307 | for (i = 0; i < mapped_pages; i += ptes_per_page) { | 471 | for (i = 0; i < mapped_pages; i += ptes_per_page) { |
308 | pgdir[(i + page_offset/getpagesize())/ptes_per_page] | 472 | pgdir[(i + page_offset/getpagesize())/ptes_per_page] |
309 | = (((u32)linear + i*sizeof(u32)) | PAGE_PRESENT); | 473 | = (((u32)linear + i*sizeof(u32)) | PAGE_PRESENT); |
@@ -312,9 +476,13 @@ static unsigned long setup_pagetables(unsigned long mem, | |||
312 | verbose("Linear mapping of %u pages in %u pte pages at %p\n", | 476 | verbose("Linear mapping of %u pages in %u pte pages at %p\n", |
313 | mapped_pages, linear_pages, linear); | 477 | mapped_pages, linear_pages, linear); |
314 | 478 | ||
479 | /* We return the top level (guest-physical) address: the kernel needs | ||
480 | * to know where it is. */ | ||
315 | return (unsigned long)pgdir; | 481 | return (unsigned long)pgdir; |
316 | } | 482 | } |
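
For a sense of scale (illustrative numbers only): with 64MB of Guest memory, 4096-byte pages and page_offset = 0xC0000000, mapped_pages is 16384 and ptes_per_page is 1024, so linear_pages is 16. The Guest's initial page tables therefore occupy just 17 pages, one top-level page directory plus 16 PTE pages, tucked in below the initrd.
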
317 | 483 | ||
484 | /* Simple routine to roll all the commandline arguments together with spaces | ||
485 | * between them. */ | ||
318 | static void concat(char *dst, char *args[]) | 486 | static void concat(char *dst, char *args[]) |
319 | { | 487 | { |
320 | unsigned int i, len = 0; | 488 | unsigned int i, len = 0; |
@@ -328,6 +496,10 @@ static void concat(char *dst, char *args[]) | |||
328 | dst[len] = '\0'; | 496 | dst[len] = '\0'; |
329 | } | 497 | } |
330 | 498 | ||
499 | /* This is where we actually tell the kernel to initialize the Guest. We saw | ||
500 | * the arguments it expects when we looked at initialize() in lguest_user.c: | ||
501 | * the top physical page to allow, the top level pagetable, the entry point and | ||
502 | * the page_offset constant for the Guest. */ | ||
331 | static int tell_kernel(u32 pgdir, u32 start, u32 page_offset) | 503 | static int tell_kernel(u32 pgdir, u32 start, u32 page_offset) |
332 | { | 504 | { |
333 | u32 args[] = { LHREQ_INITIALIZE, | 505 | u32 args[] = { LHREQ_INITIALIZE, |
@@ -337,8 +509,11 @@ static int tell_kernel(u32 pgdir, u32 start, u32 page_offset) | |||
337 | fd = open_or_die("/dev/lguest", O_RDWR); | 509 | fd = open_or_die("/dev/lguest", O_RDWR); |
338 | if (write(fd, args, sizeof(args)) < 0) | 510 | if (write(fd, args, sizeof(args)) < 0) |
339 | err(1, "Writing to /dev/lguest"); | 511 | err(1, "Writing to /dev/lguest"); |
512 | |||
513 | /* We return the /dev/lguest file descriptor to control this Guest */ | ||
340 | return fd; | 514 | return fd; |
341 | } | 515 | } |
516 | /*:*/ | ||
342 | 517 | ||
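
The initializer list following LHREQ_INITIALIZE is cut off by the hunk above. Judging purely from the comment's list of arguments, a hypothetical reconstruction (argument names and order inferred from this file, not confirmed by the patch) might read:

static int tell_kernel_sketch(u32 pgdir, u32 start, u32 page_offset)
{
	u32 args[] = { LHREQ_INITIALIZE,
		       top/getpagesize(),  /* top physical page to allow */
		       pgdir,              /* top level pagetable */
		       start,              /* entry point */
		       page_offset };      /* virtual/physical offset */
	int fd = open_or_die("/dev/lguest", O_RDWR);

	if (write(fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
	return fd;
}
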
343 | static void set_fd(int fd, struct device_list *devices) | 518 | static void set_fd(int fd, struct device_list *devices) |
344 | { | 519 | { |
@@ -347,61 +522,108 @@ static void set_fd(int fd, struct device_list *devices) | |||
347 | devices->max_infd = fd; | 522 | devices->max_infd = fd; |
348 | } | 523 | } |
349 | 524 | ||
350 | /* When input arrives, we tell the kernel to kick lguest out with -EAGAIN. */ | 525 | /*L:200 |
526 | * The Waker. | ||
527 | * | ||
528 | * With a console and network devices, we can have lots of input which we need | ||
529 | * to process. We could try to tell the kernel what file descriptors to watch, | ||
530 | * but handing a file descriptor mask through to the kernel is fairly icky. | ||
531 | * | ||
532 | * Instead, we fork off a process which watches the file descriptors and writes | ||
533 | * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host | ||
534 | * loop to stop running the Guest. This causes it to return from the | ||
535 | * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset | ||
536 | * the LHREQ_BREAK and wake us up again. | ||
537 | * | ||
538 | * This, of course, is merely a different *kind* of icky. | ||
539 | */ | ||
351 | static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices) | 540 | static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices) |
352 | { | 541 | { |
542 | /* Add the pipe from the Launcher to the fdset in the device_list, so | ||
543 | * we watch it, too. */ | ||
353 | set_fd(pipefd, devices); | 544 | set_fd(pipefd, devices); |
354 | 545 | ||
355 | for (;;) { | 546 | for (;;) { |
356 | fd_set rfds = devices->infds; | 547 | fd_set rfds = devices->infds; |
357 | u32 args[] = { LHREQ_BREAK, 1 }; | 548 | u32 args[] = { LHREQ_BREAK, 1 }; |
358 | 549 | ||
550 | /* Wait until input is ready from one of the devices. */ | ||
359 | select(devices->max_infd+1, &rfds, NULL, NULL, NULL); | 551 | select(devices->max_infd+1, &rfds, NULL, NULL, NULL); |
552 | /* Is it a message from the Launcher? */ | ||
360 | if (FD_ISSET(pipefd, &rfds)) { | 553 | if (FD_ISSET(pipefd, &rfds)) { |
361 | int ignorefd; | 554 | int ignorefd; |
555 | /* If read() returns 0, it means the Launcher has | ||
556 | * exited. We silently follow. */ | ||
362 | if (read(pipefd, &ignorefd, sizeof(ignorefd)) == 0) | 557 | if (read(pipefd, &ignorefd, sizeof(ignorefd)) == 0) |
363 | exit(0); | 558 | exit(0); |
559 | /* Otherwise it's telling us there's a problem with one | ||
560 | * of the devices, and we should ignore that file | ||
561 | * descriptor from now on. */ | ||
364 | FD_CLR(ignorefd, &devices->infds); | 562 | FD_CLR(ignorefd, &devices->infds); |
365 | } else | 563 | } else /* Send LHREQ_BREAK command. */ |
366 | write(lguest_fd, args, sizeof(args)); | 564 | write(lguest_fd, args, sizeof(args)); |
367 | } | 565 | } |
368 | } | 566 | } |
369 | 567 | ||
568 | /* This routine just sets up a pipe to the Waker process. */ | ||
370 | static int setup_waker(int lguest_fd, struct device_list *device_list) | 569 | static int setup_waker(int lguest_fd, struct device_list *device_list) |
371 | { | 570 | { |
372 | int pipefd[2], child; | 571 | int pipefd[2], child; |
373 | 572 | ||
573 | /* We create a pipe to talk to the waker, and also so it knows when the | ||
574 | * Launcher dies (and closes pipe). */ | ||
374 | pipe(pipefd); | 575 | pipe(pipefd); |
375 | child = fork(); | 576 | child = fork(); |
376 | if (child == -1) | 577 | if (child == -1) |
377 | err(1, "forking"); | 578 | err(1, "forking"); |
378 | 579 | ||
379 | if (child == 0) { | 580 | if (child == 0) { |
581 | /* Close the "writing" end of our copy of the pipe */ | ||
380 | close(pipefd[1]); | 582 | close(pipefd[1]); |
381 | wake_parent(pipefd[0], lguest_fd, device_list); | 583 | wake_parent(pipefd[0], lguest_fd, device_list); |
382 | } | 584 | } |
585 | /* Close the reading end of our copy of the pipe. */ | ||
383 | close(pipefd[0]); | 586 | close(pipefd[0]); |
384 | 587 | ||
588 | /* Here is the fd used to talk to the waker. */ | ||
385 | return pipefd[1]; | 589 | return pipefd[1]; |
386 | } | 590 | } |
387 | 591 | ||
592 | /*L:210 | ||
593 | * Device Handling. | ||
594 | * | ||
595 | * When the Guest sends DMA to us, it sends us an array of addresses and sizes. | ||
596 | * We need to make sure it's not trying to reach into the Launcher itself, so | ||
597 | * we have a convenient routine which checks it and exits with an error message | ||
598 | * if something funny is going on: | ||
599 | */ | ||
388 | static void *_check_pointer(unsigned long addr, unsigned int size, | 600 | static void *_check_pointer(unsigned long addr, unsigned int size, |
389 | unsigned int line) | 601 | unsigned int line) |
390 | { | 602 | { |
603 | /* We have to separately check addr and addr+size, because size could | ||
604 | * be huge and addr + size might wrap around. */ | ||
391 | if (addr >= top || addr + size >= top) | 605 | if (addr >= top || addr + size >= top) |
392 | errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr); | 606 | errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr); |
607 | /* We return a pointer for the caller's convenience, now we know it's | ||
608 | * safe to use. */ | ||
393 | return (void *)addr; | 609 | return (void *)addr; |
394 | } | 610 | } |
611 | /* A macro which transparently hands the line number to the real function. */ | ||
395 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) | 612 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) |
396 | 613 | ||
397 | /* Returns pointer to dma->used_len */ | 614 | /* The Guest has given us the address of a "struct lguest_dma". We check it's |
615 | * OK and convert it to an iovec (which is a simple array of ptr/size | ||
616 | * pairs). */ | ||
398 | static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num) | 617 | static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num) |
399 | { | 618 | { |
400 | unsigned int i; | 619 | unsigned int i; |
401 | struct lguest_dma *udma; | 620 | struct lguest_dma *udma; |
402 | 621 | ||
622 | /* First we make sure that the array memory itself is valid. */ | ||
403 | udma = check_pointer(dma, sizeof(*udma)); | 623 | udma = check_pointer(dma, sizeof(*udma)); |
624 | /* Now we check each element */ | ||
404 | for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { | 625 | for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { |
626 | /* A zero length ends the array. */ | ||
405 | if (!udma->len[i]) | 627 | if (!udma->len[i]) |
406 | break; | 628 | break; |
407 | 629 | ||
@@ -409,9 +631,15 @@ static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num) | |||
409 | iov[i].iov_len = udma->len[i]; | 631 | iov[i].iov_len = udma->len[i]; |
410 | } | 632 | } |
411 | *num = i; | 633 | *num = i; |
634 | |||
635 | /* We return the pointer to where the caller should write the amount of | ||
636 | * the buffer used. */ | ||
412 | return &udma->used_len; | 637 | return &udma->used_len; |
413 | } | 638 | } |
414 | 639 | ||
640 | /* This routine gets a DMA buffer from the Guest for a given key, and converts | ||
641 | * it to an iovec array. It returns the interrupt the Guest wants when we're | ||
642 | * finished, and a pointer to the "used_len" field to fill in. */ | ||
415 | static u32 *get_dma_buffer(int fd, void *key, | 643 | static u32 *get_dma_buffer(int fd, void *key, |
416 | struct iovec iov[], unsigned int *num, u32 *irq) | 644 | struct iovec iov[], unsigned int *num, u32 *irq) |
417 | { | 645 | { |
@@ -419,16 +647,21 @@ static u32 *get_dma_buffer(int fd, void *key, | |||
419 | unsigned long udma; | 647 | unsigned long udma; |
420 | u32 *res; | 648 | u32 *res; |
421 | 649 | ||
650 | /* Ask the kernel for a DMA buffer corresponding to this key. */ | ||
422 | udma = write(fd, buf, sizeof(buf)); | 651 | udma = write(fd, buf, sizeof(buf)); |
652 | /* They haven't registered any, or they're all used? */ | ||
423 | if (udma == (unsigned long)-1) | 653 | if (udma == (unsigned long)-1) |
424 | return NULL; | 654 | return NULL; |
425 | 655 | ||
426 | /* Kernel stashes irq in ->used_len. */ | 656 | /* Convert it into our iovec array */ |
427 | res = dma2iov(udma, iov, num); | 657 | res = dma2iov(udma, iov, num); |
658 | /* The kernel stashes irq in ->used_len to get it out to us. */ | ||
428 | *irq = *res; | 659 | *irq = *res; |
660 | /* Return a pointer to ((struct lguest_dma *)udma)->used_len. */ | ||
429 | return res; | 661 | return res; |
430 | } | 662 | } |
431 | 663 | ||
664 | /* This is a convenient routine to send the Guest an interrupt. */ | ||
432 | static void trigger_irq(int fd, u32 irq) | 665 | static void trigger_irq(int fd, u32 irq) |
433 | { | 666 | { |
434 | u32 buf[] = { LHREQ_IRQ, irq }; | 667 | u32 buf[] = { LHREQ_IRQ, irq }; |
@@ -436,6 +669,10 @@ static void trigger_irq(int fd, u32 irq) | |||
436 | err(1, "Triggering irq %i", irq); | 669 | err(1, "Triggering irq %i", irq); |
437 | } | 670 | } |
438 | 671 | ||
672 | /* This simply sets up an iovec array where we can put data to be discarded. | ||
673 | * This happens when the Guest doesn't want or can't handle the input: we have | ||
674 | * to get rid of it somewhere, and if we bury it in the ceiling space it will | ||
675 | * start to smell after a week. */ | ||
439 | static void discard_iovec(struct iovec *iov, unsigned int *num) | 676 | static void discard_iovec(struct iovec *iov, unsigned int *num) |
440 | { | 677 | { |
441 | static char discard_buf[1024]; | 678 | static char discard_buf[1024]; |
@@ -444,19 +681,24 @@ static void discard_iovec(struct iovec *iov, unsigned int *num) | |||
444 | iov->iov_len = sizeof(discard_buf); | 681 | iov->iov_len = sizeof(discard_buf); |
445 | } | 682 | } |
446 | 683 | ||
684 | /* Here are the input terminal settings we save, and the routine to restore them | ||
685 | * on exit so the user can see what they type next. */ | ||
447 | static struct termios orig_term; | 686 | static struct termios orig_term; |
448 | static void restore_term(void) | 687 | static void restore_term(void) |
449 | { | 688 | { |
450 | tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); | 689 | tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); |
451 | } | 690 | } |
452 | 691 | ||
692 | /* We associate some data with the console for our exit hack. */ | ||
453 | struct console_abort | 693 | struct console_abort |
454 | { | 694 | { |
695 | /* How many times have they hit ^C? */ | ||
455 | int count; | 696 | int count; |
697 | /* When did they start? */ | ||
456 | struct timeval start; | 698 | struct timeval start; |
457 | }; | 699 | }; |
458 | 700 | ||
459 | /* We DMA input to buffer bound at start of console page. */ | 701 | /* This is the routine which handles console input (ie. stdin). */ |
460 | static bool handle_console_input(int fd, struct device *dev) | 702 | static bool handle_console_input(int fd, struct device *dev) |
461 | { | 703 | { |
462 | u32 irq = 0, *lenp; | 704 | u32 irq = 0, *lenp; |
@@ -465,24 +707,38 @@ static bool handle_console_input(int fd, struct device *dev) | |||
465 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 707 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; |
466 | struct console_abort *abort = dev->priv; | 708 | struct console_abort *abort = dev->priv; |
467 | 709 | ||
710 | /* First we get the console buffer from the Guest. The key is dev->mem | ||
711 | * which was set to 0 in setup_console(). */ | ||
468 | lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq); | 712 | lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq); |
469 | if (!lenp) { | 713 | if (!lenp) { |
714 | /* If it's not ready for input, warn and set up to discard. */ | ||
470 | warn("console: no dma buffer!"); | 715 | warn("console: no dma buffer!"); |
471 | discard_iovec(iov, &num); | 716 | discard_iovec(iov, &num); |
472 | } | 717 | } |
473 | 718 | ||
719 | /* This is why we convert to iovecs: the readv() call uses them, and so | ||
720 | * it reads straight into the Guest's buffer. */ | ||
474 | len = readv(dev->fd, iov, num); | 721 | len = readv(dev->fd, iov, num); |
475 | if (len <= 0) { | 722 | if (len <= 0) { |
723 | /* This implies that the console is closed, is /dev/null, or | ||
724 | * something went terribly wrong. We still go through the rest | ||
725 | * of the logic, though, especially the exit handling below. */ | ||
476 | warnx("Failed to get console input, ignoring console."); | 726 | warnx("Failed to get console input, ignoring console."); |
477 | len = 0; | 727 | len = 0; |
478 | } | 728 | } |
479 | 729 | ||
730 | /* If we read the data into the Guest, fill in the length and send the | ||
731 | * interrupt. */ | ||
480 | if (lenp) { | 732 | if (lenp) { |
481 | *lenp = len; | 733 | *lenp = len; |
482 | trigger_irq(fd, irq); | 734 | trigger_irq(fd, irq); |
483 | } | 735 | } |
484 | 736 | ||
485 | /* Three ^C within one second? Exit. */ | 737 | /* Three ^C within one second? Exit. |
738 | * | ||
739 | * This is such a hack, but works surprisingly well. Each ^C has to be | ||
740 | * in a buffer by itself, so they can't be too fast. But we check that | ||
741 | * we get three within about a second, so they can't be too slow. */ | ||
486 | if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) { | 742 | if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) { |
487 | if (!abort->count++) | 743 | if (!abort->count++) |
488 | gettimeofday(&abort->start, NULL); | 744 | gettimeofday(&abort->start, NULL); |
@@ -490,43 +746,60 @@ static bool handle_console_input(int fd, struct device *dev) | |||
490 | struct timeval now; | 746 | struct timeval now; |
491 | gettimeofday(&now, NULL); | 747 | gettimeofday(&now, NULL); |
492 | if (now.tv_sec <= abort->start.tv_sec+1) { | 748 | if (now.tv_sec <= abort->start.tv_sec+1) { |
493 | /* Make sure waker is not blocked in BREAK */ | ||
494 | u32 args[] = { LHREQ_BREAK, 0 }; | 749 | u32 args[] = { LHREQ_BREAK, 0 }; |
750 | /* Close the fd so Waker will know it has to | ||
751 | * exit. */ | ||
495 | close(waker_fd); | 752 | close(waker_fd); |
753 | /* Just in case waker is blocked in BREAK, send | ||
754 | * unbreak now. */ | ||
496 | write(fd, args, sizeof(args)); | 755 | write(fd, args, sizeof(args)); |
497 | exit(2); | 756 | exit(2); |
498 | } | 757 | } |
499 | abort->count = 0; | 758 | abort->count = 0; |
500 | } | 759 | } |
501 | } else | 760 | } else |
761 | /* Any other key resets the abort counter. */ | ||
502 | abort->count = 0; | 762 | abort->count = 0; |
503 | 763 | ||
764 | /* Now, if we didn't read anything, put the input terminal back and | ||
765 | * return failure (meaning, don't call us again). */ | ||
504 | if (!len) { | 766 | if (!len) { |
505 | restore_term(); | 767 | restore_term(); |
506 | return false; | 768 | return false; |
507 | } | 769 | } |
770 | /* Everything went OK! */ | ||
508 | return true; | 771 | return true; |
509 | } | 772 | } |
510 | 773 | ||
774 | /* Handling console output is much simpler than input. */ | ||
511 | static u32 handle_console_output(int fd, const struct iovec *iov, | 775 | static u32 handle_console_output(int fd, const struct iovec *iov, |
512 | unsigned num, struct device*dev) | 776 | unsigned num, struct device*dev) |
513 | { | 777 | { |
778 | /* Whatever the Guest sends, write it to standard output. Return the | ||
779 | * number of bytes written. */ | ||
514 | return writev(STDOUT_FILENO, iov, num); | 780 | return writev(STDOUT_FILENO, iov, num); |
515 | } | 781 | } |
516 | 782 | ||
783 | /* Guest->Host network output is also pretty easy. */ | ||
517 | static u32 handle_tun_output(int fd, const struct iovec *iov, | 784 | static u32 handle_tun_output(int fd, const struct iovec *iov, |
518 | unsigned num, struct device *dev) | 785 | unsigned num, struct device *dev) |
519 | { | 786 | { |
520 | /* Now we've seen output, we should warn if we can't get buffers. */ | 787 | /* We put a flag in the "priv" pointer of the network device, and set |
788 | * it as soon as we see output. We'll see why in handle_tun_input() */ | ||
521 | *(bool *)dev->priv = true; | 789 | *(bool *)dev->priv = true; |
790 | /* Whatever packet the Guest sent us, write it out to the tun | ||
791 | * device. */ | ||
522 | return writev(dev->fd, iov, num); | 792 | return writev(dev->fd, iov, num); |
523 | } | 793 | } |
524 | 794 | ||
795 | /* This matches the peer_key() in lguest_net.c. The key for any given slot | ||
796 | * is the address of the network device's page plus 4 * the slot number. */ | ||
525 | static unsigned long peer_offset(unsigned int peernum) | 797 | static unsigned long peer_offset(unsigned int peernum) |
526 | { | 798 | { |
527 | return 4 * peernum; | 799 | return 4 * peernum; |
528 | } | 800 | } |
529 | 801 | ||
802 | /* This is where we handle a packet coming in from the tun device */ | ||
530 | static bool handle_tun_input(int fd, struct device *dev) | 803 | static bool handle_tun_input(int fd, struct device *dev) |
531 | { | 804 | { |
532 | u32 irq = 0, *lenp; | 805 | u32 irq = 0, *lenp; |
@@ -534,17 +807,28 @@ static bool handle_tun_input(int fd, struct device *dev) | |||
534 | unsigned num; | 807 | unsigned num; |
535 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 808 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; |
536 | 809 | ||
810 | /* First we get a buffer the Guest has bound to its key. */ | ||
537 | lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num, | 811 | lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num, |
538 | &irq); | 812 | &irq); |
539 | if (!lenp) { | 813 | if (!lenp) { |
814 | /* Now, it's expected that if we try to send a packet too | ||
815 | * early, the Guest won't be ready yet. This is why we set a | ||
816 | * flag when the Guest sends its first packet. If it's sent a | ||
817 | * packet we assume it should be ready to receive them. | ||
818 | * | ||
819 | * Actually, this is what the status bits in the descriptor are | ||
820 | * for: we should *use* them. FIXME! */ | ||
540 | if (*(bool *)dev->priv) | 821 | if (*(bool *)dev->priv) |
541 | warn("network: no dma buffer!"); | 822 | warn("network: no dma buffer!"); |
542 | discard_iovec(iov, &num); | 823 | discard_iovec(iov, &num); |
543 | } | 824 | } |
544 | 825 | ||
826 | /* Read the packet from the device directly into the Guest's buffer. */ | ||
545 | len = readv(dev->fd, iov, num); | 827 | len = readv(dev->fd, iov, num); |
546 | if (len <= 0) | 828 | if (len <= 0) |
547 | err(1, "reading network"); | 829 | err(1, "reading network"); |
830 | |||
831 | /* Write the used_len, and trigger the interrupt for the Guest */ | ||
548 | if (lenp) { | 832 | if (lenp) { |
549 | *lenp = len; | 833 | *lenp = len; |
550 | trigger_irq(fd, irq); | 834 | trigger_irq(fd, irq); |
@@ -552,9 +836,13 @@ static bool handle_tun_input(int fd, struct device *dev) | |||
552 | verbose("tun input packet len %i [%02x %02x] (%s)\n", len, | 836 | verbose("tun input packet len %i [%02x %02x] (%s)\n", len, |
553 | ((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1], | 837 | ((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1], |
554 | lenp ? "sent" : "discarded"); | 838 | lenp ? "sent" : "discarded"); |
839 | /* All good. */ | ||
555 | return true; | 840 | return true; |
556 | } | 841 | } |
557 | 842 | ||
843 | /* The last device handling routine is block output: the Guest has sent a DMA | ||
844 | * to the block device. It will have placed the command it wants in the | ||
845 | * "struct lguest_block_page". */ | ||
558 | static u32 handle_block_output(int fd, const struct iovec *iov, | 846 | static u32 handle_block_output(int fd, const struct iovec *iov, |
559 | unsigned num, struct device *dev) | 847 | unsigned num, struct device *dev) |
560 | { | 848 | { |
@@ -564,36 +852,64 @@ static u32 handle_block_output(int fd, const struct iovec *iov, | |||
564 | struct iovec reply[LGUEST_MAX_DMA_SECTIONS]; | 852 | struct iovec reply[LGUEST_MAX_DMA_SECTIONS]; |
565 | off64_t device_len, off = (off64_t)p->sector * 512; | 853 | off64_t device_len, off = (off64_t)p->sector * 512; |
566 | 854 | ||
855 | /* First we extract the device length from the dev->priv pointer. */ | ||
567 | device_len = *(off64_t *)dev->priv; | 856 | device_len = *(off64_t *)dev->priv; |
568 | 857 | ||
858 | /* We first check that the read or write is within the length of the | ||
859 | * block file. */ | ||
569 | if (off >= device_len) | 860 | if (off >= device_len) |
570 | err(1, "Bad offset %llu vs %llu", off, device_len); | 861 | err(1, "Bad offset %llu vs %llu", off, device_len); |
862 | /* Move to the right location in the block file. This shouldn't fail, | ||
863 | * but best to check. */ | ||
571 | if (lseek64(dev->fd, off, SEEK_SET) != off) | 864 | if (lseek64(dev->fd, off, SEEK_SET) != off) |
572 | err(1, "Bad seek to sector %i", p->sector); | 865 | err(1, "Bad seek to sector %i", p->sector); |
573 | 866 | ||
574 | verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off); | 867 | verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off); |
575 | 868 | ||
869 | /* They were supposed to bind a reply buffer at key equal to the start | ||
870 | * of the block device memory. We need this to tell them when the | ||
871 | * request is finished. */ | ||
576 | lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq); | 872 | lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq); |
577 | if (!lenp) | 873 | if (!lenp) |
578 | err(1, "Block request didn't give us a dma buffer"); | 874 | err(1, "Block request didn't give us a dma buffer"); |
579 | 875 | ||
580 | if (p->type) { | 876 | if (p->type) { |
877 | /* A write request. The DMA they sent contained the data, so | ||
878 | * write it out. */ | ||
581 | len = writev(dev->fd, iov, num); | 879 | len = writev(dev->fd, iov, num); |
880 | /* Grr... Now we know how long the "struct lguest_dma" they | ||
881 | * sent was, we make sure they didn't try to write over the end | ||
882 | * of the block file (possibly extending it). */ | ||
582 | if (off + len > device_len) { | 883 | if (off + len > device_len) { |
884 | /* Trim it back to the correct length */ | ||
583 | ftruncate(dev->fd, device_len); | 885 | ftruncate(dev->fd, device_len); |
886 | /* Die, bad Guest, die. */ | ||
584 | errx(1, "Write past end %llu+%u", off, len); | 887 | errx(1, "Write past end %llu+%u", off, len); |
585 | } | 888 | } |
889 | /* The reply length is 0: we just send back an empty DMA to | ||
890 | * interrupt them and tell them the write is finished. */ | ||
586 | *lenp = 0; | 891 | *lenp = 0; |
587 | } else { | 892 | } else { |
893 | /* A read request. They sent an empty DMA to start the | ||
894 | * request, and we put the read contents into the reply | ||
895 | * buffer. */ | ||
588 | len = readv(dev->fd, reply, reply_num); | 896 | len = readv(dev->fd, reply, reply_num); |
589 | *lenp = len; | 897 | *lenp = len; |
590 | } | 898 | } |
591 | 899 | ||
900 | /* The result is 1 (done), 2 if there was an error (short read or | ||
901 | * write). */ | ||
592 | p->result = 1 + (p->bytes != len); | 902 | p->result = 1 + (p->bytes != len); |
903 | /* Now tell them we've used their reply buffer. */ | ||
593 | trigger_irq(fd, irq); | 904 | trigger_irq(fd, irq); |
905 | |||
906 | /* We're supposed to return the number of bytes of the output buffer we | ||
907 | * used. But the block device uses the "result" field instead, so we | ||
908 | * don't bother. */ | ||
594 | return 0; | 909 | return 0; |
595 | } | 910 | } |
596 | 911 | ||
912 | /* This is the generic routine we call when the Guest sends some DMA out. */ | ||
597 | static void handle_output(int fd, unsigned long dma, unsigned long key, | 913 | static void handle_output(int fd, unsigned long dma, unsigned long key, |
598 | struct device_list *devices) | 914 | struct device_list *devices) |
599 | { | 915 | { |
@@ -602,30 +918,53 @@ static void handle_output(int fd, unsigned long dma, unsigned long key, | |||
602 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; | 918 | struct iovec iov[LGUEST_MAX_DMA_SECTIONS]; |
603 | unsigned num = 0; | 919 | unsigned num = 0; |
604 | 920 | ||
921 | /* Convert the "struct lguest_dma" they're sending to a "struct | ||
922 | * iovec". */ | ||
605 | lenp = dma2iov(dma, iov, &num); | 923 | lenp = dma2iov(dma, iov, &num); |
924 | |||
925 | /* Check each device: if they expect output to this key, tell them to | ||
926 | * handle it. */ | ||
606 | for (i = devices->dev; i; i = i->next) { | 927 | for (i = devices->dev; i; i = i->next) { |
607 | if (i->handle_output && key == i->watch_key) { | 928 | if (i->handle_output && key == i->watch_key) { |
929 | /* We write the result straight into the used_len field | ||
930 | * for them. */ | ||
608 | *lenp = i->handle_output(fd, iov, num, i); | 931 | *lenp = i->handle_output(fd, iov, num, i); |
609 | return; | 932 | return; |
610 | } | 933 | } |
611 | } | 934 | } |
935 | |||
936 | /* This can happen: the kernel sends us any SEND_DMA which doesn't | ||
937 | * match another Guest. It could be that another Guest just left a | ||
938 | * network, for example. But it's unusual. */ | ||
612 | warnx("Pending dma %p, key %p", (void *)dma, (void *)key); | 939 | warnx("Pending dma %p, key %p", (void *)dma, (void *)key); |
613 | } | 940 | } |
614 | 941 | ||
942 | /* This is called when the waker wakes us up: check for incoming file | ||
943 | * descriptors. */ | ||
615 | static void handle_input(int fd, struct device_list *devices) | 944 | static void handle_input(int fd, struct device_list *devices) |
616 | { | 945 | { |
946 | /* select() wants a zeroed timeval to mean "don't wait". */ | ||
617 | struct timeval poll = { .tv_sec = 0, .tv_usec = 0 }; | 947 | struct timeval poll = { .tv_sec = 0, .tv_usec = 0 }; |
618 | 948 | ||
619 | for (;;) { | 949 | for (;;) { |
620 | struct device *i; | 950 | struct device *i; |
621 | fd_set fds = devices->infds; | 951 | fd_set fds = devices->infds; |
622 | 952 | ||
953 | /* If nothing is ready, we're done. */ | ||
623 | if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0) | 954 | if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0) |
624 | break; | 955 | break; |
625 | 956 | ||
957 | /* Otherwise, call the device(s) which have readable | ||
958 | * file descriptors and a method of handling them. */ | ||
626 | for (i = devices->dev; i; i = i->next) { | 959 | for (i = devices->dev; i; i = i->next) { |
627 | if (i->handle_input && FD_ISSET(i->fd, &fds)) { | 960 | if (i->handle_input && FD_ISSET(i->fd, &fds)) { |
961 | /* If handle_input() returns false, it means we | ||
962 | * should no longer service it. | ||
963 | * handle_console_input() does this. */ | ||
628 | if (!i->handle_input(fd, i)) { | 964 | if (!i->handle_input(fd, i)) { |
965 | /* Clear it from the set of input file | ||
966 | * descriptors kept at the head of the | ||
967 | * device list. */ | ||
629 | FD_CLR(i->fd, &devices->infds); | 968 | FD_CLR(i->fd, &devices->infds); |
630 | /* Tell waker to ignore it too... */ | 969 | /* Tell waker to ignore it too... */ |
631 | write(waker_fd, &i->fd, sizeof(i->fd)); | 970 | write(waker_fd, &i->fd, sizeof(i->fd)); |
@@ -635,6 +974,15 @@ static void handle_input(int fd, struct device_list *devices) | |||
635 | } | 974 | } |
636 | } | 975 | } |
637 | 976 | ||
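handle_input() relies on a small select() idiom: passing a zeroed struct timeval makes select() return immediately instead of blocking, so the loop drains whatever is already readable and then stops. A minimal standalone sketch of that idiom (watching stdin as a stand-in for the device descriptors; not code from this patch):

	#include <stdio.h>
	#include <sys/select.h>
	#include <unistd.h>

	int main(void)
	{
		fd_set fds;
		struct timeval poll = { .tv_sec = 0, .tv_usec = 0 };

		FD_ZERO(&fds);
		FD_SET(STDIN_FILENO, &fds);
		/* Zeroed timeval == "don't wait": select() returns at once. */
		if (select(STDIN_FILENO + 1, &fds, NULL, NULL, &poll) > 0)
			printf("stdin is readable right now\n");
		else
			printf("nothing waiting\n");
		return 0;
	}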
977 | /*L:190 | ||
978 | * Device Setup | ||
979 | * | ||
980 | * All devices need a descriptor so the Guest knows it exists, and a "struct | ||
981 | * device" so the Launcher can keep track of it. We have common helper | ||
982 | * routines to allocate them. | ||
983 | * | ||
984 | * This routine allocates a new "struct lguest_device_desc" from the descriptor | ||
985 | * table in the devices array just above the Guest's normal memory. */ | ||
638 | static struct lguest_device_desc * | 986 | static struct lguest_device_desc * |
639 | new_dev_desc(struct lguest_device_desc *descs, | 987 | new_dev_desc(struct lguest_device_desc *descs, |
640 | u16 type, u16 features, u16 num_pages) | 988 | u16 type, u16 features, u16 num_pages) |
@@ -646,6 +994,8 @@ new_dev_desc(struct lguest_device_desc *descs, | |||
646 | descs[i].type = type; | 994 | descs[i].type = type; |
647 | descs[i].features = features; | 995 | descs[i].features = features; |
648 | descs[i].num_pages = num_pages; | 996 | descs[i].num_pages = num_pages; |
997 | /* If they said the device needs memory, we allocate | ||
998 | * that now, bumping up the top of Guest memory. */ | ||
649 | if (num_pages) { | 999 | if (num_pages) { |
650 | map_zeroed_pages(top, num_pages); | 1000 | map_zeroed_pages(top, num_pages); |
651 | descs[i].pfn = top/getpagesize(); | 1001 | descs[i].pfn = top/getpagesize(); |
@@ -657,6 +1007,9 @@ new_dev_desc(struct lguest_device_desc *descs, | |||
657 | errx(1, "too many devices"); | 1007 | errx(1, "too many devices"); |
658 | } | 1008 | } |
659 | 1009 | ||
1010 | /* This monster routine does all the creation and setup of a new device, | ||
1011 | * including calling new_dev_desc() to allocate the descriptor and device | ||
1012 | * memory. */ | ||
660 | static struct device *new_device(struct device_list *devices, | 1013 | static struct device *new_device(struct device_list *devices, |
661 | u16 type, u16 num_pages, u16 features, | 1014 | u16 type, u16 num_pages, u16 features, |
662 | int fd, | 1015 | int fd, |
@@ -669,12 +1022,18 @@ static struct device *new_device(struct device_list *devices, | |||
669 | { | 1022 | { |
670 | struct device *dev = malloc(sizeof(*dev)); | 1023 | struct device *dev = malloc(sizeof(*dev)); |
671 | 1024 | ||
672 | /* Append to device list. */ | 1025 | /* Append to device list. Prepending to a singly linked list is |
1026 | * easier, but the user expects the devices to be arranged on the bus | ||
1027 | * in command-line order. The first network device on the command line | ||
1028 | * is eth0, the first block device /dev/lgba, etc. */ | ||
673 | *devices->lastdev = dev; | 1029 | *devices->lastdev = dev; |
674 | dev->next = NULL; | 1030 | dev->next = NULL; |
675 | devices->lastdev = &dev->next; | 1031 | devices->lastdev = &dev->next; |
676 | 1032 | ||
1033 | /* Now we populate the fields one at a time. */ | ||
677 | dev->fd = fd; | 1034 | dev->fd = fd; |
1035 | /* If we have an input handler for this file descriptor, then we add it | ||
1036 | * to the device_list's fdset and maxfd. */ | ||
678 | if (handle_input) | 1037 | if (handle_input) |
679 | set_fd(dev->fd, devices); | 1038 | set_fd(dev->fd, devices); |
680 | dev->desc = new_dev_desc(devices->descs, type, features, num_pages); | 1039 | dev->desc = new_dev_desc(devices->descs, type, features, num_pages); |
@@ -685,27 +1044,37 @@ static struct device *new_device(struct device_list *devices, | |||
685 | return dev; | 1044 | return dev; |
686 | } | 1045 | } |
687 | 1046 | ||
1047 | /* Our first setup routine is the console. It's a fairly simple device, but | ||
1048 | * UNIX tty handling makes it uglier than it could be. */ | ||
688 | static void setup_console(struct device_list *devices) | 1049 | static void setup_console(struct device_list *devices) |
689 | { | 1050 | { |
690 | struct device *dev; | 1051 | struct device *dev; |
691 | 1052 | ||
1053 | /* If we can save the initial standard input settings... */ | ||
692 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { | 1054 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { |
693 | struct termios term = orig_term; | 1055 | struct termios term = orig_term; |
1056 | /* Then we turn off echo, line buffering and ^C etc. We want a | ||
1057 | * raw input stream to the Guest. */ | ||
694 | term.c_lflag &= ~(ISIG|ICANON|ECHO); | 1058 | term.c_lflag &= ~(ISIG|ICANON|ECHO); |
695 | tcsetattr(STDIN_FILENO, TCSANOW, &term); | 1059 | tcsetattr(STDIN_FILENO, TCSANOW, &term); |
1060 | /* If we exit gracefully, the original settings will be | ||
1061 | * restored so the user can see what they're typing. */ | ||
696 | atexit(restore_term); | 1062 | atexit(restore_term); |
697 | } | 1063 | } |
698 | 1064 | ||
699 | /* We don't currently require a page for the console. */ | 1065 | /* We don't currently require any memory for the console, so we ask for |
1066 | * 0 pages. */ | ||
700 | dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0, | 1067 | dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0, |
701 | STDIN_FILENO, handle_console_input, | 1068 | STDIN_FILENO, handle_console_input, |
702 | LGUEST_CONSOLE_DMA_KEY, handle_console_output); | 1069 | LGUEST_CONSOLE_DMA_KEY, handle_console_output); |
1070 | /* We store the console state in dev->priv, and initialize it. */ | ||
703 | dev->priv = malloc(sizeof(struct console_abort)); | 1071 | dev->priv = malloc(sizeof(struct console_abort)); |
704 | ((struct console_abort *)dev->priv)->count = 0; | 1072 | ((struct console_abort *)dev->priv)->count = 0; |
705 | verbose("device %p: console\n", | 1073 | verbose("device %p: console\n", |
706 | (void *)(dev->desc->pfn * getpagesize())); | 1074 | (void *)(dev->desc->pfn * getpagesize())); |
707 | } | 1075 | } |
708 | 1076 | ||
1077 | /* Setting up a block file is also fairly straightforward. */ | ||
709 | static void setup_block_file(const char *filename, struct device_list *devices) | 1078 | static void setup_block_file(const char *filename, struct device_list *devices) |
710 | { | 1079 | { |
711 | int fd; | 1080 | int fd; |
@@ -713,20 +1082,47 @@ static void setup_block_file(const char *filename, struct device_list *devices) | |||
713 | off64_t *device_len; | 1082 | off64_t *device_len; |
714 | struct lguest_block_page *p; | 1083 | struct lguest_block_page *p; |
715 | 1084 | ||
1085 | /* We open with O_LARGEFILE because otherwise we get stuck at 2G. We | ||
1086 | * open with O_DIRECT because otherwise our benchmarks go much too | ||
1087 | * fast. */ | ||
716 | fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT); | 1088 | fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT); |
1089 | |||
1090 | /* We want one page, and have no input handler (the block file never | ||
1091 | * has anything interesting to say to us). Our timing will be quite | ||
1092 | * random, so it should be a reasonable randomness source. */ | ||
717 | dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1, | 1093 | dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1, |
718 | LGUEST_DEVICE_F_RANDOMNESS, | 1094 | LGUEST_DEVICE_F_RANDOMNESS, |
719 | fd, NULL, 0, handle_block_output); | 1095 | fd, NULL, 0, handle_block_output); |
1096 | |||
1097 | /* We store the device size in the private area */ | ||
720 | device_len = dev->priv = malloc(sizeof(*device_len)); | 1098 | device_len = dev->priv = malloc(sizeof(*device_len)); |
1099 | /* This is the safe way of establishing the size of our device: it | ||
1100 | * might be a normal file or an actual block device like /dev/hdb. */ | ||
721 | *device_len = lseek64(fd, 0, SEEK_END); | 1101 | *device_len = lseek64(fd, 0, SEEK_END); |
722 | p = dev->mem; | ||
723 | 1102 | ||
1103 | /* The device memory is a "struct lguest_block_page". It's zeroed | ||
1104 | * already; we just need to put in the device size. Block devices | ||
1105 | * think in sectors (ie. 512 byte chunks), so we translate here. */ | ||
1106 | p = dev->mem; | ||
724 | p->num_sectors = *device_len/512; | 1107 | p->num_sectors = *device_len/512; |
725 | verbose("device %p: block %i sectors\n", | 1108 | verbose("device %p: block %i sectors\n", |
726 | (void *)(dev->desc->pfn * getpagesize()), p->num_sectors); | 1109 | (void *)(dev->desc->pfn * getpagesize()), p->num_sectors); |
727 | } | 1110 | } |
728 | 1111 | ||
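The lseek64(fd, 0, SEEK_END) trick above works because the offset of the end of a regular file *or* a block device is its size in bytes. A small standalone sketch (file name taken from the command line; not code from this patch):

	#define _FILE_OFFSET_BITS 64
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		int fd;
		off_t len;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;
		len = lseek(fd, 0, SEEK_END);	/* end offset == size */
		printf("%s: %lld bytes, %lld sectors\n", argv[1],
		       (long long)len, (long long)(len / 512));
		close(fd);
		return 0;
	}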
729 | /* We use fnctl locks to reserve network slots (autocleanup!) */ | 1112 | /* |
1113 | * Network Devices. | ||
1114 | * | ||
1115 | * Setting up network devices is quite a pain, because we have three types. | ||
1116 | * First, we have the inter-Guest network. This is a file which is mapped into | ||
1117 | * the address space of the Guests who are on the network. Because it is a | ||
1118 | * shared mapping, the same page underlies all the devices, and they can send | ||
1119 | * DMA to each other. | ||
1120 | * | ||
1121 | * Remember from our network driver, the Guest is told what slot in the page it | ||
1122 | * is to use. We use exclusive fcntl locks to reserve a slot. If another | ||
1123 | * Guest is using a slot, the lock will fail and we try another. Because fcntl | ||
1124 | * locks are cleaned up automatically when we die, this cleverly means that our | ||
1125 | * reservation on the slot will vanish if we crash. */ | ||
730 | static unsigned int find_slot(int netfd, const char *filename) | 1126 | static unsigned int find_slot(int netfd, const char *filename) |
731 | { | 1127 | { |
732 | struct flock fl; | 1128 | struct flock fl; |
@@ -734,26 +1130,33 @@ static unsigned int find_slot(int netfd, const char *filename) | |||
734 | fl.l_type = F_WRLCK; | 1130 | fl.l_type = F_WRLCK; |
735 | fl.l_whence = SEEK_SET; | 1131 | fl.l_whence = SEEK_SET; |
736 | fl.l_len = 1; | 1132 | fl.l_len = 1; |
1133 | /* Try a 1-byte lock at each possible slot position */ | ||
737 | for (fl.l_start = 0; | 1134 | for (fl.l_start = 0; |
738 | fl.l_start < getpagesize()/sizeof(struct lguest_net); | 1135 | fl.l_start < getpagesize()/sizeof(struct lguest_net); |
739 | fl.l_start++) { | 1136 | fl.l_start++) { |
1137 | /* If we succeed, return the slot number. */ | ||
740 | if (fcntl(netfd, F_SETLK, &fl) == 0) | 1138 | if (fcntl(netfd, F_SETLK, &fl) == 0) |
741 | return fl.l_start; | 1139 | return fl.l_start; |
742 | } | 1140 | } |
743 | errx(1, "No free slots in network file %s", filename); | 1141 | errx(1, "No free slots in network file %s", filename); |
744 | } | 1142 | } |
745 | 1143 | ||
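The slot reservation above boils down to: take an exclusive one-byte fcntl() lock at byte offset "slot"; if another process already holds that byte the lock fails and we try the next one, and the kernel drops the lock automatically if we crash. A standalone sketch of the same trick (the file name and slot count are invented for the example; not code from this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* One lock byte per slot, just like find_slot(). */
	static int grab_slot(int fd, unsigned int nslots)
	{
		struct flock fl = { .l_type = F_WRLCK,
				    .l_whence = SEEK_SET, .l_len = 1 };

		for (fl.l_start = 0; fl.l_start < (off_t)nslots; fl.l_start++)
			if (fcntl(fd, F_SETLK, &fl) == 0)
				return fl.l_start;	/* ours now */
		return -1;				/* all taken */
	}

	int main(void)
	{
		int fd = open("/tmp/lguest-net-demo", O_RDWR|O_CREAT, 0600);
		int slot;

		if (fd < 0)
			return 1;
		slot = grab_slot(fd, 128);
		printf("reserved slot %d\n", slot);
		pause();	/* keep holding the lock until killed */
		return 0;
	}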
1144 | /* This function sets up the network file */ | ||
746 | static void setup_net_file(const char *filename, | 1145 | static void setup_net_file(const char *filename, |
747 | struct device_list *devices) | 1146 | struct device_list *devices) |
748 | { | 1147 | { |
749 | int netfd; | 1148 | int netfd; |
750 | struct device *dev; | 1149 | struct device *dev; |
751 | 1150 | ||
1151 | /* We don't use open_or_die() here: for friendliness we create the file | ||
1152 | * if it doesn't already exist. */ | ||
752 | netfd = open(filename, O_RDWR, 0); | 1153 | netfd = open(filename, O_RDWR, 0); |
753 | if (netfd < 0) { | 1154 | if (netfd < 0) { |
754 | if (errno == ENOENT) { | 1155 | if (errno == ENOENT) { |
755 | netfd = open(filename, O_RDWR|O_CREAT, 0600); | 1156 | netfd = open(filename, O_RDWR|O_CREAT, 0600); |
756 | if (netfd >= 0) { | 1157 | if (netfd >= 0) { |
1158 | /* If we succeeded, initialize the file with a | ||
1159 | * blank page. */ | ||
757 | char page[getpagesize()]; | 1160 | char page[getpagesize()]; |
758 | memset(page, 0, sizeof(page)); | 1161 | memset(page, 0, sizeof(page)); |
759 | write(netfd, page, sizeof(page)); | 1162 | write(netfd, page, sizeof(page)); |
@@ -763,11 +1166,15 @@ static void setup_net_file(const char *filename, | |||
763 | err(1, "cannot open net file '%s'", filename); | 1166 | err(1, "cannot open net file '%s'", filename); |
764 | } | 1167 | } |
765 | 1168 | ||
1169 | /* We need 1 page, and the features indicate the slot to use and that | ||
1170 | * no checksum is needed. We never touch this device again; it's | ||
1171 | * between the Guests on the network, so we don't register input or | ||
1172 | * output handlers. */ | ||
766 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, | 1173 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, |
767 | find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM, | 1174 | find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM, |
768 | -1, NULL, 0, NULL); | 1175 | -1, NULL, 0, NULL); |
769 | 1176 | ||
770 | /* We overwrite the /dev/zero mapping with the actual file. */ | 1177 | /* Map the shared file. */ |
771 | if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE, | 1178 | if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE, |
772 | MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem) | 1179 | MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem) |
773 | err(1, "could not mmap '%s'", filename); | 1180 | err(1, "could not mmap '%s'", filename); |
@@ -775,6 +1182,7 @@ static void setup_net_file(const char *filename, | |||
775 | (void *)(dev->desc->pfn * getpagesize()), filename, | 1182 | (void *)(dev->desc->pfn * getpagesize()), filename, |
776 | dev->desc->features & ~LGUEST_NET_F_NOCSUM); | 1183 | dev->desc->features & ~LGUEST_NET_F_NOCSUM); |
777 | } | 1184 | } |
1185 | /*:*/ | ||
778 | 1186 | ||
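The mmap() with MAP_FIXED|MAP_SHARED above is slightly sneaky: dev->mem was already mapped as zeroed pages when the device was created, and mapping the network file at that exact address replaces the placeholder in place, so the address the Guest was told about now refers to the shared page. A standalone sketch of that replace-in-place pattern (the file path is only an example; not code from this patch):

	#define _GNU_SOURCE
	#include <err.h>
	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = getpagesize();
		int fd = open("/tmp/shared-page", O_RDWR|O_CREAT, 0600);
		char *mem;

		if (fd < 0 || ftruncate(fd, len) != 0)
			err(1, "preparing backing file");

		/* A private, zero-filled placeholder mapping... */
		mem = mmap(NULL, len, PROT_READ|PROT_WRITE,
			   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (mem == MAP_FAILED)
			err(1, "anonymous mmap");

		/* ...replaced, at the same address, by the shared file. */
		if (mmap(mem, len, PROT_READ|PROT_WRITE,
			 MAP_FIXED|MAP_SHARED, fd, 0) != mem)
			err(1, "mmap over placeholder");

		strcpy(mem, "now visible to every process mapping this file");
		return 0;
	}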
779 | static u32 str2ip(const char *ipaddr) | 1187 | static u32 str2ip(const char *ipaddr) |
780 | { | 1188 | { |
@@ -784,7 +1192,11 @@ static u32 str2ip(const char *ipaddr) | |||
784 | return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8) | byte[3]; | 1192 | return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8) | byte[3]; |
785 | } | 1193 | } |
786 | 1194 | ||
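Only the tail of str2ip() is visible in this hunk: it packs four already-parsed bytes into a host-order 32-bit value by shifting. A standalone sketch, with an assumed sscanf() parse in front of the shift-and-or line shown above (not the patch's exact code):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t str2ip_demo(const char *ipaddr)
	{
		unsigned int byte[4];

		/* Parsing step assumed for the demo. */
		if (sscanf(ipaddr, "%u.%u.%u.%u",
			   &byte[0], &byte[1], &byte[2], &byte[3]) != 4)
			return 0;
		return (byte[0] << 24) | (byte[1] << 16)
			| (byte[2] << 8) | byte[3];
	}

	int main(void)
	{
		/* 192.168.19.1 -> 0xc0a81301 */
		printf("%#x\n", str2ip_demo("192.168.19.1"));
		return 0;
	}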
787 | /* adapted from libbridge */ | 1195 | /* This code is "adapted" from libbridge: it attaches the Host end of the |
1196 | * network device to the bridge device specified by the command line. | ||
1197 | * | ||
1198 | * This is yet another James Morris contribution (I'm an IP-level guy, so I | ||
1199 | * dislike bridging), and I just try not to break it. */ | ||
788 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) | 1200 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) |
789 | { | 1201 | { |
790 | int ifidx; | 1202 | int ifidx; |
@@ -803,12 +1215,16 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name) | |||
803 | err(1, "can't add %s to bridge %s", if_name, br_name); | 1215 | err(1, "can't add %s to bridge %s", if_name, br_name); |
804 | } | 1216 | } |
805 | 1217 | ||
1218 | /* This sets up the Host end of the network device with an IP address, brings | ||
1219 | * it up so packets will flow, then copies the MAC address into the hwaddr | ||
1220 | * pointer (in practice, the Host's slot in the network device's memory). */ | ||
806 | static void configure_device(int fd, const char *devname, u32 ipaddr, | 1221 | static void configure_device(int fd, const char *devname, u32 ipaddr, |
807 | unsigned char hwaddr[6]) | 1222 | unsigned char hwaddr[6]) |
808 | { | 1223 | { |
809 | struct ifreq ifr; | 1224 | struct ifreq ifr; |
810 | struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr; | 1225 | struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr; |
811 | 1226 | ||
1227 | /* Don't read these incantations. Just cut & paste them like I did! */ | ||
812 | memset(&ifr, 0, sizeof(ifr)); | 1228 | memset(&ifr, 0, sizeof(ifr)); |
813 | strcpy(ifr.ifr_name, devname); | 1229 | strcpy(ifr.ifr_name, devname); |
814 | sin->sin_family = AF_INET; | 1230 | sin->sin_family = AF_INET; |
@@ -819,12 +1235,19 @@ static void configure_device(int fd, const char *devname, u32 ipaddr, | |||
819 | if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0) | 1235 | if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0) |
820 | err(1, "Bringing interface %s up", devname); | 1236 | err(1, "Bringing interface %s up", devname); |
821 | 1237 | ||
1238 | /* SIOC stands for Socket I/O Control. G means Get (vs S for Set | ||
1239 | * above). IF means Interface, and HWADDR is hardware address. | ||
1240 | * Simple! */ | ||
822 | if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0) | 1241 | if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0) |
823 | err(1, "getting hw address for %s", devname); | 1242 | err(1, "getting hw address for %s", devname); |
824 | |||
825 | memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6); | 1243 | memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6); |
826 | } | 1244 | } |
827 | 1245 | ||
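The same ifreq/ioctl pattern works on its own for just reading a MAC address: any AF_INET datagram socket can carry interface ioctls, and SIOCGIFHWADDR fills in ifr_hwaddr. A standalone sketch ("eth0" is only an example interface name; not code from this patch):

	#include <err.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct ifreq ifr;
		unsigned char hwaddr[6];
		int fd = socket(PF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			err(1, "opening IP socket");
		memset(&ifr, 0, sizeof(ifr));
		strcpy(ifr.ifr_name, "eth0");
		if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
			err(1, "getting hw address for %s", ifr.ifr_name);
		memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       hwaddr[0], hwaddr[1], hwaddr[2],
		       hwaddr[3], hwaddr[4], hwaddr[5]);
		close(fd);
		return 0;
	}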
1246 | /*L:195 The other kind of network is a Host<->Guest network. This can either | ||
1247 | * use bridging or routing, but the principle is the same: it uses the "tun" | ||
1248 | * device to inject packets into the Host as if they came in from a normal | ||
1249 | * network card. We just shunt packets between the Guest and the tun | ||
1250 | * device. */ | ||
828 | static void setup_tun_net(const char *arg, struct device_list *devices) | 1251 | static void setup_tun_net(const char *arg, struct device_list *devices) |
829 | { | 1252 | { |
830 | struct device *dev; | 1253 | struct device *dev; |
@@ -833,36 +1256,56 @@ static void setup_tun_net(const char *arg, struct device_list *devices) | |||
833 | u32 ip; | 1256 | u32 ip; |
834 | const char *br_name = NULL; | 1257 | const char *br_name = NULL; |
835 | 1258 | ||
1259 | /* We open the /dev/net/tun device and tell it we want a tap device. A | ||
1260 | * tap device is like a tun device, only somehow different. To tell | ||
1261 | * the truth, I completely blundered my way through this code, but it | ||
1262 | * works now! */ | ||
836 | netfd = open_or_die("/dev/net/tun", O_RDWR); | 1263 | netfd = open_or_die("/dev/net/tun", O_RDWR); |
837 | memset(&ifr, 0, sizeof(ifr)); | 1264 | memset(&ifr, 0, sizeof(ifr)); |
838 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI; | 1265 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI; |
839 | strcpy(ifr.ifr_name, "tap%d"); | 1266 | strcpy(ifr.ifr_name, "tap%d"); |
840 | if (ioctl(netfd, TUNSETIFF, &ifr) != 0) | 1267 | if (ioctl(netfd, TUNSETIFF, &ifr) != 0) |
841 | err(1, "configuring /dev/net/tun"); | 1268 | err(1, "configuring /dev/net/tun"); |
1269 | /* We don't need checksums calculated for packets coming in this | ||
1270 | * device: trust us! */ | ||
842 | ioctl(netfd, TUNSETNOCSUM, 1); | 1271 | ioctl(netfd, TUNSETNOCSUM, 1); |
843 | 1272 | ||
844 | /* You will be peer 1: we should create enough jitter to randomize */ | 1273 | /* We create the net device with 1 page, using the features field of |
1274 | * the descriptor to tell the Guest it is in slot 1 (NET_PEERNUM), and | ||
1275 | * that the device has fairly random timing. We do *not* specify | ||
1276 | * LGUEST_NET_F_NOCSUM: these packets can reach the real world. | ||
1277 | * | ||
1278 | * We will put our MAC address in slot 0 for the Guest to see, so | ||
1279 | * it will send packets to us using the key "peer_offset(0)": */ | ||
845 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, | 1280 | dev = new_device(devices, LGUEST_DEVICE_T_NET, 1, |
846 | NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd, | 1281 | NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd, |
847 | handle_tun_input, peer_offset(0), handle_tun_output); | 1282 | handle_tun_input, peer_offset(0), handle_tun_output); |
1283 | |||
1284 | /* We keep a flag which says whether we've seen packets come out from | ||
1285 | * this network device. */ | ||
848 | dev->priv = malloc(sizeof(bool)); | 1286 | dev->priv = malloc(sizeof(bool)); |
849 | *(bool *)dev->priv = false; | 1287 | *(bool *)dev->priv = false; |
850 | 1288 | ||
1289 | /* We need a socket to perform the magic network ioctls to bring up the | ||
1290 | * tap interface, connect to the bridge etc. Any socket will do! */ | ||
851 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); | 1291 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); |
852 | if (ipfd < 0) | 1292 | if (ipfd < 0) |
853 | err(1, "opening IP socket"); | 1293 | err(1, "opening IP socket"); |
854 | 1294 | ||
1295 | /* If the command line was --tunnet=bridge:<name> do bridging. */ | ||
855 | if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) { | 1296 | if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) { |
856 | ip = INADDR_ANY; | 1297 | ip = INADDR_ANY; |
857 | br_name = arg + strlen(BRIDGE_PFX); | 1298 | br_name = arg + strlen(BRIDGE_PFX); |
858 | add_to_bridge(ipfd, ifr.ifr_name, br_name); | 1299 | add_to_bridge(ipfd, ifr.ifr_name, br_name); |
859 | } else | 1300 | } else /* It is an IP address to set up the device with */ |
860 | ip = str2ip(arg); | 1301 | ip = str2ip(arg); |
861 | 1302 | ||
862 | /* We are peer 0, ie. first slot. */ | 1303 | /* We are peer 0, ie. first slot, so we hand dev->mem to this routine |
1304 | * to write the MAC address at the start of the device memory. */ | ||
863 | configure_device(ipfd, ifr.ifr_name, ip, dev->mem); | 1305 | configure_device(ipfd, ifr.ifr_name, ip, dev->mem); |
864 | 1306 | ||
865 | /* Set "promisc" bit: we want every single packet. */ | 1307 | /* Set "promisc" bit: we want every single packet if we're going to |
1308 | * bridge to other machines (and otherwise it doesn't matter). */ | ||
866 | *((u8 *)dev->mem) |= 0x1; | 1309 | *((u8 *)dev->mem) |= 0x1; |
867 | 1310 | ||
868 | close(ipfd); | 1311 | close(ipfd); |
@@ -873,7 +1316,10 @@ static void setup_tun_net(const char *arg, struct device_list *devices) | |||
873 | if (br_name) | 1316 | if (br_name) |
874 | verbose("attached to bridge: %s\n", br_name); | 1317 | verbose("attached to bridge: %s\n", br_name); |
875 | } | 1318 | } |
1319 | /* That's the end of device setup. */ | ||
876 | 1320 | ||
1321 | /*L:220 Finally we reach the core of the Launcher, which runs the Guest, serves | ||
1322 | * its input and output, and eventually lays it to rest. */ | ||
877 | static void __attribute__((noreturn)) | 1323 | static void __attribute__((noreturn)) |
878 | run_guest(int lguest_fd, struct device_list *device_list) | 1324 | run_guest(int lguest_fd, struct device_list *device_list) |
879 | { | 1325 | { |
@@ -885,20 +1331,37 @@ run_guest(int lguest_fd, struct device_list *device_list) | |||
885 | /* We read from the /dev/lguest device to run the Guest. */ | 1331 | /* We read from the /dev/lguest device to run the Guest. */ |
886 | readval = read(lguest_fd, arr, sizeof(arr)); | 1332 | readval = read(lguest_fd, arr, sizeof(arr)); |
887 | 1333 | ||
1334 | /* The read can only really return sizeof(arr) (the Guest did a | ||
1335 | * SEND_DMA to us), or an error. */ | ||
1336 | |||
1337 | /* For a successful read, arr[0] is the address of the "struct | ||
1338 | * lguest_dma", and arr[1] is the key the Guest sent to. */ | ||
888 | if (readval == sizeof(arr)) { | 1339 | if (readval == sizeof(arr)) { |
889 | handle_output(lguest_fd, arr[0], arr[1], device_list); | 1340 | handle_output(lguest_fd, arr[0], arr[1], device_list); |
890 | continue; | 1341 | continue; |
1342 | /* ENOENT means the Guest died. Reading tells us why. */ | ||
891 | } else if (errno == ENOENT) { | 1343 | } else if (errno == ENOENT) { |
892 | char reason[1024] = { 0 }; | 1344 | char reason[1024] = { 0 }; |
893 | read(lguest_fd, reason, sizeof(reason)-1); | 1345 | read(lguest_fd, reason, sizeof(reason)-1); |
894 | errx(1, "%s", reason); | 1346 | errx(1, "%s", reason); |
1347 | /* EAGAIN means the waker wanted us to look at some input. | ||
1348 | * Anything else means a bug or incompatible change. */ | ||
895 | } else if (errno != EAGAIN) | 1349 | } else if (errno != EAGAIN) |
896 | err(1, "Running guest failed"); | 1350 | err(1, "Running guest failed"); |
1351 | |||
1352 | /* Service input, then unset the BREAK which releases | ||
1353 | * the Waker. */ | ||
897 | handle_input(lguest_fd, device_list); | 1354 | handle_input(lguest_fd, device_list); |
898 | if (write(lguest_fd, args, sizeof(args)) < 0) | 1355 | if (write(lguest_fd, args, sizeof(args)) < 0) |
899 | err(1, "Resetting break"); | 1356 | err(1, "Resetting break"); |
900 | } | 1357 | } |
901 | } | 1358 | } |
1359 | /* | ||
1360 | * This is the end of the Launcher. | ||
1361 | * | ||
1362 | * But wait! We've seen I/O from the Launcher, and we've seen I/O from the | ||
1363 | * Drivers. If we were to see the Host kernel I/O code, our understanding | ||
1364 | * would be complete... :*/ | ||
902 | 1365 | ||
903 | static struct option opts[] = { | 1366 | static struct option opts[] = { |
904 | { "verbose", 0, NULL, 'v' }, | 1367 | { "verbose", 0, NULL, 'v' }, |
@@ -916,20 +1379,49 @@ static void usage(void) | |||
916 | "<mem-in-mb> vmlinux [args...]"); | 1379 | "<mem-in-mb> vmlinux [args...]"); |
917 | } | 1380 | } |
918 | 1381 | ||
1382 | /*L:100 The Launcher code itself takes us out into userspace, that scary place | ||
1383 | * where pointers run wild and free! Unfortunately, like most userspace | ||
1384 | * programs, it's quite boring (which is why everyone likes to hack on the | ||
1385 | * kernel!). Perhaps if you make up an Lguest Drinking Game at this point, it | ||
1386 | * will get you through this section. Or, maybe not. | ||
1387 | * | ||
1388 | * The Launcher binary sits up high, usually starting at address 0xB8000000. | ||
1389 | * Everything below this is the "physical" memory for the Guest. For example, | ||
1390 | * if the Guest were to write a "1" at physical address 0, we would see a "1" | ||
1391 | * in the Launcher at "(int *)0". Guest physical == Launcher virtual. | ||
1392 | * | ||
1393 | * This can be tough to get your head around, but usually it just means that we | ||
1394 | * don't need to do any conversion when the Guest gives us its "physical" | ||
1395 | * addresses. | ||
1396 | */ | ||
919 | int main(int argc, char *argv[]) | 1397 | int main(int argc, char *argv[]) |
920 | { | 1398 | { |
1399 | /* Memory, top-level pagetable, code start point, PAGE_OFFSET and size | ||
1400 | * of the (optional) initrd. */ | ||
921 | unsigned long mem = 0, pgdir, start, page_offset, initrd_size = 0; | 1401 | unsigned long mem = 0, pgdir, start, page_offset, initrd_size = 0; |
1402 | /* A temporary and the /dev/lguest file descriptor. */ | ||
922 | int i, c, lguest_fd; | 1403 | int i, c, lguest_fd; |
1404 | /* The list of Guest devices, based on command line arguments. */ | ||
923 | struct device_list device_list; | 1405 | struct device_list device_list; |
1406 | /* The boot information for the Guest: at guest-physical address 0. */ | ||
924 | void *boot = (void *)0; | 1407 | void *boot = (void *)0; |
1408 | /* If they specify an initrd file to load. */ | ||
925 | const char *initrd_name = NULL; | 1409 | const char *initrd_name = NULL; |
926 | 1410 | ||
1411 | /* First we initialize the device list. Since console and network | ||
1412 | * devices receive input from a file descriptor, we keep an fdset | ||
1413 | * (infds) and the maximum fd number (max_infd) with the head of the | ||
1414 | * list. We also keep a pointer to the last device, for easy appending | ||
1415 | * to the list. */ | ||
927 | device_list.max_infd = -1; | 1416 | device_list.max_infd = -1; |
928 | device_list.dev = NULL; | 1417 | device_list.dev = NULL; |
929 | device_list.lastdev = &device_list.dev; | 1418 | device_list.lastdev = &device_list.dev; |
930 | FD_ZERO(&device_list.infds); | 1419 | FD_ZERO(&device_list.infds); |
931 | 1420 | ||
932 | /* We need to know how much memory so we can allocate devices. */ | 1421 | /* We need to know how much memory we have so we can set up the device |
1422 | * descriptor and memory pages for the devices as we parse the command | ||
1423 | * line. So we quickly look through the arguments to find the amount | ||
1424 | * of memory now. */ | ||
933 | for (i = 1; i < argc; i++) { | 1425 | for (i = 1; i < argc; i++) { |
934 | if (argv[i][0] != '-') { | 1426 | if (argv[i][0] != '-') { |
935 | mem = top = atoi(argv[i]) * 1024 * 1024; | 1427 | mem = top = atoi(argv[i]) * 1024 * 1024; |
@@ -938,6 +1430,8 @@ int main(int argc, char *argv[]) | |||
938 | break; | 1430 | break; |
939 | } | 1431 | } |
940 | } | 1432 | } |
1433 | |||
1434 | /* The options are fairly straightforward */ | ||
941 | while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { | 1435 | while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { |
942 | switch (c) { | 1436 | switch (c) { |
943 | case 'v': | 1437 | case 'v': |
@@ -960,42 +1454,59 @@ int main(int argc, char *argv[]) | |||
960 | usage(); | 1454 | usage(); |
961 | } | 1455 | } |
962 | } | 1456 | } |
1457 | /* After the other arguments we expect the memory size and kernel image name, | ||
1458 | * followed by command line arguments for the kernel. */ | ||
963 | if (optind + 2 > argc) | 1459 | if (optind + 2 > argc) |
964 | usage(); | 1460 | usage(); |
965 | 1461 | ||
966 | /* We need a console device */ | 1462 | /* We always have a console device */ |
967 | setup_console(&device_list); | 1463 | setup_console(&device_list); |
968 | 1464 | ||
969 | /* First we map /dev/zero over all of guest-physical memory. */ | 1465 | /* We start by mapping anonymous pages over all of the guest-physical |
1466 | * memory range. This fills it with 0, and ensures that the Guest | ||
1467 | * won't be killed when it tries to access it. */ | ||
970 | map_zeroed_pages(0, mem / getpagesize()); | 1468 | map_zeroed_pages(0, mem / getpagesize()); |
971 | 1469 | ||
972 | /* Now we load the kernel */ | 1470 | /* Now we load the kernel */ |
973 | start = load_kernel(open_or_die(argv[optind+1], O_RDONLY), | 1471 | start = load_kernel(open_or_die(argv[optind+1], O_RDONLY), |
974 | &page_offset); | 1472 | &page_offset); |
975 | 1473 | ||
976 | /* Map the initrd image if requested */ | 1474 | /* Map the initrd image if requested (at top of physical memory) */ |
977 | if (initrd_name) { | 1475 | if (initrd_name) { |
978 | initrd_size = load_initrd(initrd_name, mem); | 1476 | initrd_size = load_initrd(initrd_name, mem); |
1477 | /* These are the locations in the Linux boot header where the | ||
1478 | * start and size of the initrd are expected to be found. */ | ||
979 | *(unsigned long *)(boot+0x218) = mem - initrd_size; | 1479 | *(unsigned long *)(boot+0x218) = mem - initrd_size; |
980 | *(unsigned long *)(boot+0x21c) = initrd_size; | 1480 | *(unsigned long *)(boot+0x21c) = initrd_size; |
1481 | /* The bootloader type 0xFF means "unknown"; that's OK. */ | ||
981 | *(unsigned char *)(boot+0x210) = 0xFF; | 1482 | *(unsigned char *)(boot+0x210) = 0xFF; |
982 | } | 1483 | } |
983 | 1484 | ||
984 | /* Set up the initial linar pagetables. */ | 1485 | /* Set up the initial linear pagetables, starting below the initrd. */ |
985 | pgdir = setup_pagetables(mem, initrd_size, page_offset); | 1486 | pgdir = setup_pagetables(mem, initrd_size, page_offset); |
986 | 1487 | ||
987 | /* E820 memory map: ours is a simple, single region. */ | 1488 | /* The Linux boot header contains an "E820" memory map: ours is a |
1489 | * simple, single region. */ | ||
988 | *(char*)(boot+E820NR) = 1; | 1490 | *(char*)(boot+E820NR) = 1; |
989 | *((struct e820entry *)(boot+E820MAP)) | 1491 | *((struct e820entry *)(boot+E820MAP)) |
990 | = ((struct e820entry) { 0, mem, E820_RAM }); | 1492 | = ((struct e820entry) { 0, mem, E820_RAM }); |
991 | /* Command line pointer and command line (at 4096) */ | 1493 | /* The boot header contains a command line pointer: we put the command |
1494 | * line after the boot header (at address 4096) */ | ||
992 | *(void **)(boot + 0x228) = boot + 4096; | 1495 | *(void **)(boot + 0x228) = boot + 4096; |
993 | concat(boot + 4096, argv+optind+2); | 1496 | concat(boot + 4096, argv+optind+2); |
994 | /* Paravirt type: 1 == lguest */ | 1497 | |
1498 | /* The guest type value of "1" tells the Guest it's under lguest. */ | ||
995 | *(int *)(boot + 0x23c) = 1; | 1499 | *(int *)(boot + 0x23c) = 1; |
996 | 1500 | ||
1501 | /* We tell the kernel to initialize the Guest: this returns the open | ||
1502 | * /dev/lguest file descriptor. */ | ||
997 | lguest_fd = tell_kernel(pgdir, start, page_offset); | 1503 | lguest_fd = tell_kernel(pgdir, start, page_offset); |
1504 | |||
1505 | /* We fork off a child process, which wakes the Launcher whenever one | ||
1506 | * of the input file descriptors needs attention. Otherwise we would | ||
1507 | * run the Guest until it tries to output something. */ | ||
998 | waker_fd = setup_waker(lguest_fd, &device_list); | 1508 | waker_fd = setup_waker(lguest_fd, &device_list); |
999 | 1509 | ||
1510 | /* Finally, run the Guest. This doesn't return. */ | ||
1000 | run_guest(lguest_fd, &device_list); | 1511 | run_guest(lguest_fd, &device_list); |
1001 | } | 1512 | } |
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 2cea0c80c992..1eb05f9a56b6 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c | |||
@@ -208,24 +208,39 @@ static int emulate_insn(struct lguest *lg) | |||
208 | return 1; | 208 | return 1; |
209 | } | 209 | } |
210 | 210 | ||
211 | /*L:305 | ||
212 | * Dealing With Guest Memory. | ||
213 | * | ||
214 | * When the Guest gives us (what it thinks is) a physical address, we can use | ||
215 | * the normal copy_from_user() & copy_to_user() on that address: remember, | ||
216 | * Guest physical == Launcher virtual. | ||
217 | * | ||
218 | * But we can't trust the Guest: it might be trying to access the Launcher | ||
219 | * code. We have to check that the range is below the pfn_limit the Launcher | ||
220 | * gave us. We have to make sure that addr + len doesn't give us a false | ||
221 | * positive by overflowing, too. */ | ||
211 | int lguest_address_ok(const struct lguest *lg, | 222 | int lguest_address_ok(const struct lguest *lg, |
212 | unsigned long addr, unsigned long len) | 223 | unsigned long addr, unsigned long len) |
213 | { | 224 | { |
214 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); | 225 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); |
215 | } | 226 | } |
216 | 227 | ||
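The second half of that check matters: with only the pfn_limit comparison, a huge len could wrap addr + len around to a small value and look valid. A tiny userspace demonstration with made-up numbers (not code from this patch):

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096UL

	static int address_ok(unsigned long pfn_limit,
			      unsigned long addr, unsigned long len)
	{
		return (addr + len) / DEMO_PAGE_SIZE < pfn_limit
			&& (addr + len >= addr);
	}

	int main(void)
	{
		unsigned long pfn_limit = 0x4000;	/* a 64MB Guest */

		/* A sane 256-byte access: accepted. */
		printf("%d\n", address_ok(pfn_limit, 0x100000, 256));
		/* len wraps addr + len around to below pfn_limit: only
		 * the overflow test rejects it. */
		printf("%d\n", address_ok(pfn_limit, 0x100000, ~0UL));
		return 0;
	}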
217 | /* Just like get_user, but don't let guest access lguest binary. */ | 228 | /* This is a convenient routine to get a 32-bit value from the Guest (a very |
229 | * common operation). Here we can see how useful the kill_guest() routine we | ||
230 | * met in the Launcher can be: we return a random value (0) instead of needing | ||
231 | * to return an error. */ | ||
218 | u32 lgread_u32(struct lguest *lg, unsigned long addr) | 232 | u32 lgread_u32(struct lguest *lg, unsigned long addr) |
219 | { | 233 | { |
220 | u32 val = 0; | 234 | u32 val = 0; |
221 | 235 | ||
222 | /* Don't let them access lguest binary */ | 236 | /* Don't let them access lguest binary. */ |
223 | if (!lguest_address_ok(lg, addr, sizeof(val)) | 237 | if (!lguest_address_ok(lg, addr, sizeof(val)) |
224 | || get_user(val, (u32 __user *)addr) != 0) | 238 | || get_user(val, (u32 __user *)addr) != 0) |
225 | kill_guest(lg, "bad read address %#lx", addr); | 239 | kill_guest(lg, "bad read address %#lx", addr); |
226 | return val; | 240 | return val; |
227 | } | 241 | } |
228 | 242 | ||
243 | /* Same thing for writing a value. */ | ||
229 | void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val) | 244 | void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val) |
230 | { | 245 | { |
231 | if (!lguest_address_ok(lg, addr, sizeof(val)) | 246 | if (!lguest_address_ok(lg, addr, sizeof(val)) |
@@ -233,6 +248,9 @@ void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val) | |||
233 | kill_guest(lg, "bad write address %#lx", addr); | 248 | kill_guest(lg, "bad write address %#lx", addr); |
234 | } | 249 | } |
235 | 250 | ||
251 | /* This routine is more generic, and copies a range of Guest bytes into a | ||
252 | * buffer. If the copy_from_user() fails, we fill the buffer with zeroes, so | ||
253 | * the caller doesn't end up using uninitialized kernel memory. */ | ||
236 | void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes) | 254 | void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes) |
237 | { | 255 | { |
238 | if (!lguest_address_ok(lg, addr, bytes) | 256 | if (!lguest_address_ok(lg, addr, bytes) |
@@ -243,6 +261,7 @@ void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes) | |||
243 | } | 261 | } |
244 | } | 262 | } |
245 | 263 | ||
264 | /* Similarly, our generic routine to copy into a range of Guest bytes. */ | ||
246 | void lgwrite(struct lguest *lg, unsigned long addr, const void *b, | 265 | void lgwrite(struct lguest *lg, unsigned long addr, const void *b, |
247 | unsigned bytes) | 266 | unsigned bytes) |
248 | { | 267 | { |
@@ -250,6 +269,7 @@ void lgwrite(struct lguest *lg, unsigned long addr, const void *b, | |||
250 | || copy_to_user((void __user *)addr, b, bytes) != 0) | 269 | || copy_to_user((void __user *)addr, b, bytes) != 0) |
251 | kill_guest(lg, "bad write address %#lx len %u", addr, bytes); | 270 | kill_guest(lg, "bad write address %#lx len %u", addr, bytes); |
252 | } | 271 | } |
272 | /* (end of memory access helper routines) :*/ | ||
253 | 273 | ||
254 | static void set_ts(void) | 274 | static void set_ts(void) |
255 | { | 275 | { |
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c index d2f02f0653ca..da288128e44f 100644 --- a/drivers/lguest/io.c +++ b/drivers/lguest/io.c | |||
@@ -27,8 +27,36 @@ | |||
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include "lg.h" | 28 | #include "lg.h" |
29 | 29 | ||
30 | /*L:300 | ||
31 | * I/O | ||
32 | * | ||
33 | * Getting data in and out of the Guest is quite an art. There are numerous | ||
34 | * ways to do it, and they all suck differently. We try to keep things fairly | ||
35 | * close to "real" hardware so our Guest's drivers don't look like an alien | ||
36 | * visitation in the middle of the Linux code, and yet make sure that Guests | ||
37 | * can talk directly to other Guests, not just the Launcher. | ||
38 | * | ||
39 | * To do this, the Guest gives us a key when it binds or sends DMA buffers. | ||
40 | * The key corresponds to a "physical" address inside the Guest (ie. a virtual | ||
41 | * address inside the Launcher process). We don't, however, use this key | ||
42 | * directly. | ||
43 | * | ||
44 | * We want Guests which share memory to be able to DMA to each other: two | ||
45 | * Launchers can mmap the same file, then the Guests can communicate. | ||
46 | * Fortunately, the futex code provides us with a way to get a "union | ||
47 | * futex_key" corresponding to the memory lying at a virtual address: if the | ||
48 | * two processes share memory, the "union futex_key" for that memory will match | ||
49 | * even if the memory is mapped at different addresses in each. So we always | ||
50 | * convert the keys to "union futex_key"s to compare them. | ||
51 | * | ||
52 | * Before we dive into this though, we need to look at another set of helper | ||
53 | * routines used throughout the Host kernel code to access Guest memory. | ||
54 | :*/ | ||
30 | static struct list_head dma_hash[61]; | 55 | static struct list_head dma_hash[61]; |
31 | 56 | ||
57 | /* An unfortunate side effect of the Linux doubly-linked list implementation is | ||
58 | * that there's no good way to statically initialize an array of linked | ||
59 | * lists. */ | ||
32 | void lguest_io_init(void) | 60 | void lguest_io_init(void) |
33 | { | 61 | { |
34 | unsigned int i; | 62 | unsigned int i; |
@@ -60,6 +88,19 @@ kill: | |||
60 | return 0; | 88 | return 0; |
61 | } | 89 | } |
62 | 90 | ||
91 | /*L:330 This is our hash function, using the wonderful Jenkins hash. | ||
92 | * | ||
93 | * The futex key is a union with three parts: an unsigned long word, a pointer, | ||
94 | * and an int "offset". We could use jhash_2words() which takes three u32s. | ||
95 | * (Ok, the hash functions are great: the naming sucks though). | ||
96 | * | ||
97 | * It's nice to be portable to 64-bit platforms, so we use the more generic | ||
98 | * jhash2(), which takes an array of u32, the number of u32s, and an initial | ||
99 | * u32 to roll in. This is uglier, but breaks down to almost the same code on | ||
100 | * 32-bit platforms like this one. | ||
101 | * | ||
102 | * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61). | ||
103 | */ | ||
63 | static unsigned int hash(const union futex_key *key) | 104 | static unsigned int hash(const union futex_key *key) |
64 | { | 105 | { |
65 | return jhash2((u32*)&key->both.word, | 106 | return jhash2((u32*)&key->both.word, |
@@ -68,6 +109,9 @@ static unsigned int hash(const union futex_key *key) | |||
68 | % ARRAY_SIZE(dma_hash); | 109 | % ARRAY_SIZE(dma_hash); |
69 | } | 110 | } |
70 | 111 | ||
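The only real job of hash() is to turn the words of a futex key into one of 61 bucket positions. A toy userspace stand-in for that idea (deliberately *not* the kernel's jhash2(), just an arbitrary mixer feeding a modulo; the types are invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_HASH_BUCKETS 61

	struct demo_key {		/* same shape: word, pointer, offset */
		unsigned long word;
		void *ptr;
		int offset;
	};

	static unsigned int demo_hash(const struct demo_key *key)
	{
		uint64_t h = key->word;

		/* Any reasonable mixer will do for the demo. */
		h = h * 0x9e3779b97f4a7c15ULL + (uintptr_t)key->ptr;
		h = h * 0x9e3779b97f4a7c15ULL + (unsigned int)key->offset;
		return h % DMA_HASH_BUCKETS;
	}

	int main(void)
	{
		struct demo_key k = { .word = 0x1234,
				      .ptr = (void *)0x7f00beef, .offset = 8 };

		printf("bucket %u of %d\n", demo_hash(&k), DMA_HASH_BUCKETS);
		return 0;
	}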
112 | /* This is a convenience routine to compare two keys. It's a much bemoaned C | ||
113 | * weakness that it doesn't allow '==' on structures or unions, so we have to | ||
114 | * open-code it like this. */ | ||
71 | static inline int key_eq(const union futex_key *a, const union futex_key *b) | 115 | static inline int key_eq(const union futex_key *a, const union futex_key *b) |
72 | { | 116 | { |
73 | return (a->both.word == b->both.word | 117 | return (a->both.word == b->both.word |
@@ -75,22 +119,36 @@ static inline int key_eq(const union futex_key *a, const union futex_key *b) | |||
75 | && a->both.offset == b->both.offset); | 119 | && a->both.offset == b->both.offset); |
76 | } | 120 | } |
77 | 121 | ||
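key_eq() exists because C provides no '==' for structs or unions, and memcmp() could be tripped up by padding bytes or by parts of the union we don't care about, so the interesting fields are compared by hand. A tiny illustration of that open-coded comparison (types invented for the demo):

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_key {
		unsigned long word;
		int offset;	/* trailing padding likely on 64-bit */
	};

	static bool demo_key_eq(const struct demo_key *a,
				const struct demo_key *b)
	{
		/* "(*a == *b)" would not even compile. */
		return a->word == b->word && a->offset == b->offset;
	}

	int main(void)
	{
		struct demo_key a = { 42, 7 }, b = { 42, 7 };

		printf("%s\n", demo_key_eq(&a, &b) ? "equal" : "different");
		return 0;
	}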
78 | /* Must hold read lock on dmainfo owner's current->mm->mmap_sem */ | 122 | /*L:360 OK, when we need to actually free up a Guest's DMA array we do several |
123 | * things, so we have a convenient function to do it. | ||
124 | * | ||
125 | * The caller must hold a read lock on the dmainfo owner's current->mm->mmap_sem | ||
126 | * for the drop_futex_key_refs(). */ | ||
79 | static void unlink_dma(struct lguest_dma_info *dmainfo) | 127 | static void unlink_dma(struct lguest_dma_info *dmainfo) |
80 | { | 128 | { |
129 | /* You locked this too, right? */ | ||
81 | BUG_ON(!mutex_is_locked(&lguest_lock)); | 130 | BUG_ON(!mutex_is_locked(&lguest_lock)); |
131 | /* This is how we know that the entry is free. */ | ||
82 | dmainfo->interrupt = 0; | 132 | dmainfo->interrupt = 0; |
133 | /* Remove it from the hash table. */ | ||
83 | list_del(&dmainfo->list); | 134 | list_del(&dmainfo->list); |
135 | /* Drop the references we were holding (to the inode or mm). */ | ||
84 | drop_futex_key_refs(&dmainfo->key); | 136 | drop_futex_key_refs(&dmainfo->key); |
85 | } | 137 | } |
86 | 138 | ||
139 | /*L:350 This is the routine which we call when the Guest asks to unregister a | ||
140 | * DMA array attached to a given key. Returns true if the array was found. */ | ||
87 | static int unbind_dma(struct lguest *lg, | 141 | static int unbind_dma(struct lguest *lg, |
88 | const union futex_key *key, | 142 | const union futex_key *key, |
89 | unsigned long dmas) | 143 | unsigned long dmas) |
90 | { | 144 | { |
91 | int i, ret = 0; | 145 | int i, ret = 0; |
92 | 146 | ||
147 | /* We don't bother with the hash table, just look through all this | ||
148 | * Guest's DMA arrays. */ | ||
93 | for (i = 0; i < LGUEST_MAX_DMA; i++) { | 149 | for (i = 0; i < LGUEST_MAX_DMA; i++) { |
150 | /* In theory it could have more than one array on the same key, | ||
151 | * or one array on multiple keys, so we check both */ | ||
94 | if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) { | 152 | if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) { |
95 | unlink_dma(&lg->dma[i]); | 153 | unlink_dma(&lg->dma[i]); |
96 | ret = 1; | 154 | ret = 1; |
@@ -100,51 +158,91 @@ static int unbind_dma(struct lguest *lg, | |||
100 | return ret; | 158 | return ret; |
101 | } | 159 | } |
102 | 160 | ||
161 | /*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct | ||
162 | * lguest_dma" for receiving I/O. | ||
163 | * | ||
164 | * The Guest wants to bind an array of "struct lguest_dma"s to a particular key | ||
165 | * to receive input. This only happens when the Guest is setting up a new | ||
166 | * device, so it doesn't have to be very fast. | ||
167 | * | ||
168 | * It returns 1 on a successful registration (it can fail if we hit the limit | ||
169 | * of registrations for this Guest). | ||
170 | */ | ||
103 | int bind_dma(struct lguest *lg, | 171 | int bind_dma(struct lguest *lg, |
104 | unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt) | 172 | unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt) |
105 | { | 173 | { |
106 | unsigned int i; | 174 | unsigned int i; |
107 | int ret = 0; | 175 | int ret = 0; |
108 | union futex_key key; | 176 | union futex_key key; |
177 | /* Futex code needs the mmap_sem. */ | ||
109 | struct rw_semaphore *fshared = ¤t->mm->mmap_sem; | 178 | struct rw_semaphore *fshared = ¤t->mm->mmap_sem; |
110 | 179 | ||
180 | /* Invalid interrupt? (We could kill the guest here). */ | ||
111 | if (interrupt >= LGUEST_IRQS) | 181 | if (interrupt >= LGUEST_IRQS) |
112 | return 0; | 182 | return 0; |
113 | 183 | ||
184 | /* We need to grab the Big Lguest Lock, because other Guests may be | ||
185 | * trying to look through this Guest's DMAs to send something while | ||
186 | * we're doing this. */ | ||
114 | mutex_lock(&lguest_lock); | 187 | mutex_lock(&lguest_lock); |
115 | down_read(fshared); | 188 | down_read(fshared); |
116 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { | 189 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { |
117 | kill_guest(lg, "bad dma key %#lx", ukey); | 190 | kill_guest(lg, "bad dma key %#lx", ukey); |
118 | goto unlock; | 191 | goto unlock; |
119 | } | 192 | } |
193 | |||
194 | /* We want to keep this key valid once we drop mmap_sem, so we have to | ||
195 | * hold a reference. */ | ||
120 | get_futex_key_refs(&key); | 196 | get_futex_key_refs(&key); |
121 | 197 | ||
198 | /* If the Guest specified an interrupt of 0, that means they want to | ||
199 | * unregister this array of "struct lguest_dma"s. */ | ||
122 | if (interrupt == 0) | 200 | if (interrupt == 0) |
123 | ret = unbind_dma(lg, &key, dmas); | 201 | ret = unbind_dma(lg, &key, dmas); |
124 | else { | 202 | else { |
203 | /* Look through this Guest's dma array for an unused entry. */ | ||
125 | for (i = 0; i < LGUEST_MAX_DMA; i++) { | 204 | for (i = 0; i < LGUEST_MAX_DMA; i++) { |
205 | /* If the interrupt is non-zero, the entry is already | ||
206 | * used. */ | ||
126 | if (lg->dma[i].interrupt) | 207 | if (lg->dma[i].interrupt) |
127 | continue; | 208 | continue; |
128 | 209 | ||
210 | /* OK, a free one! Fill in our details. */ | ||
129 | lg->dma[i].dmas = dmas; | 211 | lg->dma[i].dmas = dmas; |
130 | lg->dma[i].num_dmas = numdmas; | 212 | lg->dma[i].num_dmas = numdmas; |
131 | lg->dma[i].next_dma = 0; | 213 | lg->dma[i].next_dma = 0; |
132 | lg->dma[i].key = key; | 214 | lg->dma[i].key = key; |
133 | lg->dma[i].guestid = lg->guestid; | 215 | lg->dma[i].guestid = lg->guestid; |
134 | lg->dma[i].interrupt = interrupt; | 216 | lg->dma[i].interrupt = interrupt; |
217 | |||
218 | /* Now we add it to the hash table: the position | ||
219 | * depends on the futex key that we got. */ | ||
135 | list_add(&lg->dma[i].list, &dma_hash[hash(&key)]); | 220 | list_add(&lg->dma[i].list, &dma_hash[hash(&key)]); |
221 | /* Success! */ | ||
136 | ret = 1; | 222 | ret = 1; |
137 | goto unlock; | 223 | goto unlock; |
138 | } | 224 | } |
139 | } | 225 | } |
226 | /* If we didn't find a slot to put the key in, drop the reference | ||
227 | * again. */ | ||
140 | drop_futex_key_refs(&key); | 228 | drop_futex_key_refs(&key); |
141 | unlock: | 229 | unlock: |
230 | /* Unlock and out. */ | ||
142 | up_read(fshared); | 231 | up_read(fshared); |
143 | mutex_unlock(&lguest_lock); | 232 | mutex_unlock(&lguest_lock); |
144 | return ret; | 233 | return ret; |
145 | } | 234 | } |
146 | 235 | ||
147 | /* lgread from another guest */ | 236 | /*L:385 Note that our routines to access a different Guest's memory are called |
237 | * lgread_other() and lgwrite_other(): these names emphasize that they are only | ||
238 | * used when the Guest is *not* the current Guest. | ||
239 | * | ||
240 | * The interface for copying from another process's memory is called | ||
241 | * access_process_vm(), with a final argument of 0 for a read, and 1 for a | ||
242 | * write. | ||
243 | * | ||
244 | * We need lgread_other() to read the destination Guest's "struct lguest_dma" | ||
245 | * array. */ | ||
148 | static int lgread_other(struct lguest *lg, | 246 | static int lgread_other(struct lguest *lg, |
149 | void *buf, u32 addr, unsigned bytes) | 247 | void *buf, u32 addr, unsigned bytes) |
150 | { | 248 | { |
@@ -157,7 +255,8 @@ static int lgread_other(struct lguest *lg, | |||
157 | return 1; | 255 | return 1; |
158 | } | 256 | } |
159 | 257 | ||
160 | /* lgwrite to another guest */ | 258 | /* "lgwrite()" to another Guest: used to update the destination "used_len" once |
259 | * we've transferred data into the buffer. */ | ||
161 | static int lgwrite_other(struct lguest *lg, u32 addr, | 260 | static int lgwrite_other(struct lguest *lg, u32 addr, |
162 | const void *buf, unsigned bytes) | 261 | const void *buf, unsigned bytes) |
163 | { | 262 | { |
@@ -170,6 +269,15 @@ static int lgwrite_other(struct lguest *lg, u32 addr, | |||
170 | return 1; | 269 | return 1; |
171 | } | 270 | } |
172 | 271 | ||
272 | /*L:400 This is the generic engine which copies a source "struct | ||
273 | * lguest_dma" from this Guest into another Guest's "struct lguest_dma". The | ||
274 | * destination Guest's pages have already been mapped, as contained in the | ||
275 | * pages array. | ||
276 | * | ||
277 | * If you're wondering if there's a nice "copy from one process to another" | ||
278 | * routine, so was I. But Linux isn't really set up to copy between two | ||
279 | * unrelated processes, so we have to write it ourselves. | ||
280 | */ | ||
173 | static u32 copy_data(struct lguest *srclg, | 281 | static u32 copy_data(struct lguest *srclg, |
174 | const struct lguest_dma *src, | 282 | const struct lguest_dma *src, |
175 | const struct lguest_dma *dst, | 283 | const struct lguest_dma *dst, |
@@ -178,33 +286,59 @@ static u32 copy_data(struct lguest *srclg, | |||
178 | unsigned int totlen, si, di, srcoff, dstoff; | 286 | unsigned int totlen, si, di, srcoff, dstoff; |
179 | void *maddr = NULL; | 287 | void *maddr = NULL; |
180 | 288 | ||
289 | /* We return the total length transferred. */ | ||
181 | totlen = 0; | 290 | totlen = 0; |
291 | |||
292 | /* We keep indexes into the source and destination "struct lguest_dma", | ||
293 | * and an offset within each region. */ | ||
182 | si = di = 0; | 294 | si = di = 0; |
183 | srcoff = dstoff = 0; | 295 | srcoff = dstoff = 0; |
296 | |||
297 | /* We loop until the source or destination is exhausted. */ | ||
184 | while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si] | 298 | while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si] |
185 | && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) { | 299 | && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) { |
300 | /* We can only transfer the rest of the src buffer, or as much | ||
301 | * as will fit into the destination buffer. */ | ||
186 | u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff); | 302 | u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff); |
187 | 303 | ||
304 | /* For systems using "highmem" we need to use kmap() to access | ||
305 | * the page we want. We often use the same page over and over, | ||
306 | * so rather than kmap() it on every loop, we set the maddr | ||
307 | * pointer to NULL when we need to move to the next | ||
308 | * destination page. */ | ||
188 | if (!maddr) | 309 | if (!maddr) |
189 | maddr = kmap(pages[di]); | 310 | maddr = kmap(pages[di]); |
190 | 311 | ||
191 | /* FIXME: This is not completely portable, since | 312 | /* Copy directly from (this Guest's) source address to the |
192 | archs do different things for copy_to_user_page. */ | 313 | * destination Guest's kmap()ed buffer. Note that maddr points |
314 | * to the start of the page: we need to add the offset of the | ||
315 | * destination address and offset within the buffer. */ | ||
316 | |||
317 | /* FIXME: This is not completely portable. I looked at | ||
318 | * copy_to_user_page(), and some archs seem to need special | ||
319 | * flushes. x86 is fine. */ | ||
193 | if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE, | 320 | if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE, |
194 | (void __user *)src->addr[si], len) != 0) { | 321 | (void __user *)src->addr[si], len) != 0) { |
322 | /* If a copy failed, it's the source's fault. */ | ||
195 | kill_guest(srclg, "bad address in sending DMA"); | 323 | kill_guest(srclg, "bad address in sending DMA"); |
196 | totlen = 0; | 324 | totlen = 0; |
197 | break; | 325 | break; |
198 | } | 326 | } |
199 | 327 | ||
328 | /* Increment the total and src & dst offsets */ | ||
200 | totlen += len; | 329 | totlen += len; |
201 | srcoff += len; | 330 | srcoff += len; |
202 | dstoff += len; | 331 | dstoff += len; |
332 | |||
333 | /* Presumably we reached the end of the src or dest buffers: */ | ||
203 | if (srcoff == src->len[si]) { | 334 | if (srcoff == src->len[si]) { |
335 | /* Move to the next buffer at offset 0 */ | ||
204 | si++; | 336 | si++; |
205 | srcoff = 0; | 337 | srcoff = 0; |
206 | } | 338 | } |
207 | if (dstoff == dst->len[di]) { | 339 | if (dstoff == dst->len[di]) { |
340 | /* We need to unmap that destination page and reset | ||
341 | * maddr ready for the next one. */ | ||
208 | kunmap(pages[di]); | 342 | kunmap(pages[di]); |
209 | maddr = NULL; | 343 | maddr = NULL; |
210 | di++; | 344 | di++; |
@@ -212,13 +346,15 @@ static u32 copy_data(struct lguest *srclg, | |||
212 | } | 346 | } |
213 | } | 347 | } |
214 | 348 | ||
349 | /* If we still had a page mapped at the end, unmap now. */ | ||
215 | if (maddr) | 350 | if (maddr) |
216 | kunmap(pages[di]); | 351 | kunmap(pages[di]); |
217 | 352 | ||
218 | return totlen; | 353 | return totlen; |
219 | } | 354 | } |
220 | 355 | ||
221 | /* Src is us, ie. current. */ | 356 | /*L:390 This is how we transfer a "struct lguest_dma" from the source Guest |
357 | * (the current Guest which called SEND_DMA) to another Guest. */ | ||
222 | static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src, | 358 | static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src, |
223 | struct lguest *dstlg, const struct lguest_dma *dst) | 359 | struct lguest *dstlg, const struct lguest_dma *dst) |
224 | { | 360 | { |
@@ -226,23 +362,31 @@ static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src, | |||
226 | u32 ret; | 362 | u32 ret; |
227 | struct page *pages[LGUEST_MAX_DMA_SECTIONS]; | 363 | struct page *pages[LGUEST_MAX_DMA_SECTIONS]; |
228 | 364 | ||
365 | /* We check that both source and destination "struct lguest_dma"s are | ||
366 | * within the bounds of the source and destination Guests */ | ||
229 | if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src)) | 367 | if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src)) |
230 | return 0; | 368 | return 0; |
231 | 369 | ||
232 | /* First get the destination pages */ | 370 | /* We need to map the pages which correspond to each part of the |
371 | * destination buffer. */ | ||
233 | for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { | 372 | for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) { |
234 | if (dst->len[i] == 0) | 373 | if (dst->len[i] == 0) |
235 | break; | 374 | break; |
375 | /* get_user_pages() is a complicated function, especially since | ||
376 | * we only want a single page. But it works, and returns the | ||
377 | * number of pages. Note that we're holding the destination's | ||
378 | * mmap_sem, as get_user_pages() requires. */ | ||
236 | if (get_user_pages(dstlg->tsk, dstlg->mm, | 379 | if (get_user_pages(dstlg->tsk, dstlg->mm, |
237 | dst->addr[i], 1, 1, 1, pages+i, NULL) | 380 | dst->addr[i], 1, 1, 1, pages+i, NULL) |
238 | != 1) { | 381 | != 1) { |
382 | /* This means the destination gave us a bogus buffer */ | ||
239 | kill_guest(dstlg, "Error mapping DMA pages"); | 383 | kill_guest(dstlg, "Error mapping DMA pages"); |
240 | ret = 0; | 384 | ret = 0; |
241 | goto drop_pages; | 385 | goto drop_pages; |
242 | } | 386 | } |
243 | } | 387 | } |
244 | 388 | ||
245 | /* Now copy until we run out of src or dst. */ | 389 | /* Now copy the data until we run out of src or dst. */ |
246 | ret = copy_data(srclg, src, dst, pages); | 390 | ret = copy_data(srclg, src, dst, pages); |
247 | 391 | ||
248 | drop_pages: | 392 | drop_pages: |
@@ -251,6 +395,11 @@ drop_pages: | |||
251 | return ret; | 395 | return ret; |
252 | } | 396 | } |
253 | 397 | ||
398 | /*L:380 Transferring data from one Guest to another is not as simple as I'd | ||
399 | * like. We've found the "struct lguest_dma_info" bound to the same address as | ||
400 | * the send; now we need to copy into it. | ||
401 | * | ||
402 | * This function returns true if the destination array was empty. */ | ||
254 | static int dma_transfer(struct lguest *srclg, | 403 | static int dma_transfer(struct lguest *srclg, |
255 | unsigned long udma, | 404 | unsigned long udma, |
256 | struct lguest_dma_info *dst) | 405 | struct lguest_dma_info *dst) |
@@ -259,15 +408,23 @@ static int dma_transfer(struct lguest *srclg, | |||
259 | struct lguest *dstlg; | 408 | struct lguest *dstlg; |
260 | u32 i, dma = 0; | 409 | u32 i, dma = 0; |
261 | 410 | ||
411 | /* From the "struct lguest_dma_info" we found in the hash, grab the | ||
412 | * Guest. */ | ||
262 | dstlg = &lguests[dst->guestid]; | 413 | dstlg = &lguests[dst->guestid]; |
263 | /* Get our dma list. */ | 414 | /* Read in the source "struct lguest_dma" handed to SEND_DMA. */ |
264 | lgread(srclg, &src_dma, udma, sizeof(src_dma)); | 415 | lgread(srclg, &src_dma, udma, sizeof(src_dma)); |
265 | 416 | ||
266 | /* We can't deadlock against them dmaing to us, because this | 417 | /* We need the destination's mmap_sem, and we already hold the source's |
267 | * is all under the lguest_lock. */ | 418 | * mmap_sem for the futex key lookup. Normally this would suggest that |
419 | * we could deadlock if the destination Guest was trying to send to | ||
420 | * this source Guest at the same time, which is another reason that all | ||
421 | * I/O is done under the big lguest_lock. */ | ||
268 | down_read(&dstlg->mm->mmap_sem); | 422 | down_read(&dstlg->mm->mmap_sem); |
269 | 423 | ||
424 | /* Look through the destination DMA array for an available buffer. */ | ||
270 | for (i = 0; i < dst->num_dmas; i++) { | 425 | for (i = 0; i < dst->num_dmas; i++) { |
426 | /* We keep a "next_dma" pointer which often helps us avoid | ||
427 | * looking at lots of previously-filled entries. */ | ||
271 | dma = (dst->next_dma + i) % dst->num_dmas; | 428 | dma = (dst->next_dma + i) % dst->num_dmas; |
272 | if (!lgread_other(dstlg, &dst_dma, | 429 | if (!lgread_other(dstlg, &dst_dma, |
273 | dst->dmas + dma * sizeof(struct lguest_dma), | 430 | dst->dmas + dma * sizeof(struct lguest_dma), |
@@ -277,30 +434,46 @@ static int dma_transfer(struct lguest *srclg, | |||
277 | if (!dst_dma.used_len) | 434 | if (!dst_dma.used_len) |
278 | break; | 435 | break; |
279 | } | 436 | } |
437 | |||
438 | /* If we found a buffer, we do the actual data copy. */ | ||
280 | if (i != dst->num_dmas) { | 439 | if (i != dst->num_dmas) { |
281 | unsigned long used_lenp; | 440 | unsigned long used_lenp; |
282 | unsigned int ret; | 441 | unsigned int ret; |
283 | 442 | ||
284 | ret = do_dma(srclg, &src_dma, dstlg, &dst_dma); | 443 | ret = do_dma(srclg, &src_dma, dstlg, &dst_dma); |
285 | /* Put used length in src. */ | 444 | /* Put used length in the source "struct lguest_dma"'s used_len |
445 | * field. It's a little tricky to figure out where that is, | ||
446 | * though. */ | ||
286 | lgwrite_u32(srclg, | 447 | lgwrite_u32(srclg, |
287 | udma+offsetof(struct lguest_dma, used_len), ret); | 448 | udma+offsetof(struct lguest_dma, used_len), ret); |
449 | /* Transferring 0 bytes is only OK if the source buffer was empty. */ | ||
288 | if (ret == 0 && src_dma.len[0] != 0) | 450 | if (ret == 0 && src_dma.len[0] != 0) |
289 | goto fail; | 451 | goto fail; |
290 | 452 | ||
291 | /* Make sure destination sees contents before length. */ | 453 | /* The destination Guest might be running on a different CPU: |
454 | * we have to make sure that it will see the "used_len" field | ||
455 | * change to non-zero *after* it sees the data we copied into | ||
456 | * the buffer. Hence a write memory barrier. */ | ||
292 | wmb(); | 457 | wmb(); |
458 | /* Figuring out where the destination's used_len field lives for this | ||
459 | * "struct lguest_dma" in the array is also a little ugly. */ | ||
293 | used_lenp = dst->dmas | 460 | used_lenp = dst->dmas |
294 | + dma * sizeof(struct lguest_dma) | 461 | + dma * sizeof(struct lguest_dma) |
295 | + offsetof(struct lguest_dma, used_len); | 462 | + offsetof(struct lguest_dma, used_len); |
296 | lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret)); | 463 | lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret)); |
464 | /* Move the cursor for next time. */ | ||
297 | dst->next_dma++; | 465 | dst->next_dma++; |
298 | } | 466 | } |
299 | up_read(&dstlg->mm->mmap_sem); | 467 | up_read(&dstlg->mm->mmap_sem); |
300 | 468 | ||
301 | /* Do this last so dst doesn't simply sleep on lock. */ | 469 | /* We trigger the destination interrupt, even if the destination was |
470 | * empty and we didn't transfer anything: this gives them a chance to | ||
471 | * wake up and refill. */ | ||
302 | set_bit(dst->interrupt, dstlg->irqs_pending); | 472 | set_bit(dst->interrupt, dstlg->irqs_pending); |
473 | /* Wake up the destination process. */ | ||
303 | wake_up_process(dstlg->tsk); | 474 | wake_up_process(dstlg->tsk); |
475 | /* If we passed the last "struct lguest_dma", the receiver had no | ||
476 | * buffers left. */ | ||
304 | return i == dst->num_dmas; | 477 | return i == dst->num_dmas; |
305 | 478 | ||
306 | fail: | 479 | fail: |
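A note on the write barrier in the hunk above: it only works if the reading side orders its accesses the same way. The Host half is exactly the code shown; the Guest half below is only a sketch (the real Guest drivers live elsewhere in this series, and consume() is a made-up stand-in), but it shows how wmb() pairs with a read-side rmb():

/* Host side, as in dma_transfer() above: payload first, length second. */
ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
wmb();
lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));

/* Guest side (sketch only): length first, payload second. */
while (dma->used_len == 0)
	cpu_relax();			/* wait for the Host to fill it in */
rmb();					/* read used_len before the data */
consume(buffer, dma->used_len);		/* "consume" is hypothetical */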
@@ -308,6 +481,8 @@ fail: | |||
308 | return 0; | 481 | return 0; |
309 | } | 482 | } |
310 | 483 | ||
484 | /*L:370 This is the counterpart to the BIND_DMA hypercall: the SEND_DMA | ||
485 | * hypercall. We find out who's listening, and send to them. */ | ||
311 | void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma) | 486 | void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma) |
312 | { | 487 | { |
313 | union futex_key key; | 488 | union futex_key key; |
@@ -317,31 +492,43 @@ void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma) | |||
317 | again: | 492 | again: |
318 | mutex_lock(&lguest_lock); | 493 | mutex_lock(&lguest_lock); |
319 | down_read(fshared); | 494 | down_read(fshared); |
495 | /* Get the futex key for the key the Guest gave us */ | ||
320 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { | 496 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { |
321 | kill_guest(lg, "bad sending DMA key"); | 497 | kill_guest(lg, "bad sending DMA key"); |
322 | goto unlock; | 498 | goto unlock; |
323 | } | 499 | } |
324 | /* Shared mapping? Look for other guests... */ | 500 | /* Since the key must be a multiple of 4, the futex key uses the lower |
501 | * bit of the "offset" field (which would always be 0) to indicate a | ||
502 | * mapping which is shared with other processes (ie. Guests). */ | ||
325 | if (key.shared.offset & 1) { | 503 | if (key.shared.offset & 1) { |
326 | struct lguest_dma_info *i; | 504 | struct lguest_dma_info *i; |
505 | /* Look through the hash for other Guests. */ | ||
327 | list_for_each_entry(i, &dma_hash[hash(&key)], list) { | 506 | list_for_each_entry(i, &dma_hash[hash(&key)], list) { |
507 | /* Don't send to ourselves. */ | ||
328 | if (i->guestid == lg->guestid) | 508 | if (i->guestid == lg->guestid) |
329 | continue; | 509 | continue; |
330 | if (!key_eq(&key, &i->key)) | 510 | if (!key_eq(&key, &i->key)) |
331 | continue; | 511 | continue; |
332 | 512 | ||
513 | /* If dma_transfer() tells us the destination has no | ||
514 | * available buffers, we increment "empty". */ | ||
333 | empty += dma_transfer(lg, udma, i); | 515 | empty += dma_transfer(lg, udma, i); |
334 | break; | 516 | break; |
335 | } | 517 | } |
518 | /* If the destination is empty, we release our locks and | ||
519 | * give the destination Guest a brief chance to restock. */ | ||
336 | if (empty == 1) { | 520 | if (empty == 1) { |
337 | /* Give any recipients one chance to restock. */ | 521 | /* Give any recipients one chance to restock. */ |
338 | up_read(¤t->mm->mmap_sem); | 522 | up_read(¤t->mm->mmap_sem); |
339 | mutex_unlock(&lguest_lock); | 523 | mutex_unlock(&lguest_lock); |
524 | /* Next time round, "empty" will be 2, so we won't retry. */ | ||
340 | empty++; | 525 | empty++; |
341 | goto again; | 526 | goto again; |
342 | } | 527 | } |
343 | } else { | 528 | } else { |
344 | /* Private mapping: tell our userspace. */ | 529 | /* Private mapping: Guest is sending to its Launcher. We set |
530 | * the "dma_is_pending" flag so that the main loop will exit | ||
531 | * and the Launcher's read() from /dev/lguest will return. */ | ||
345 | lg->dma_is_pending = 1; | 532 | lg->dma_is_pending = 1; |
346 | lg->pending_dma = udma; | 533 | lg->pending_dma = udma; |
347 | lg->pending_key = ukey; | 534 | lg->pending_key = ukey; |
@@ -350,6 +537,7 @@ unlock: | |||
350 | up_read(fshared); | 537 | up_read(fshared); |
351 | mutex_unlock(&lguest_lock); | 538 | mutex_unlock(&lguest_lock); |
352 | } | 539 | } |
540 | /*:*/ | ||
353 | 541 | ||
354 | void release_all_dma(struct lguest *lg) | 542 | void release_all_dma(struct lguest *lg) |
355 | { | 543 | { |
@@ -365,7 +553,8 @@ void release_all_dma(struct lguest *lg) | |||
365 | up_read(&lg->mm->mmap_sem); | 553 | up_read(&lg->mm->mmap_sem); |
366 | } | 554 | } |
367 | 555 | ||
368 | /* Userspace wants a dma buffer from this guest. */ | 556 | /*L:320 This routine looks for a DMA buffer registered by the Guest on the |
557 | * given key (using the BIND_DMA hypercall). */ | ||
369 | unsigned long get_dma_buffer(struct lguest *lg, | 558 | unsigned long get_dma_buffer(struct lguest *lg, |
370 | unsigned long ukey, unsigned long *interrupt) | 559 | unsigned long ukey, unsigned long *interrupt) |
371 | { | 560 | { |
@@ -374,15 +563,29 @@ unsigned long get_dma_buffer(struct lguest *lg, | |||
374 | struct lguest_dma_info *i; | 563 | struct lguest_dma_info *i; |
375 | struct rw_semaphore *fshared = ¤t->mm->mmap_sem; | 564 | struct rw_semaphore *fshared = ¤t->mm->mmap_sem; |
376 | 565 | ||
566 | /* Take the Big Lguest Lock to stop other Guests sending this Guest DMA | ||
567 | * at the same time. */ | ||
377 | mutex_lock(&lguest_lock); | 568 | mutex_lock(&lguest_lock); |
569 | /* To match between Guests sharing the same underlying memory we steal | ||
570 | * code from the futex infrastructure. This requires that we hold the | ||
571 | * "mmap_sem" for our process (the Launcher), and pass it to the futex | ||
572 | * code. */ | ||
378 | down_read(fshared); | 573 | down_read(fshared); |
574 | |||
575 | /* This can fail if it's not a valid address, or if the address is not | ||
576 | * divisible by 4 (the futex code needs that; we don't really). */ | ||
379 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { | 577 | if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) { |
380 | kill_guest(lg, "bad registered DMA buffer"); | 578 | kill_guest(lg, "bad registered DMA buffer"); |
381 | goto unlock; | 579 | goto unlock; |
382 | } | 580 | } |
581 | /* Search the hash table for matching entries (the Launcher can only | ||
582 | * send to its own Guest for the moment, so the entry must be for this | ||
583 | * Guest) */ | ||
383 | list_for_each_entry(i, &dma_hash[hash(&key)], list) { | 584 | list_for_each_entry(i, &dma_hash[hash(&key)], list) { |
384 | if (key_eq(&key, &i->key) && i->guestid == lg->guestid) { | 585 | if (key_eq(&key, &i->key) && i->guestid == lg->guestid) { |
385 | unsigned int j; | 586 | unsigned int j; |
587 | /* Look through the registered DMA array for an | ||
588 | * available buffer. */ | ||
386 | for (j = 0; j < i->num_dmas; j++) { | 589 | for (j = 0; j < i->num_dmas; j++) { |
387 | struct lguest_dma dma; | 590 | struct lguest_dma dma; |
388 | 591 | ||
@@ -391,6 +594,8 @@ unsigned long get_dma_buffer(struct lguest *lg, | |||
391 | if (dma.used_len == 0) | 594 | if (dma.used_len == 0) |
392 | break; | 595 | break; |
393 | } | 596 | } |
597 | /* Store the interrupt the Guest wants when the buffer | ||
598 | * is used. */ | ||
394 | *interrupt = i->interrupt; | 599 | *interrupt = i->interrupt; |
395 | break; | 600 | break; |
396 | } | 601 | } |
@@ -400,4 +605,12 @@ unlock: | |||
400 | mutex_unlock(&lguest_lock); | 605 | mutex_unlock(&lguest_lock); |
401 | return ret; | 606 | return ret; |
402 | } | 607 | } |
608 | /*:*/ | ||
403 | 609 | ||
610 | /*L:410 That really does complete the Launcher. Not only have we now finished | ||
611 | * the longest chapter in our journey, but this also means we are over halfway | ||
612 | * through! | ||
613 | * | ||
614 | * Enough prevaricating around the bush: it is time for us to dive into the | ||
615 | * core of the Host, in "make Host". | ||
616 | */ | ||
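All the pointer arithmetic above leans on the layout of "struct lguest_dma" from include/linux/lguest_launcher.h. Roughly (check the header for the exact types), it is a small array-of-sections descriptor, and dma_transfer() reaches the used_len of the dma'th registered entry with nothing fancier than sizeof() and offsetof():

struct lguest_dma
{
	/* 0 while the buffer is free; the Host fills in how many bytes
	 * it copied. */
	u32 used_len;
	/* Guest-physical address and length of each section. */
	unsigned long addr[LGUEST_MAX_DMA_SECTIONS];
	u16 len[LGUEST_MAX_DMA_SECTIONS];
};

/* The dma'th entry in the Guest's registered array, as in dma_transfer(): */
used_lenp = dst->dmas
	+ dma * sizeof(struct lguest_dma)
	+ offsetof(struct lguest_dma, used_len);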
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 3e2ddfbc816e..3b9dc123a7df 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -244,6 +244,30 @@ unsigned long get_dma_buffer(struct lguest *lg, unsigned long key, | |||
244 | /* hypercalls.c: */ | 244 | /* hypercalls.c: */ |
245 | void do_hypercalls(struct lguest *lg); | 245 | void do_hypercalls(struct lguest *lg); |
246 | 246 | ||
247 | /*L:035 | ||
248 | * Let's step aside for the moment, to study one important routine that's used | ||
249 | * widely in the Host code. | ||
250 | * | ||
251 | * There are many cases where the Guest does something invalid, like pass crap | ||
252 | * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite | ||
253 | * acceptable to simply terminate the Guest and give the Launcher a nicely | ||
254 | * formatted reason. It's also simpler for the Guest itself, which doesn't | ||
255 | * need to check most hypercalls for "success"; if you're still running, it | ||
256 | * succeeded. | ||
257 | * | ||
258 | * Once this is called, the Guest will never run again, so most Host code can | ||
259 | * call this then continue as if nothing had happened. This means many | ||
260 | * functions don't have to explicitly return an error code, which keeps the | ||
261 | * code simple. | ||
262 | * | ||
263 | * It also means that this can be called more than once: only the first one is | ||
264 | * remembered. The only trick is that we still need to kill the Guest even if | ||
265 | * we can't allocate memory to store the reason. Linux has a neat way of | ||
266 | * packing error codes into invalid pointers, so we use that here. | ||
267 | * | ||
268 | * Like any macro which uses an "if", it is safely wrapped in a run-once "do { | ||
269 | * } while(0)". | ||
270 | */ | ||
247 | #define kill_guest(lg, fmt...) \ | 271 | #define kill_guest(lg, fmt...) \ |
248 | do { \ | 272 | do { \ |
249 | if (!(lg)->dead) { \ | 273 | if (!(lg)->dead) { \ |
@@ -252,6 +276,7 @@ do { \ | |||
252 | (lg)->dead = ERR_PTR(-ENOMEM); \ | 276 | (lg)->dead = ERR_PTR(-ENOMEM); \ |
253 | } \ | 277 | } \ |
254 | } while(0) | 278 | } while(0) |
279 | /* (End of aside) :*/ | ||
255 | 280 | ||
256 | static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr) | 281 | static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr) |
257 | { | 282 | { |
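The "neat way of packing error codes into invalid pointers" which the kill_guest() aside mentions is the standard ERR_PTR()/IS_ERR()/PTR_ERR() trio from <linux/err.h>. A minimal sketch of how lg->dead ends up being interpreted (why_dead() is a made-up name; the real logic is in the read() routine of lguest_user.c below):

#include <linux/err.h>

/* lg->dead is NULL while the Guest is alive, a kmalloc()ed string if
 * kill_guest() managed to allocate one, or an errno packed by ERR_PTR(). */
static long why_dead(struct lguest *lg)
{
	if (!lg->dead)
		return 0;			/* still running */
	if (IS_ERR(lg->dead))			/* packed errno? */
		return PTR_ERR(lg->dead);	/* e.g. -ENOMEM */
	printk(KERN_INFO "lguest: %s\n", lg->dead);
	return 1;
}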
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 6ae86f20ce3d..80d1b58c7698 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -9,33 +9,62 @@ | |||
9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
10 | #include "lg.h" | 10 | #include "lg.h" |
11 | 11 | ||
12 | /*L:030 setup_regs() doesn't really belong in this file, but it gives us an | ||
13 | * early glimpse deeper into the Host so it's worth having here. | ||
14 | * | ||
15 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to | ||
16 | * allocate the structure, so they will be 0. */ | ||
12 | static void setup_regs(struct lguest_regs *regs, unsigned long start) | 17 | static void setup_regs(struct lguest_regs *regs, unsigned long start) |
13 | { | 18 | { |
14 | /* Write out stack in format lguest expects, so we can switch to it. */ | 19 | /* There are four "segment" registers which the Guest needs to boot: |
20 | * The "code segment" register (cs) refers to the kernel code segment | ||
21 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers | ||
22 | * refer to the kernel data segment __KERNEL_DS. | ||
23 | * | ||
24 | * The privilege level is packed into the lower bits. The Guest runs | ||
25 | * at privilege level 1 (GUEST_PL).*/ | ||
15 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; | 26 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; |
16 | regs->cs = __KERNEL_CS|GUEST_PL; | 27 | regs->cs = __KERNEL_CS|GUEST_PL; |
17 | regs->eflags = 0x202; /* Interrupts enabled. */ | 28 | |
29 | /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | ||
30 | * is supposed to always be "1". Bit 9 (0x200) controls whether | ||
31 | * interrupts are enabled. We always leave interrupts enabled while | ||
32 | * running the Guest. */ | ||
33 | regs->eflags = 0x202; | ||
34 | |||
35 | /* The "Extended Instruction Pointer" register says where the Guest is | ||
36 | * running. */ | ||
18 | regs->eip = start; | 37 | regs->eip = start; |
19 | /* esi points to our boot information (physical address 0) */ | 38 | |
39 | /* %esi points to our boot information, at physical address 0, so don't | ||
40 | * touch it. */ | ||
20 | } | 41 | } |
21 | 42 | ||
22 | /* + addr */ | 43 | /*L:310 To send DMA into the Guest, the Launcher needs to be able to ask for a |
44 | * DMA buffer. This is done by writing LHREQ_GETDMA and the key to | ||
45 | * /dev/lguest. */ | ||
23 | static long user_get_dma(struct lguest *lg, const u32 __user *input) | 46 | static long user_get_dma(struct lguest *lg, const u32 __user *input) |
24 | { | 47 | { |
25 | unsigned long key, udma, irq; | 48 | unsigned long key, udma, irq; |
26 | 49 | ||
50 | /* Fetch the key they wrote to us. */ | ||
27 | if (get_user(key, input) != 0) | 51 | if (get_user(key, input) != 0) |
28 | return -EFAULT; | 52 | return -EFAULT; |
53 | /* Look for a free Guest DMA buffer bound to that key. */ | ||
29 | udma = get_dma_buffer(lg, key, &irq); | 54 | udma = get_dma_buffer(lg, key, &irq); |
30 | if (!udma) | 55 | if (!udma) |
31 | return -ENOENT; | 56 | return -ENOENT; |
32 | 57 | ||
33 | /* We put irq number in udma->used_len. */ | 58 | /* We need to tell the Launcher what interrupt the Guest expects after |
59 | * the buffer is filled. We stash it in udma->used_len. */ | ||
34 | lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq); | 60 | lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq); |
61 | |||
62 | /* The (guest-physical) address of the DMA buffer is returned from | ||
63 | * the write(). */ | ||
35 | return udma; | 64 | return udma; |
36 | } | 65 | } |
37 | 66 | ||
38 | /* To force the Guest to stop running and return to the Launcher, the | 67 | /*L:315 To force the Guest to stop running and return to the Launcher, the |
39 | * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest. The | 68 | * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest. The |
40 | * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */ | 69 | * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */ |
41 | static int break_guest_out(struct lguest *lg, const u32 __user *input) | 70 | static int break_guest_out(struct lguest *lg, const u32 __user *input) |
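Seen from userspace, the L:310 comment above boils down to a two-word write. The helper below is only a sketch, not the actual Documentation/lguest/lguest.c code (which does more bookkeeping), but it shows the shape of the exchange: write() returns the Guest-physical address of a free buffer, whose used_len field temporarily holds the interrupt number:

/* Launcher-side sketch: needs <unistd.h>, and the LHREQ_* constants
 * come from linux/lguest_launcher.h.  lguest_fd is the open /dev/lguest. */
static unsigned long get_guest_dma(int lguest_fd, unsigned long key)
{
	u32 req[2] = { LHREQ_GETDMA, key };
	long udma = write(lguest_fd, req, sizeof(req));

	if (udma < 0)
		return 0;	/* write() failed: nothing bound to that key */
	return udma;		/* Guest-physical address of the buffer */
}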
@@ -59,7 +88,8 @@ static int break_guest_out(struct lguest *lg, const u32 __user *input) | |||
59 | } | 88 | } |
60 | } | 89 | } |
61 | 90 | ||
62 | /* + irq */ | 91 | /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt |
92 | * number to /dev/lguest. */ | ||
63 | static int user_send_irq(struct lguest *lg, const u32 __user *input) | 93 | static int user_send_irq(struct lguest *lg, const u32 __user *input) |
64 | { | 94 | { |
65 | u32 irq; | 95 | u32 irq; |
@@ -68,14 +98,19 @@ static int user_send_irq(struct lguest *lg, const u32 __user *input) | |||
68 | return -EFAULT; | 98 | return -EFAULT; |
69 | if (irq >= LGUEST_IRQS) | 99 | if (irq >= LGUEST_IRQS) |
70 | return -EINVAL; | 100 | return -EINVAL; |
101 | /* Next time the Guest runs, the core code will see if it can deliver | ||
102 | * this interrupt. */ | ||
71 | set_bit(irq, lg->irqs_pending); | 103 | set_bit(irq, lg->irqs_pending); |
72 | return 0; | 104 | return 0; |
73 | } | 105 | } |
74 | 106 | ||
107 | /*L:040 Once our Guest is initialized, the Launcher makes it run by reading | ||
108 | * from /dev/lguest. */ | ||
75 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | 109 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) |
76 | { | 110 | { |
77 | struct lguest *lg = file->private_data; | 111 | struct lguest *lg = file->private_data; |
78 | 112 | ||
113 | /* You must write LHREQ_INITIALIZE first! */ | ||
79 | if (!lg) | 114 | if (!lg) |
80 | return -EINVAL; | 115 | return -EINVAL; |
81 | 116 | ||
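Comments L:040 and L:050 are the two halves of the Launcher's main loop: read() runs the Guest until it has something for us, and a write() of LHREQ_IRQ pokes an interrupt back in. A simplified sketch (handle_output() is a stand-in for the Launcher's device code, and the two words handed back by read() are filled in by run_guest() in core.c):

/* Launcher-side sketch of the run loop. */
void handle_output(int fd, unsigned long dma, unsigned long key); /* stand-in */

static void run_guest_loop(int lguest_fd)
{
	for (;;) {
		unsigned long arr[2];	/* the pending DMA and key */
		long readval = read(lguest_fd, arr, sizeof(arr));

		if (readval == sizeof(arr)) {
			/* The Guest sent DMA to its Launcher: service it. */
			handle_output(lguest_fd, arr[0], arr[1]);
			continue;
		}
		break;	/* the Guest died, or the Waker broke us out */
	}
}

/* Interrupt injection (L:050) is just another two-word write. */
static void send_guest_irq(int lguest_fd, u32 irq)
{
	u32 buf[2] = { LHREQ_IRQ, irq };
	write(lguest_fd, buf, sizeof(buf));
}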
@@ -83,27 +118,52 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
83 | if (current != lg->tsk) | 118 | if (current != lg->tsk) |
84 | return -EPERM; | 119 | return -EPERM; |
85 | 120 | ||
121 | /* If the Guest is already dead, we indicate why. */ | ||
86 | if (lg->dead) { | 122 | if (lg->dead) { |
87 | size_t len; | 123 | size_t len; |
88 | 124 | ||
125 | /* lg->dead either contains an error code, or a string. */ | ||
89 | if (IS_ERR(lg->dead)) | 126 | if (IS_ERR(lg->dead)) |
90 | return PTR_ERR(lg->dead); | 127 | return PTR_ERR(lg->dead); |
91 | 128 | ||
129 | /* We can only return as much as the buffer they read with. */ | ||
92 | len = min(size, strlen(lg->dead)+1); | 130 | len = min(size, strlen(lg->dead)+1); |
93 | if (copy_to_user(user, lg->dead, len) != 0) | 131 | if (copy_to_user(user, lg->dead, len) != 0) |
94 | return -EFAULT; | 132 | return -EFAULT; |
95 | return len; | 133 | return len; |
96 | } | 134 | } |
97 | 135 | ||
136 | /* If we returned from read() last time because the Guest sent DMA, | ||
137 | * clear the flag. */ | ||
98 | if (lg->dma_is_pending) | 138 | if (lg->dma_is_pending) |
99 | lg->dma_is_pending = 0; | 139 | lg->dma_is_pending = 0; |
100 | 140 | ||
141 | /* Run the Guest until something interesting happens. */ | ||
101 | return run_guest(lg, (unsigned long __user *)user); | 142 | return run_guest(lg, (unsigned long __user *)user); |
102 | } | 143 | } |
103 | 144 | ||
104 | /* Take: pfnlimit, pgdir, start, pageoffset. */ | 145 | /*L:020 The initialization write supplies 4 32-bit values (in addition to the |
146 | * 32-bit LHREQ_INITIALIZE value). These are: | ||
147 | * | ||
148 | * pfnlimit: The highest (Guest-physical) page number the Guest should be | ||
149 | * allowed to access. The Launcher has to live in Guest memory, so it sets | ||
150 | * this to ensure the Guest can't reach it. | ||
151 | * | ||
152 | * pgdir: The (Guest-physical) address of the top of the initial Guest | ||
153 | * pagetables (which are set up by the Launcher). | ||
154 | * | ||
155 | * start: The first instruction to execute ("eip" in x86-speak). | ||
156 | * | ||
157 | * page_offset: The PAGE_OFFSET constant in the Guest kernel. We should | ||
158 | * probably wean the code off this, but it's a very useful constant! Any | ||
159 | * address above this is within the Guest kernel, and any kernel address can | ||
160 | * be quickly converted from physical to virtual by adding PAGE_OFFSET. It's | ||
161 | * 0xC0000000 (3G) by default, but it's configurable at kernel build time. | ||
162 | */ | ||
105 | static int initialize(struct file *file, const u32 __user *input) | 163 | static int initialize(struct file *file, const u32 __user *input) |
106 | { | 164 | { |
165 | /* "struct lguest" contains everything we (the Host) know about a | ||
166 | * Guest. */ | ||
107 | struct lguest *lg; | 167 | struct lguest *lg; |
108 | int err, i; | 168 | int err, i; |
109 | u32 args[4]; | 169 | u32 args[4]; |
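From the Launcher's side, the whole L:020 description above is one five-word write: the request code followed by the four values in that order. A sketch, assuming Guest memory, the initial page tables and the kernel image are already set up (err() is the usual <err.h> helper):

/* Launcher-side sketch of the very first write to /dev/lguest. */
static void tell_kernel_sketch(int lguest_fd, u32 pfnlimit, u32 pgdir,
			       u32 start, u32 page_offset)
{
	u32 args[] = { LHREQ_INITIALIZE,
		       pfnlimit, pgdir, start, page_offset };

	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
}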
@@ -111,7 +171,7 @@ static int initialize(struct file *file, const u32 __user *input) | |||
111 | /* We grab the Big Lguest lock, which protects the global array | 171 | /* We grab the Big Lguest lock, which protects the global array |
112 | * "lguests" and multiple simultaneous initializations. */ | 172 | * "lguests" and multiple simultaneous initializations. */ |
113 | mutex_lock(&lguest_lock); | 173 | mutex_lock(&lguest_lock); |
114 | 174 | /* You can't initialize twice! Close the device and start again... */ | |
115 | if (file->private_data) { | 175 | if (file->private_data) { |
116 | err = -EBUSY; | 176 | err = -EBUSY; |
117 | goto unlock; | 177 | goto unlock; |
@@ -122,37 +182,70 @@ static int initialize(struct file *file, const u32 __user *input) | |||
122 | goto unlock; | 182 | goto unlock; |
123 | } | 183 | } |
124 | 184 | ||
185 | /* Find an unused guest. */ | ||
125 | i = find_free_guest(); | 186 | i = find_free_guest(); |
126 | if (i < 0) { | 187 | if (i < 0) { |
127 | err = -ENOSPC; | 188 | err = -ENOSPC; |
128 | goto unlock; | 189 | goto unlock; |
129 | } | 190 | } |
191 | /* OK, we have an index into the "lguests" array: "lg" is a convenient | ||
192 | * pointer. */ | ||
130 | lg = &lguests[i]; | 193 | lg = &lguests[i]; |
194 | |||
195 | /* Populate the easy fields of our "struct lguest" */ | ||
131 | lg->guestid = i; | 196 | lg->guestid = i; |
132 | lg->pfn_limit = args[0]; | 197 | lg->pfn_limit = args[0]; |
133 | lg->page_offset = args[3]; | 198 | lg->page_offset = args[3]; |
199 | |||
200 | /* We need a complete page for the Guest registers: they are accessible | ||
201 | * to the Guest and we can only grant it access to whole pages. */ | ||
134 | lg->regs_page = get_zeroed_page(GFP_KERNEL); | 202 | lg->regs_page = get_zeroed_page(GFP_KERNEL); |
135 | if (!lg->regs_page) { | 203 | if (!lg->regs_page) { |
136 | err = -ENOMEM; | 204 | err = -ENOMEM; |
137 | goto release_guest; | 205 | goto release_guest; |
138 | } | 206 | } |
207 | /* We actually put the registers at the bottom of the page. */ | ||
139 | lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs); | 208 | lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs); |
140 | 209 | ||
210 | /* Initialize the Guest's shadow page tables, using the toplevel | ||
211 | * address the Launcher gave us. This allocates memory, so can | ||
212 | * fail. */ | ||
141 | err = init_guest_pagetable(lg, args[1]); | 213 | err = init_guest_pagetable(lg, args[1]); |
142 | if (err) | 214 | if (err) |
143 | goto free_regs; | 215 | goto free_regs; |
144 | 216 | ||
217 | /* Now we initialize the Guest's registers, handing it the start | ||
218 | * address. */ | ||
145 | setup_regs(lg->regs, args[2]); | 219 | setup_regs(lg->regs, args[2]); |
220 | |||
221 | /* There are a couple of GDT entries the Guest expects when first | ||
222 | * booting. */ | ||
146 | setup_guest_gdt(lg); | 223 | setup_guest_gdt(lg); |
224 | |||
225 | /* The timer for lguest's clock needs initialization. */ | ||
147 | init_clockdev(lg); | 226 | init_clockdev(lg); |
227 | |||
228 | /* We keep a pointer to the Launcher task (ie. current task) for when | ||
229 | * other Guests want to wake this one (inter-Guest I/O). */ | ||
148 | lg->tsk = current; | 230 | lg->tsk = current; |
231 | /* We need to keep a pointer to the Launcher's memory map, because if | ||
232 | * the Launcher dies we need to clean it up. If we don't keep a | ||
233 | * reference, it is destroyed before close() is called. */ | ||
149 | lg->mm = get_task_mm(lg->tsk); | 234 | lg->mm = get_task_mm(lg->tsk); |
235 | |||
236 | /* Initialize the queue for the waker to wait on */ | ||
150 | init_waitqueue_head(&lg->break_wq); | 237 | init_waitqueue_head(&lg->break_wq); |
238 | |||
239 | /* We remember which CPU's pages this Guest used last, for optimization | ||
240 | * when the same Guest runs on the same CPU twice. */ | ||
151 | lg->last_pages = NULL; | 241 | lg->last_pages = NULL; |
242 | |||
243 | /* We keep our "struct lguest" in the file's private_data. */ | ||
152 | file->private_data = lg; | 244 | file->private_data = lg; |
153 | 245 | ||
154 | mutex_unlock(&lguest_lock); | 246 | mutex_unlock(&lguest_lock); |
155 | 247 | ||
248 | /* And because this is a write() call, we return the length used. */ | ||
156 | return sizeof(args); | 249 | return sizeof(args); |
157 | 250 | ||
158 | free_regs: | 251 | free_regs: |
@@ -164,9 +257,15 @@ unlock: | |||
164 | return err; | 257 | return err; |
165 | } | 258 | } |
166 | 259 | ||
260 | /*L:010 The first operation the Launcher does must be a write. All writes | ||
261 | * start with a 32-bit number: for the first write this must be | ||
262 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use | ||
263 | * writes of other values to get DMA buffers and send interrupts. */ | ||
167 | static ssize_t write(struct file *file, const char __user *input, | 264 | static ssize_t write(struct file *file, const char __user *input, |
168 | size_t size, loff_t *off) | 265 | size_t size, loff_t *off) |
169 | { | 266 | { |
267 | /* Once the guest is initialized, we hold the "struct lguest" in the | ||
268 | * file private data. */ | ||
170 | struct lguest *lg = file->private_data; | 269 | struct lguest *lg = file->private_data; |
171 | u32 req; | 270 | u32 req; |
172 | 271 | ||
@@ -174,8 +273,11 @@ static ssize_t write(struct file *file, const char __user *input, | |||
174 | return -EFAULT; | 273 | return -EFAULT; |
175 | input += sizeof(req); | 274 | input += sizeof(req); |
176 | 275 | ||
276 | /* If you haven't initialized, you must do that first. */ | ||
177 | if (req != LHREQ_INITIALIZE && !lg) | 277 | if (req != LHREQ_INITIALIZE && !lg) |
178 | return -EINVAL; | 278 | return -EINVAL; |
279 | |||
280 | /* Once the Guest is dead, all you can do is read() why it died. */ | ||
179 | if (lg && lg->dead) | 281 | if (lg && lg->dead) |
180 | return -ENOENT; | 282 | return -ENOENT; |
181 | 283 | ||
@@ -197,33 +299,72 @@ static ssize_t write(struct file *file, const char __user *input, | |||
197 | } | 299 | } |
198 | } | 300 | } |
199 | 301 | ||
302 | /*L:060 The final piece of interface code is the close() routine. It reverses | ||
303 | * everything done in initialize(). This is usually called because the | ||
304 | * Launcher exited. | ||
305 | * | ||
306 | * Note that the close routine returns 0 or a negative error number: it can't | ||
307 | * really fail, but it can whine. I blame Sun for this wart, and K&R C for | ||
308 | * letting them do it. :*/ | ||
200 | static int close(struct inode *inode, struct file *file) | 309 | static int close(struct inode *inode, struct file *file) |
201 | { | 310 | { |
202 | struct lguest *lg = file->private_data; | 311 | struct lguest *lg = file->private_data; |
203 | 312 | ||
313 | /* If we never successfully initialized, there's nothing to clean up */ | ||
204 | if (!lg) | 314 | if (!lg) |
205 | return 0; | 315 | return 0; |
206 | 316 | ||
317 | /* We need the big lock, to protect from inter-guest I/O and other | ||
318 | * Launchers initializing guests. */ | ||
207 | mutex_lock(&lguest_lock); | 319 | mutex_lock(&lguest_lock); |
208 | /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */ | 320 | /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */ |
209 | hrtimer_cancel(&lg->hrt); | 321 | hrtimer_cancel(&lg->hrt); |
322 | /* Free any DMA buffers the Guest had bound. */ | ||
210 | release_all_dma(lg); | 323 | release_all_dma(lg); |
324 | /* Free up the shadow page tables for the Guest. */ | ||
211 | free_guest_pagetable(lg); | 325 | free_guest_pagetable(lg); |
326 | /* Now that all the memory cleanups are done, it's safe to release the | ||
327 | * Launcher's memory management structure. */ | ||
212 | mmput(lg->mm); | 328 | mmput(lg->mm); |
329 | /* If lg->dead doesn't contain an error code it will be NULL or a | ||
330 | * kmalloc()ed string, either of which is ok to hand to kfree(). */ | ||
213 | if (!IS_ERR(lg->dead)) | 331 | if (!IS_ERR(lg->dead)) |
214 | kfree(lg->dead); | 332 | kfree(lg->dead); |
333 | /* We can free up the register page we allocated. */ | ||
215 | free_page(lg->regs_page); | 334 | free_page(lg->regs_page); |
335 | /* We clear the entire structure, which also marks it as free for the | ||
336 | * next user. */ | ||
216 | memset(lg, 0, sizeof(*lg)); | 337 | memset(lg, 0, sizeof(*lg)); |
338 | /* Release lock and exit. */ | ||
217 | mutex_unlock(&lguest_lock); | 339 | mutex_unlock(&lguest_lock); |
340 | |||
218 | return 0; | 341 | return 0; |
219 | } | 342 | } |
220 | 343 | ||
344 | /*L:000 | ||
345 | * Welcome to our journey through the Launcher! | ||
346 | * | ||
347 | * The Launcher is the Host userspace program which sets up, runs and services | ||
348 | * the Guest. In fact, many comments in the Drivers which refer to "the Host" | ||
349 | * doing things are inaccurate: the Launcher does all the device handling for | ||
350 | * the Guest. The Guest can't tell what's done by the Launcher and what by | ||
351 | * the Host. | ||
352 | * | ||
353 | * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we | ||
354 | * shall see more of that later. | ||
355 | * | ||
356 | * We begin our understanding with the Host kernel interface which the Launcher | ||
357 | * uses: reading and writing a character device called /dev/lguest. All the | ||
358 | * work happens in the read(), write() and close() routines: */ | ||
221 | static struct file_operations lguest_fops = { | 359 | static struct file_operations lguest_fops = { |
222 | .owner = THIS_MODULE, | 360 | .owner = THIS_MODULE, |
223 | .release = close, | 361 | .release = close, |
224 | .write = write, | 362 | .write = write, |
225 | .read = read, | 363 | .read = read, |
226 | }; | 364 | }; |
365 | |||
366 | /* This is a textbook example of a "misc" character device. Populate a "struct | ||
367 | * miscdevice" and register it with misc_register(). */ | ||
227 | static struct miscdevice lguest_dev = { | 368 | static struct miscdevice lguest_dev = { |
228 | .minor = MISC_DYNAMIC_MINOR, | 369 | .minor = MISC_DYNAMIC_MINOR, |
229 | .name = "lguest", | 370 | .name = "lguest", |