author     Rusty Russell <rusty@rustcorp.com.au>  2007-10-21 21:29:44 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>  2007-10-23 01:49:57 -0400
commit     814a0e5cdfbd384f4bf7a8443f9c3b885f413d58 (patch)
tree       4cb82a6ec1db54ce00fbc3700762cd3dc46a0b4e /Documentation/lguest/lguest.c
parent     1f5a29022ac66bc90cbe2a2162f56e9cd7b393ef (diff)
Revert lguest magic and use hook in head.S
Version 2.07 of the boot protocol uses offset 0x23C for the hardware_subarch field, which for lguest is "1". This allows us to use the standard boot entry point rather than the "GenuineLguest" string hack. The standard entry point also clears the BSS and copies the boot parameters and command line for us, saving more code.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'Documentation/lguest/lguest.c')
-rw-r--r--  Documentation/lguest/lguest.c  42
1 file changed, 13 insertions(+), 29 deletions(-)
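
As background for the hardware_subarch hook the commit message relies on, here is a rough C sketch of how an early entry path can dispatch on that field. It is only an illustration under assumed names (dispatch_subarch, default_entry and lguest_entry_stub are ours, not the kernel's actual head.S symbols); the one value taken from the commit itself is lguest's subarch number "1" stored at offset 0x23C.

#include <stdint.h>
#include <stdio.h>

/* Subarch numbers: 0 is taken here to mean plain PC hardware; "1" is the
 * lguest value this patch writes into the boot header at 0x23C. */
enum hardware_subarch {
	SUBARCH_PC	= 0,
	SUBARCH_LGUEST	= 1,
};

typedef void (*entry_fn)(void);

static void default_entry(void)     { printf("native boot path\n"); }
static void lguest_entry_stub(void) { printf("lguest paravirtual setup\n"); }

/* Jump table indexed by the boot header's hardware_subarch field. */
static entry_fn subarch_entries[] = {
	[SUBARCH_PC]     = default_entry,
	[SUBARCH_LGUEST] = lguest_entry_stub,
};

static void dispatch_subarch(uint32_t subarch)
{
	/* Unknown values fall back to the normal PC path. */
	if (subarch >= sizeof(subarch_entries) / sizeof(subarch_entries[0]))
		subarch = SUBARCH_PC;
	subarch_entries[subarch]();
}

int main(void)
{
	dispatch_subarch(SUBARCH_LGUEST);	/* what a subarch of "1" selects */
	return 0;
}
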
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index cbf4becd2667..004c5c6aba6a 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -251,23 +251,6 @@ static void *get_pages(unsigned int num)
 	return addr;
 }
 
-/* To find out where to start we look for the magic Guest string, which marks
- * the code we see in lguest_asm.S. This is a hack which we are currently
- * plotting to replace with the normal Linux entry point. */
-static unsigned long entry_point(const void *start, const void *end)
-{
-	const void *p;
-
-	/* The scan gives us the physical starting address. We boot with
-	 * pagetables set up with virtual and physical the same, so that's
-	 * OK. */
-	for (p = start; p < end; p++)
-		if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0)
-			return to_guest_phys(p + strlen("GenuineLguest"));
-
-	errx(1, "Is this image a genuine lguest?");
-}
-
 /* This routine is used to load the kernel or initrd. It tries mmap, but if
  * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
  * it falls back to reading the memory in. */
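
The comment kept by this hunk describes the loading strategy: try mmap() at a fixed address, and fall back to read() when the file isn't laid out on page boundaries (the Plan 9 case). A minimal standalone sketch of that pattern follows; the helper name map_file_at is ours, and error handling is simplified relative to lguest.c.

#include <err.h>
#include <sys/mman.h>
#include <unistd.h>

void map_file_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	/* First try to map the file straight over the target memory. */
	void *m = mmap(addr, len, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_FIXED | MAP_PRIVATE, fd, offset);

	/* mmap() fails if the offset isn't page aligned, so copy instead. */
	if (m == MAP_FAILED) {
		if (lseek(fd, offset, SEEK_SET) < 0)
			err(1, "seeking to offset %lu", offset);
		if (read(fd, addr, len) != (ssize_t)len)
			err(1, "reading %lu bytes at offset %lu", len, offset);
	}
}
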
@@ -303,7 +286,6 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
  * We return the starting address. */
 static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
 {
-	void *start = (void *)-1, *end = NULL;
 	Elf32_Phdr phdr[ehdr->e_phnum];
 	unsigned int i;
 
@@ -335,19 +317,13 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
 		verbose("Section %i: size %i addr %p\n",
 			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);
 
-		/* We track the first and last address we mapped, so we can
-		 * tell entry_point() where to scan. */
-		if (from_guest_phys(phdr[i].p_paddr) < start)
-			start = from_guest_phys(phdr[i].p_paddr);
-		if (from_guest_phys(phdr[i].p_paddr) + phdr[i].p_filesz > end)
-			end=from_guest_phys(phdr[i].p_paddr)+phdr[i].p_filesz;
-
 		/* We map this section of the file at its physical address. */
 		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
 		       phdr[i].p_offset, phdr[i].p_filesz);
 	}
 
-	return entry_point(start, end);
+	/* The entry point is given in the ELF header. */
+	return ehdr->e_entry;
 }
 
 /*L:160 Unfortunately the entire ELF image isn't compressed: the segments
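
With the magic-string scan gone, map_elf() simply returns e_entry from the ELF header it was handed; because the Guest boots with page tables mapping virtual equal to physical, that address can be used directly. A short sketch (our code, not lguest.c's) of pulling the entry point out of a 32-bit ELF file:

#include <elf.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

unsigned long elf_entry_point(int elf_fd)
{
	Elf32_Ehdr ehdr;

	/* The ELF header sits at the very start of the file. */
	if (lseek(elf_fd, 0, SEEK_SET) < 0)
		err(1, "seeking to ELF header");
	if (read(elf_fd, &ehdr, sizeof(ehdr)) != (ssize_t)sizeof(ehdr))
		err(1, "reading ELF header");

	/* Check the 0x7f 'E' 'L' 'F' magic before trusting any field. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
		errx(1, "not an ELF file");

	/* e_entry is where execution is meant to start. */
	return ehdr.e_entry;
}
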
@@ -374,7 +350,8 @@ static unsigned long unpack_bzimage(int fd)
 
 	verbose("Unpacked size %i addr %p\n", len, img);
 
-	return entry_point(img, img + len);
+	/* The entry point for a bzImage is always the first byte */
+	return (unsigned long)img;
 }
 
 /*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're
@@ -1684,8 +1661,15 @@ int main(int argc, char *argv[])
 	*(u32 *)(boot + 0x228) = 4096;
 	concat(boot + 4096, argv+optind+2);
 
-	/* The guest type value of "1" tells the Guest it's under lguest. */
-	*(int *)(boot + 0x23c) = 1;
+	/* Boot protocol version: 2.07 supports the fields for lguest. */
+	*(u16 *)(boot + 0x206) = 0x207;
+
+	/* The hardware_subarch value of "1" tells the Guest it's an lguest. */
+	*(u32 *)(boot + 0x23c) = 1;
+
+	/* Set bit 6 of the loadflags (aka. KEEP_SEGMENTS) so the entry path
+	 * does not try to reload segment registers. */
+	*(u8 *)(boot + 0x211) |= (1 << 6);
 
 	/* We tell the kernel to initialize the Guest: this returns the open
 	 * /dev/lguest file descriptor. */
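
For reference, here are the three stores added above rewritten with named offsets. The offsets and the bit come from the hunk itself (0x206 for the boot protocol version, 0x211 for loadflags with KEEP_SEGMENTS as bit 6, 0x23C for hardware_subarch); the macro names and the helper stamp_boot_params() are only our shorthand, not definitions from lguest.c.

#include <stdint.h>

#define BP_VERSION	0x206		/* u16: boot protocol version */
#define BP_LOADFLAGS	0x211		/* u8:  loadflags */
#define BP_SUBARCH	0x23c		/* u32: hardware_subarch */
#define KEEP_SEGMENTS	(1 << 6)	/* loadflags bit 6 */

static void stamp_boot_params(void *boot)
{
	char *b = boot;

	/* Claim boot protocol 2.07, which defines hardware_subarch. */
	*(uint16_t *)(b + BP_VERSION) = 0x207;

	/* hardware_subarch "1" tells the Guest it is running under lguest. */
	*(uint32_t *)(b + BP_SUBARCH) = 1;

	/* Keep the launcher's segments: don't reload them on entry. */
	*(uint8_t *)(b + BP_LOADFLAGS) |= KEEP_SEGMENTS;
}

int main(void)
{
	static char boot_page[4096];	/* stand-in for the Guest's boot page */

	stamp_boot_params(boot_page);
	return 0;
}
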