author     David Gibson <david@gibson.dropbear.id.au>   2007-03-04 22:24:52 -0500
committer  Paul Mackerras <paulus@samba.org>            2007-03-12 22:35:01 -0400
commit     ad9d2716cfc1cda5a7e0d7bc0db45e3af8a4adbb (patch)
tree       d91ea074bd35c61c86b3c012a41622d5016fc919 /arch/powerpc/boot/main.c
parent     cfbff8a3802542c4d8b2290c49b1a59128c4a380 (diff)
[POWERPC] zImage: Add more flexible gunzip convenience functions
At present, arch/powerpc/boot/main.c includes a gunzip() function which
is a convenient wrapper around zlib.  However, it doesn't conveniently
allow decompressing part of an image to one location, then the
remainder to a different address.

This patch adds a new set of more flexible convenience wrappers around
zlib, moving them to their own file, gunzip_util.c, in the process.
These wrappers allow decompressing sections of the compressed image to
different locations.  In addition, they transparently handle
uncompressed data, avoiding special-case code to handle uncompressed
vmlinux images.

The patch also converts main.c to use the new wrappers, using the new
flexibility to avoid decompressing the vmlinux's ELF header twice as we
did previously.  That in turn means we avoid extending our allocations
for the vmlinux to allow space for the extra copy of the ELF header.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
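For orientation, the sketch below shows the staged-decompression flow that the new prep_kernel() uses, based on the calls visible in this diff. The exact prototypes live in gunzip_util.h (not part of this file's diff), and the wrapper function load_kernel_image() is purely illustrative, not part of the patch:

#include "gunzip_util.h"	/* new header introduced by this patch */

static struct gunzip_state gzstate;
static char elfheader[256];

/* Hypothetical illustration of the call sequence used by prep_kernel():
 * decompress the ELF header to one buffer, skip the rest of the header,
 * then stream the kernel proper to its final destination.
 */
static void load_kernel_image(void *vmlinuz_addr, unsigned long vmlinuz_size,
			      void *vmlinux_addr, unsigned long vmlinux_memsize,
			      unsigned long elfoffset)
{
	unsigned long len;

	/* Prime the decompressor; uncompressed input is handled transparently. */
	gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size);

	/* Pull out just enough to parse the ELF/program headers. */
	gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));

	/* Skip from the end of our header buffer up to the load data... */
	gunzip_discard(&gzstate, elfoffset - sizeof(elfheader));

	/* ...and decompress the remainder straight into the kernel's memory. */
	len = gunzip_finish(&gzstate, vmlinux_addr, vmlinux_memsize);
	(void)len;	/* prep_kernel() only prints this */
}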
Diffstat (limited to 'arch/powerpc/boot/main.c')
-rw-r--r--  arch/powerpc/boot/main.c  113
1 file changed, 20 insertions(+), 93 deletions(-)
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 6f6b50d238b6..404620a9e733 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -14,8 +14,8 @@
14#include "page.h" 14#include "page.h"
15#include "string.h" 15#include "string.h"
16#include "stdio.h" 16#include "stdio.h"
17#include "zlib.h"
18#include "ops.h" 17#include "ops.h"
18#include "gunzip_util.h"
19#include "flatdevtree.h" 19#include "flatdevtree.h"
20 20
21extern void flush_cache(void *, unsigned long); 21extern void flush_cache(void *, unsigned long);
@@ -30,6 +30,8 @@ extern char _initrd_end[];
 extern char _dtb_start[];
 extern char _dtb_end[];
 
+static struct gunzip_state gzstate;
+
 struct addr_range {
 	unsigned long addr;
 	unsigned long size;
@@ -42,71 +44,12 @@ static struct addr_range initrd;
 static unsigned long elfoffset;
 static int is_64bit;
 
-/* scratch space for gunzip; 46912 is from zlib_inflate_workspacesize() */
-static char scratch[46912];
 static char elfheader[256];
 
 typedef void (*kernel_entry_t)(unsigned long, unsigned long, void *);
 
 #undef DEBUG
 
-#define HEAD_CRC	2
-#define EXTRA_FIELD	4
-#define ORIG_NAME	8
-#define COMMENT		0x10
-#define RESERVED	0xe0
-
-static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
-{
-	z_stream s;
-	int r, i, flags;
-
-	/* skip header */
-	i = 10;
-	flags = src[3];
-	if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
-		printf("bad gzipped data\n\r");
-		exit();
-	}
-	if ((flags & EXTRA_FIELD) != 0)
-		i = 12 + src[10] + (src[11] << 8);
-	if ((flags & ORIG_NAME) != 0)
-		while (src[i++] != 0)
-			;
-	if ((flags & COMMENT) != 0)
-		while (src[i++] != 0)
-			;
-	if ((flags & HEAD_CRC) != 0)
-		i += 2;
-	if (i >= *lenp) {
-		printf("gunzip: ran out of data in header\n\r");
-		exit();
-	}
-
-	if (zlib_inflate_workspacesize() > sizeof(scratch)) {
-		printf("gunzip needs more mem\n");
-		exit();
-	}
-	memset(&s, 0, sizeof(s));
-	s.workspace = scratch;
-	r = zlib_inflateInit2(&s, -MAX_WBITS);
-	if (r != Z_OK) {
-		printf("inflateInit2 returned %d\n\r", r);
-		exit();
-	}
-	s.next_in = src + i;
-	s.avail_in = *lenp - i;
-	s.next_out = dst;
-	s.avail_out = dstlen;
-	r = zlib_inflate(&s, Z_FULL_FLUSH);
-	if (r != Z_OK && r != Z_STREAM_END) {
-		printf("inflate returned %d msg: %s\n\r", r, s.msg);
-		exit();
-	}
-	*lenp = s.next_out - (unsigned char *) dst;
-	zlib_inflateEnd(&s);
-}
-
 static int is_elf64(void *hdr)
 {
 	Elf64_Ehdr *elf64 = hdr;
@@ -132,8 +75,8 @@ static int is_elf64(void *hdr)
 		return 0;
 
 	elfoffset = (unsigned long)elf64ph->p_offset;
-	vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
-	vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;
+	vmlinux.size = (unsigned long)elf64ph->p_filesz;
+	vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
 
 	is_64bit = 1;
 	return 1;
@@ -164,8 +107,8 @@ static int is_elf32(void *hdr)
 		return 0;
 
 	elfoffset = elf32ph->p_offset;
-	vmlinux.size = elf32ph->p_filesz + elf32ph->p_offset;
-	vmlinux.memsize = elf32ph->p_memsz + elf32ph->p_offset;
+	vmlinux.size = elf32ph->p_filesz;
+	vmlinux.memsize = elf32ph->p_memsz;
 	return 1;
 }
 
@@ -177,13 +120,8 @@ static void prep_kernel(unsigned long a1, unsigned long a2)
 	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
 
 	/* gunzip the ELF header of the kernel */
-	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
-		len = vmlinuz.size;
-		gunzip(elfheader, sizeof(elfheader),
-		       (unsigned char *)vmlinuz.addr, &len);
-	} else
-		memcpy(elfheader, (const void *)vmlinuz.addr,
-		       sizeof(elfheader));
+	gunzip_start(&gzstate, (void *)vmlinuz.addr, vmlinuz.size);
+	gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));
 
 	if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
 		printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -192,10 +130,10 @@ static void prep_kernel(unsigned long a1, unsigned long a2)
 	if (platform_ops.image_hdr)
 		platform_ops.image_hdr(elfheader);
 
-	/* We need to alloc the memsize plus the file offset since gzip
-	 * will expand the header (file offset), then the kernel, then
-	 * possible rubbish we don't care about. But the kernel bss must
-	 * be claimed (it will be zero'd by the kernel itself)
+	/* We need to alloc the memsize: gzip will expand the kernel
+	 * text/data, then possible rubbish we don't care about. But
+	 * the kernel bss must be claimed (it will be zero'd by the
+	 * kernel itself)
 	 */
 	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
 	vmlinux.addr = (unsigned long)malloc(vmlinux.memsize);
@@ -237,24 +175,13 @@ static void prep_kernel(unsigned long a1, unsigned long a2)
 	}
 
 	/* Eventually gunzip the kernel */
-	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
-		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
-		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
-		len = vmlinuz.size;
-		gunzip((void *)vmlinux.addr, vmlinux.memsize,
-		       (unsigned char *)vmlinuz.addr, &len);
-		printf("done 0x%lx bytes\n\r", len);
-	} else {
-		memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,
-			vmlinuz.size);
-	}
-
-	/* Skip over the ELF header */
-#ifdef DEBUG
-	printf("... skipping 0x%lx bytes of ELF header\n\r",
-	       elfoffset);
-#endif
-	vmlinux.addr += elfoffset;
+	printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
+	       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
+	/* discard up to the actual load data */
+	gunzip_discard(&gzstate, elfoffset - sizeof(elfheader));
+	len = gunzip_finish(&gzstate, (void *)vmlinux.addr,
+			    vmlinux.memsize);
+	printf("done 0x%lx bytes\n\r", len);
 
 	flush_cache((void *)vmlinux.addr, vmlinux.size);
 }