author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-01-13 00:19:34 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>     2010-01-13 19:13:39 -0500
commit    6846ee5ca68d81e6baccf0d56221d7a00c1be18b (patch)
tree      f2c1fdbeb24ca3de7ba23a4c0b3d2ce176abcf41 /lib/zlib_inflate
parent    8866f9df4a5b91a4e514ccc76472261a644a3848 (diff)
zlib: Fix build of powerpc boot wrapper
Commit ac4c2a3bbe5db5fc570b1d0ee1e474db7cb22585 broke the build of all powerpc boot wrappers. It attempted to add an include of autoconf.h but used the wrong path for it. It also adds -D__KERNEL__ to our boot wrapper, both things we had deliberately avoided so far.

We want our boot wrapper to remain independent enough of the kernel for various reasons, one of them being that you can "wrap" an existing kernel at distro install time. That makes it possible to ship one kernel image and a set of boot wrappers for different platforms; the wrappers don't have to be built out of the same kernel build tree.

It's also incorrect to do what the patch does in our boot environment, since we may not have a proper alignment exception handler, which means we may not be able to fix up the few cases where an unaligned access needs software emulation (depending on the core variant, for example when crossing page or segment boundaries).

This patch fixes it by putting the old code back in and using the new "fancy" variant only when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set, which happens not to be set on powerpc since we don't include autoconf.h. It also reverts the changes to our boot wrapper Makefile.

This means that x86 should, afaik, keep the optimisations, since its boot wrapper does include autoconf.h and define __KERNEL__ (though I doubt they make that much difference outside of slow embedded processors).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
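At its core the fix is a plain conditional-compilation gate around the unaligned-access helpers. The sketch below is a simplified illustration rather than the verbatim patch (the real hunks are in the diff below; only the config symbol and the two include lines come from the actual change, the comments are explanatory):

/* Sketch: gate added near the top of lib/zlib_inflate/inffast.c */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Kernel builds (and the x86 boot wrapper, which defines __KERNEL__ and
 * pulls in autoconf.h) see this symbol, so they keep the unaligned
 * 16-bit copy variant and the headers it needs. */
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#endif
/* The powerpc boot wrapper never includes autoconf.h, so the symbol stays
 * undefined there and inflate_fast() uses the restored byte-at-a-time
 * copy loop instead. */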
Diffstat (limited to 'lib/zlib_inflate')
-rw-r--r--  lib/zlib_inflate/inffast.c | 32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 05e1559fa156..215447c55261 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -4,12 +4,25 @@
  */
 
 #include <linux/zutil.h>
-#include <asm/unaligned.h>
-#include <asm/byteorder.h>
 #include "inftrees.h"
 #include "inflate.h"
 #include "inffast.h"
 
+/* Only do the unaligned "Faster" variant when
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
+ *
+ * On powerpc, it won't be as we don't include autoconf.h
+ * automatically for the boot wrapper, which is intended as
+ * we run in an environment where we may not be able to deal
+ * with (even rare) alignment faults. In addition, we do not
+ * define __KERNEL__ for arch/powerpc/boot unlike x86
+ */
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#endif
+
 #ifndef ASMINF
 
 /* Allow machine dependent optimization for post-increment or pre-increment.
@@ -243,6 +256,7 @@ void inflate_fast(z_streamp strm, unsigned start)
                     }
                 }
                 else {
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                     unsigned short *sout;
                     unsigned long loops;
 
@@ -284,6 +298,20 @@ void inflate_fast(z_streamp strm, unsigned start)
                     }
                     if (len & 1)
                         PUP(out) = PUP(from);
+#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+                    from = out - dist;          /* copy direct from output */
+                    do {                        /* minimum length is three */
+                        PUP(out) = PUP(from);
+                        PUP(out) = PUP(from);
+                        PUP(out) = PUP(from);
+                        len -= 3;
+                    } while (len > 2);
+                    if (len) {
+                        PUP(out) = PUP(from);
+                        if (len > 1)
+                            PUP(out) = PUP(from);
+                    }
+#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
                 }
             }
             else if ((op & 64) == 0) {      /* 2nd level distance code */
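A side note on the restored #else branch: the copy source is the decompressed output itself (from = out - dist), and when the back-reference distance is shorter than the match length the source and destination overlap, which is why the loop copies one byte at a time rather than calling memcpy(). The following standalone sketch (ordinary userspace C, not kernel code; PUP here is a simplified post-increment stand-in for the macro in inffast.c) shows the overlap case replicating a two-byte pattern:

/*
 * Standalone demonstration of the byte-at-a-time fallback copy with an
 * overlapping LZ77 back-reference (dist < len): each iteration re-reads
 * bytes it wrote moments earlier, expanding the run.
 */
#include <stdio.h>

#define PUP(p) *(p)++                   /* illustrative post-increment flavour */

int main(void)
{
    unsigned char buf[32] = "ab";       /* output produced so far: "ab" */
    unsigned char *out = buf + 2;       /* next byte to write */
    unsigned dist = 2, len = 6;         /* copy 6 bytes starting 2 bytes back */
    unsigned char *from = out - dist;   /* copy direct from output */

    do {                                /* minimum match length is three */
        PUP(out) = PUP(from);
        PUP(out) = PUP(from);
        PUP(out) = PUP(from);
        len -= 3;
    } while (len > 2);
    if (len) {
        PUP(out) = PUP(from);
        if (len > 1)
            PUP(out) = PUP(from);
    }

    printf("%s\n", buf);                /* prints "abababab" */
    return 0;
}

Built with any C compiler, this prints "abababab": the byte loop preserves the LZ77 run-expansion semantics that an overlapping bulk copy would not naively provide.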