-rw-r--r--	arch/h8300/Kconfig	1
-rw-r--r--	arch/h8300/include/asm/hash.h	53
2 files changed, 54 insertions, 0 deletions
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 986ea84caaed..6c583dbbc119 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -20,6 +20,7 @@ config H8300
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_HASH
 
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
diff --git a/arch/h8300/include/asm/hash.h b/arch/h8300/include/asm/hash.h
new file mode 100644
index 000000000000..04cfbd2bd850
--- /dev/null
+++ b/arch/h8300/include/asm/hash.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * The later H8SX models have a 32x32-bit multiply, but the H8/300H
+ * and H8S have only 16x16->32.  Since it's tolerably compact, this is
+ * basically an inlined version of the __mulsi3 code.  Since the inputs
+ * are not expected to be small, it's also simplified by skipping the
+ * early-out checks.
+ *
+ * (Since neither CPU has any multi-bit shift instructions, a
+ * shift-and-add version is a non-starter.)
+ *
+ * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
+ * since that is heavily dependent on rotates.  Which, as mentioned, suck
+ * horribly on H8.
+ */
+
+#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * Multiply by k = 0x61C88647.  Fitting this into three registers requires
+ * one extra instruction, but reducing register pressure will probably
+ * make that back and then some.
+ *
+ * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
+ * low half.  So if %1 is er4, then %e1 is e4 and %f1 is r4.
+ *
+ * This has been designed to modify x in place, since that's the most
+ * common usage, but preserve k, since hash_64() makes two calls in
+ * quick succession.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 temp;
+
+	asm(   "mov.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%0"		/* klow * xhigh */
+	"\n	mov.w	%f0,%e1"	/* The extra instruction */
+	"\n	mov.w	%f1,%f0"
+	"\n	mulxu.w	%e2,%0"		/* khigh * xlow */
+	"\n	add.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%1"		/* klow * xlow */
+	"\n	add.w	%f0,%e1"
+	: "=&r" (temp), "=r" (x)
+	: "%r" (GOLDEN_RATIO_32), "1" (x));
+	return x;
+}
+
+#endif
+#endif /* _ASM_HASH_H */
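
For reference, the truncated 32x32->32 multiply that the inline asm assembles from 16x16->32 partial products can be sketched in plain C as below. This is an illustration only, not part of the patch: the helper name hash32_by_parts and the use of uint32_t are made up for the example, and 0x61C88647 is simply the GOLDEN_RATIO_32 value quoted in the comment above.

#include <stdint.h>

/*
 * Illustration (hypothetical helper, not in the patch): multiply x by
 * k = 0x61C88647 keeping only the low 32 bits, using 16x16->32
 * partial products the way the inline asm does.
 */
static uint32_t hash32_by_parts(uint32_t x)
{
	const uint32_t k = 0x61C88647;	/* GOLDEN_RATIO_32 */
	uint32_t xlow = x & 0xffff, xhigh = x >> 16;
	uint32_t klow = k & 0xffff, khigh = k >> 16;

	/*
	 * The cross products are shifted up by 16 before truncation to
	 * 32 bits, so only their low 16 bits matter; khigh * xhigh is
	 * shifted up by 32 and drops out entirely.
	 */
	uint32_t cross = klow * xhigh + khigh * xlow;

	return klow * xlow + (cross << 16);
}

Read this way, the "extra instruction" noted in the asm parks the low half of klow * xhigh in the high word of x, whose original contents are no longer needed, until it can be added to khigh * xlow; that is what lets the whole computation fit in the three registers holding x, temp, and k.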
