diff options
| author | Michal Simek <monstr@monstr.eu> | 2010-10-08 23:58:24 -0400 |
|---|---|---|
| committer | Michal Simek <monstr@monstr.eu> | 2010-10-21 01:52:01 -0400 |
| commit | 93e2e85139509338c68279c7260ebb68177b23a9 (patch) | |
| tree | 64171deb6d2f046da3cf7c5111f73e419be1d460 /arch/microblaze/lib | |
| parent | ccea0e6e49e4db8ee7968c183ecddb3e399c5f54 (diff) | |
microblaze: Separate library optimized functions
memcpy/memmove/memset
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/lib')
| -rw-r--r-- | arch/microblaze/lib/memcpy.c | 13 | ||||
| -rw-r--r-- | arch/microblaze/lib/memmove.c | 26 | ||||
| -rw-r--r-- | arch/microblaze/lib/memset.c | 22 |
3 files changed, 46 insertions(+), 15 deletions(-)
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index 014bac92bdff..ab2d115f9ee5 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c | |||
| @@ -33,17 +33,24 @@ | |||
| 33 | #include <asm/system.h> | 33 | #include <asm/system.h> |
| 34 | 34 | ||
| 35 | #ifdef __HAVE_ARCH_MEMCPY | 35 | #ifdef __HAVE_ARCH_MEMCPY |
| 36 | #ifndef CONFIG_OPT_LIB_FUNCTION | ||
| 36 | void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) | 37 | void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) |
| 37 | { | 38 | { |
| 38 | const char *src = v_src; | 39 | const char *src = v_src; |
| 39 | char *dst = v_dst; | 40 | char *dst = v_dst; |
| 40 | #ifndef CONFIG_OPT_LIB_FUNCTION | 41 | |
| 41 | /* Simple, byte oriented memcpy. */ | 42 | /* Simple, byte oriented memcpy. */ |
| 42 | while (c--) | 43 | while (c--) |
| 43 | *dst++ = *src++; | 44 | *dst++ = *src++; |
| 44 | 45 | ||
| 45 | return v_dst; | 46 | return v_dst; |
| 46 | #else | 47 | } |
| 48 | #else /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 49 | void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) | ||
| 50 | { | ||
| 51 | const char *src = v_src; | ||
| 52 | char *dst = v_dst; | ||
| 53 | |||
| 47 | /* The following code tries to optimize the copy by using unsigned | 54 | /* The following code tries to optimize the copy by using unsigned |
| 48 | * alignment. This will work fine if both source and destination are | 55 | * alignment. This will work fine if both source and destination are |
| 49 | * aligned on the same boundary. However, if they are aligned on | 56 | * aligned on the same boundary. However, if they are aligned on |
| @@ -150,7 +157,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) | |||
| 150 | } | 157 | } |
| 151 | 158 | ||
| 152 | return v_dst; | 159 | return v_dst; |
| 153 | #endif | ||
| 154 | } | 160 | } |
| 161 | #endif /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 155 | EXPORT_SYMBOL(memcpy); | 162 | EXPORT_SYMBOL(memcpy); |
| 156 | #endif /* __HAVE_ARCH_MEMCPY */ | 163 | #endif /* __HAVE_ARCH_MEMCPY */ |
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c index 0929198c5e68..1d3c0e7990e5 100644 --- a/arch/microblaze/lib/memmove.c +++ b/arch/microblaze/lib/memmove.c | |||
| @@ -31,16 +31,12 @@ | |||
| 31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
| 32 | 32 | ||
| 33 | #ifdef __HAVE_ARCH_MEMMOVE | 33 | #ifdef __HAVE_ARCH_MEMMOVE |
| 34 | #ifndef CONFIG_OPT_LIB_FUNCTION | ||
| 34 | void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) | 35 | void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) |
| 35 | { | 36 | { |
| 36 | const char *src = v_src; | 37 | const char *src = v_src; |
| 37 | char *dst = v_dst; | 38 | char *dst = v_dst; |
| 38 | 39 | ||
| 39 | #ifdef CONFIG_OPT_LIB_FUNCTION | ||
| 40 | const uint32_t *i_src; | ||
| 41 | uint32_t *i_dst; | ||
| 42 | #endif | ||
| 43 | |||
| 44 | if (!c) | 40 | if (!c) |
| 45 | return v_dst; | 41 | return v_dst; |
| 46 | 42 | ||
| @@ -48,7 +44,6 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) | |||
| 48 | if (v_dst <= v_src) | 44 | if (v_dst <= v_src) |
| 49 | return memcpy(v_dst, v_src, c); | 45 | return memcpy(v_dst, v_src, c); |
| 50 | 46 | ||
| 51 | #ifndef CONFIG_OPT_LIB_FUNCTION | ||
| 52 | /* copy backwards, from end to beginning */ | 47 | /* copy backwards, from end to beginning */ |
| 53 | src += c; | 48 | src += c; |
| 54 | dst += c; | 49 | dst += c; |
| @@ -58,7 +53,22 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) | |||
| 58 | *--dst = *--src; | 53 | *--dst = *--src; |
| 59 | 54 | ||
| 60 | return v_dst; | 55 | return v_dst; |
| 61 | #else | 56 | } |
| 57 | #else /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 58 | void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) | ||
| 59 | { | ||
| 60 | const char *src = v_src; | ||
| 61 | char *dst = v_dst; | ||
| 62 | const uint32_t *i_src; | ||
| 63 | uint32_t *i_dst; | ||
| 64 | |||
| 65 | if (!c) | ||
| 66 | return v_dst; | ||
| 67 | |||
| 68 | /* Use memcpy when source is higher than dest */ | ||
| 69 | if (v_dst <= v_src) | ||
| 70 | return memcpy(v_dst, v_src, c); | ||
| 71 | |||
| 62 | /* The following code tries to optimize the copy by using unsigned | 72 | /* The following code tries to optimize the copy by using unsigned |
| 63 | * alignment. This will work fine if both source and destination are | 73 | * alignment. This will work fine if both source and destination are |
| 64 | * aligned on the same boundary. However, if they are aligned on | 74 | * aligned on the same boundary. However, if they are aligned on |
| @@ -169,7 +179,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) | |||
| 169 | *--dst = *--src; | 179 | *--dst = *--src; |
| 170 | } | 180 | } |
| 171 | return v_dst; | 181 | return v_dst; |
| 172 | #endif | ||
| 173 | } | 182 | } |
| 183 | #endif /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 174 | EXPORT_SYMBOL(memmove); | 184 | EXPORT_SYMBOL(memmove); |
| 175 | #endif /* __HAVE_ARCH_MEMMOVE */ | 185 | #endif /* __HAVE_ARCH_MEMMOVE */ |
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c index ecfb663e1fc1..834565d1607e 100644 --- a/arch/microblaze/lib/memset.c +++ b/arch/microblaze/lib/memset.c | |||
| @@ -31,17 +31,30 @@ | |||
| 31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
| 32 | 32 | ||
| 33 | #ifdef __HAVE_ARCH_MEMSET | 33 | #ifdef __HAVE_ARCH_MEMSET |
| 34 | #ifndef CONFIG_OPT_LIB_FUNCTION | ||
| 35 | void *memset(void *v_src, int c, __kernel_size_t n) | ||
| 36 | { | ||
| 37 | char *src = v_src; | ||
| 38 | |||
| 39 | /* Truncate c to 8 bits */ | ||
| 40 | c = (c & 0xFF); | ||
| 41 | |||
| 42 | /* Simple, byte oriented memset or the rest of count. */ | ||
| 43 | while (n--) | ||
| 44 | *src++ = c; | ||
| 45 | |||
| 46 | return v_src; | ||
| 47 | } | ||
| 48 | #else /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 34 | void *memset(void *v_src, int c, __kernel_size_t n) | 49 | void *memset(void *v_src, int c, __kernel_size_t n) |
| 35 | { | 50 | { |
| 36 | char *src = v_src; | 51 | char *src = v_src; |
| 37 | #ifdef CONFIG_OPT_LIB_FUNCTION | ||
| 38 | uint32_t *i_src; | 52 | uint32_t *i_src; |
| 39 | uint32_t w32 = 0; | 53 | uint32_t w32 = 0; |
| 40 | #endif | 54 | |
| 41 | /* Truncate c to 8 bits */ | 55 | /* Truncate c to 8 bits */ |
| 42 | c = (c & 0xFF); | 56 | c = (c & 0xFF); |
| 43 | 57 | ||
| 44 | #ifdef CONFIG_OPT_LIB_FUNCTION | ||
| 45 | if (unlikely(c)) { | 58 | if (unlikely(c)) { |
| 46 | /* Make a repeating word out of it */ | 59 | /* Make a repeating word out of it */ |
| 47 | w32 = c; | 60 | w32 = c; |
| @@ -72,12 +85,13 @@ void *memset(void *v_src, int c, __kernel_size_t n) | |||
| 72 | 85 | ||
| 73 | src = (void *)i_src; | 86 | src = (void *)i_src; |
| 74 | } | 87 | } |
| 75 | #endif | 88 | |
| 76 | /* Simple, byte oriented memset or the rest of count. */ | 89 | /* Simple, byte oriented memset or the rest of count. */ |
| 77 | while (n--) | 90 | while (n--) |
| 78 | *src++ = c; | 91 | *src++ = c; |
| 79 | 92 | ||
| 80 | return v_src; | 93 | return v_src; |
| 81 | } | 94 | } |
| 95 | #endif /* CONFIG_OPT_LIB_FUNCTION */ | ||
| 82 | EXPORT_SYMBOL(memset); | 96 | EXPORT_SYMBOL(memset); |
| 83 | #endif /* __HAVE_ARCH_MEMSET */ | 97 | #endif /* __HAVE_ARCH_MEMSET */ |
