author		Kirill A. Shutemov <kirill@shutemov.name>	2009-09-15 05:26:33 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-15 17:07:02 -0400
commit		dca230f00d737353e2dffae489c916b41971921f
tree		49490aab441deb87d7f9df5f0d737dad51d454fb /arch/arm/lib
parent		910a17e57ab6cd22b300bde4ce5f633f175c7ccd
ARM: 5701/1: ARM: copy_page.S: take into account the size of the cache line
The optimized version of copy_page() was written with the assumption that
the cache line size is 32 bytes. On Cortex-A8 the cache line size is 64
bytes.

This patch generalizes copy_page() to work with any cache line size,
provided that the cache line size is a multiple of 16 and the page size
is a multiple of two cache line sizes.
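For illustration only, a small C sketch (not part of the patch; the
PAGE_SZ and L1_CACHE_BYTES values here are examples) checks these
constraints and prints the loop parameters that the assembly derives
from them:

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ        4096
#define L1_CACHE_BYTES 64	/* 32 on older ARM cores, 64 on Cortex-A8 */

int main(void)
{
	/* The two constraints stated above. */
	assert(L1_CACHE_BYTES % 16 == 0);
	assert(PAGE_SZ % (2 * L1_CACHE_BYTES) == 0);

	/* Outer loop count (ignoring the PLD(-1) adjustment) and the
	 * number of 16-byte ldm/stm pairs executed per iteration. */
	printf("COPY_COUNT = %d\n", PAGE_SZ / (2 * L1_CACHE_BYTES));
	printf("ldm/stm pairs per iteration = %d\n",
	       2 * L1_CACHE_BYTES / 16);
	return 0;
}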
After this optimization we get a ~25% speedup on OMAP3 (tested in
userspace).
There is a test for kernelspace which triggers copy-on-write after fork():
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>	/* for wait() */

#define BUF_SIZE (10000*4096)
#define NFORK 200

int main(int argc, char **argv)
{
	char *buf = malloc(BUF_SIZE);
	int i;

	memset(buf, 0, BUF_SIZE);

	for (i = 0; i < NFORK; i++) {
		if (fork()) {
			wait(NULL);
		} else {
			int j;

			/* Touch one byte per page to force copy-on-write. */
			for (j = 0; j < BUF_SIZE; j += 4096)
				buf[j] = (j & 0xFF) + 1;
			break;
		}
	}

	free(buf);
	return 0;
}
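The test can be built and timed in the usual way (the file name
cow-test.c is illustrative):

gcc -O2 cow-test.c -o cow-test
time ./cow-test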
Before the optimization this test takes ~66 seconds; after the
optimization it takes ~56 seconds.
Signed-off-by: Siarhei Siamashka <siarhei.siamashka@nokia.com>
Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/lib')
 arch/arm/lib/copy_page.S | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ae04db1ca4f..6ee2f6706f86 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -12,8 +12,9 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 
-#define COPY_COUNT (PAGE_SZ/64 PLD( -1 ))
+#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
 
 		.text
 		.align	5
@@ -26,17 +27,16 @@
 ENTRY(copy_page)
 		stmfd	sp!, {r4, lr}			@	2
 	PLD(	pld	[r1, #0]		)
-	PLD(	pld	[r1, #32]		)
+	PLD(	pld	[r1, #L1_CACHE_BYTES]		)
 		mov	r2, #COPY_COUNT			@	1
 		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-1:	PLD(	pld	[r1, #64]		)
-	PLD(	pld	[r1, #96]		)
-2:		stmia	r0!, {r3, r4, ip, lr}		@	4
-		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-		stmia	r0!, {r3, r4, ip, lr}		@	4
-		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-		stmia	r0!, {r3, r4, ip, lr}		@	4
-		ldmia	r1!, {r3, r4, ip, lr}		@	4
+1:	PLD(	pld	[r1, #2 * L1_CACHE_BYTES])
+	PLD(	pld	[r1, #3 * L1_CACHE_BYTES])
+2:
+	.rept	(2 * L1_CACHE_BYTES / 16 - 1)
+		stmia	r0!, {r3, r4, ip, lr}		@	4
+		ldmia	r1!, {r3, r4, ip, lr}		@	4
+	.endr
 		subs	r2, r2, #1			@	1
 		stmia	r0!, {r3, r4, ip, lr}		@	4
 		ldmgtia	r1!, {r3, r4, ip, lr}		@	4
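As a rough illustration of what the generalized loop does, here is a
minimal C sketch (not the kernel implementation, which is the ARM
assembly above; copy_page_model is a hypothetical name, and the PLD(-1)
software-pipelining adjustment is ignored):

#include <string.h>

#define PAGE_SZ        4096
#define L1_CACHE_BYTES 64

/* Hypothetical C model of the generalized copy loop. */
void copy_page_model(void *to, const void *from)
{
	char *dst = to;
	const char *src = from;
	int i;

	/* COPY_COUNT iterations, each moving two cache lines; the .rept
	 * block above unrolls the inner 16-byte ldm/stm pairs. */
	for (i = 0; i < PAGE_SZ / (2 * L1_CACHE_BYTES); i++) {
		memcpy(dst, src, 2 * L1_CACHE_BYTES);
		dst += 2 * L1_CACHE_BYTES;
		src += 2 * L1_CACHE_BYTES;
	}
}

With L1_CACHE_BYTES = 64 this gives 32 iterations of 128 bytes each;
with L1_CACHE_BYTES = 32 it gives 64 iterations of 64 bytes, matching
the behaviour of the original 32-byte-cache-line code.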