author    Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/lib/copy_page_mck.S
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/lib/copy_page_mck.S')
-rw-r--r--  arch/ia64/lib/copy_page_mck.S | 185
1 file changed, 185 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/lib/copy_page_mck.S b/arch/ia64/lib/copy_page_mck.S
new file mode 100644
index 000000000000..3c45d60a81b4
--- /dev/null
+++ b/arch/ia64/lib/copy_page_mck.S
@@ -0,0 +1,185 @@
+/*
+ * McKinley-optimized version of copy_page().
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co
+ * David Mosberger <davidm@hpl.hp.com>
+ *
+ * Inputs:
+ * in0: address of target page
+ * in1: address of source page
+ * Output:
+ * no return value
+ *
+ * General idea:
+ * - use regular loads and stores to prefetch data to avoid consuming M-slot just for
+ * lfetches => good for in-cache performance
+ * - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single
+ * cycle
+ *
+ * Principle of operation:
+ * First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes.
+ * To avoid secondary misses in L2, we prefetch both source and destination with a line-size
+ * of 128 bytes. When both of these lines are in the L2 and the first half of the
+ * source line is in L1, we start copying the remaining words. The second half of the
+ * source line is prefetched in an earlier iteration, so that by the time we start
+ * accessing it, it's also present in the L1.
+ *
+ * We use a software-pipelined loop to control the overall operation. The pipeline
+ * has 2*PREFETCH_DIST+K stages. The first PREFETCH_DIST stages are used for prefetching
+ * source cache-lines. The second PREFETCH_DIST stages are used for prefetching destination
+ * cache-lines, the last K stages are used to copy the cache-line words not copied by
+ * the prefetches. The four relevant points in the pipeline are called A, B, C, D:
+ * p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line
+ * should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought
+ * into L1D and p[D] is TRUE if a cacheline needs to be copied.
+ *
+ * This all sounds very complicated, but thanks to the modulo-scheduled loop support,
+ * the resulting code is very regular and quite easy to follow (once you get the idea).
+ *
+ * As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented
+ * as the separate .prefetch_loop. Logically, this loop performs exactly like the
+ * main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed,
+ * so that each loop iteration is faster (again, good for cached case).
+ *
+ * When reading the code, it helps to keep the following picture in mind:
+ *
+ * word 0 word 1
+ * +------+------+---
+ * | v[x] | t1 | ^
+ * | t2 | t3 | |
+ * | t4 | t5 | |
+ * | t6 | t7 | | 128 bytes
+ * | n[y] | t9 | | (L2 cache line)
+ * | t10 | t11 | |
+ * | t12 | t13 | |
+ * | t14 | t15 | v
+ * +------+------+---
+ *
+ * Here, v[x] is copied by the (memory) prefetch. n[y] is loaded at p[C]
+ * to fetch the second-half of the L2 cache line into L1, and the tX words are copied in
+ * an order that avoids bank conflicts.
+ */
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
+
+#define src0 r2
+#define src1 r3
+#define dst0 r9
+#define dst1 r10
+#define src_pre_mem r11
+#define dst_pre_mem r14
+#define src_pre_l2 r15
+#define dst_pre_l2 r16
+#define t1 r17
+#define t2 r18
+#define t3 r19
+#define t4 r20
+#define t5 t1 // alias!
+#define t6 t2 // alias!
+#define t7 t3 // alias!
+#define t9 t5 // alias!
+#define t10 t4 // alias!
+#define t11 t7 // alias!
+#define t12 t6 // alias!
+#define t14 t10 // alias!
+#define t13 r21
+#define t15 r22
+
+#define saved_lc r23
+#define saved_pr r24
+
+#define A 0
+#define B (PREFETCH_DIST)
+#define C (B + PREFETCH_DIST)
+#define D (C + 3)
+#define N (D + 1)
+#define Nrot ((N + 7) & ~7)
+
+GLOBAL_ENTRY(copy_page)
+ .prologue
+ alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
+
+ .rotr v[2*PREFETCH_DIST], n[D-C+1]
+ .rotp p[N]
+
+ .save ar.lc, saved_lc
+ mov saved_lc = ar.lc
+ .save pr, saved_pr
+ mov saved_pr = pr
+ .body
+
+ mov src_pre_mem = in1
+ mov pr.rot = 0x10000
+ mov ar.ec = 1 // special unrolled loop
+
+ mov dst_pre_mem = in0
+ mov ar.lc = 2*PREFETCH_DIST - 1
+
+ add src_pre_l2 = 8*8, in1
+ add dst_pre_l2 = 8*8, in0
+ add src0 = 8, in1 // first t1 src
+ add src1 = 3*8, in1 // first t3 src
+ add dst0 = 8, in0 // first t1 dst
+ add dst1 = 3*8, in0 // first t3 dst
+ mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1
+ nop.m 0
+ nop.i 0
+ ;;
+ // same as .line_copy loop, but with all predicated-off instructions removed:
+.prefetch_loop:
+(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0
+(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2
+ br.ctop.sptk .prefetch_loop
+ ;;
+ cmp.eq p16, p0 = r0, r0 // reset p16 to 1 (br.ctop cleared it to zero)
+ mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits!
+ mov ar.ec = N // # of stages in pipeline
+ ;;
+.line_copy:
+(p[D]) ld8 t2 = [src0], 3*8 // M0
+(p[D]) ld8 t4 = [src1], 3*8 // M1
+(p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 prefetch dst from memory
+(p[D]) st8 [dst_pre_l2] = n[D-C], 128 // M3 prefetch dst from L2
+ ;;
+(p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 prefetch src from memory
+(p[C]) ld8 n[0] = [src_pre_l2], 128 // M1 prefetch src from L2
+(p[D]) st8 [dst0] = t1, 8 // M2
+(p[D]) st8 [dst1] = t3, 8 // M3
+ ;;
+(p[D]) ld8 t5 = [src0], 8
+(p[D]) ld8 t7 = [src1], 3*8
+(p[D]) st8 [dst0] = t2, 3*8
+(p[D]) st8 [dst1] = t4, 3*8
+ ;;
+(p[D]) ld8 t6 = [src0], 3*8
+(p[D]) ld8 t10 = [src1], 8
+(p[D]) st8 [dst0] = t5, 8
+(p[D]) st8 [dst1] = t7, 3*8
+ ;;
+(p[D]) ld8 t9 = [src0], 3*8
+(p[D]) ld8 t11 = [src1], 3*8
+(p[D]) st8 [dst0] = t6, 3*8
+(p[D]) st8 [dst1] = t10, 8
+ ;;
+(p[D]) ld8 t12 = [src0], 8
+(p[D]) ld8 t14 = [src1], 8
+(p[D]) st8 [dst0] = t9, 3*8
+(p[D]) st8 [dst1] = t11, 3*8
+ ;;
+(p[D]) ld8 t13 = [src0], 4*8
+(p[D]) ld8 t15 = [src1], 4*8
+(p[D]) st8 [dst0] = t12, 8
+(p[D]) st8 [dst1] = t14, 8
+ ;;
+(p[D-1])ld8 t1 = [src0], 8
+(p[D-1])ld8 t3 = [src1], 8
+(p[D]) st8 [dst0] = t13, 4*8
+(p[D]) st8 [dst1] = t15, 4*8
+ br.ctop.sptk .line_copy
+ ;;
+ mov ar.lc = saved_lc
+ mov pr = saved_pr, -1
+ br.ret.sptk.many rp
+END(copy_page)
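
Editor's note (not part of the commit): the header comment above describes a software-pipelined loop whose stages prefetch a source line, prefetch the matching destination line, and finally copy the line. For readers less familiar with IA-64 modulo scheduling, the following stand-alone C sketch restates that staging under stated assumptions: SKETCH_PAGE_SIZE is an arbitrary example value, __builtin_prefetch (a GCC builtin) and memcpy stand in for the ordinary ld8/st8 pairs the assembly actually uses, the p[C] step that pulls the second half of a source line into L1 is folded into the copy, and the few extra stages between C and D (D = C + 3 in the file) are dropped for simplicity.

/*
 * Illustration only -- NOT the kernel implementation.  C-level sketch of the
 * staged prefetch-and-copy loop described in copy_page_mck.S.
 */
#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 16384   /* example page size (assumption) */
#define LINE_SIZE        128     /* McKinley L2 line size, as in the file */
#define PREFETCH_DIST    8       /* lines kept in flight, as in the file */

static void copy_page_sketch(void *dst, const void *src)
{
    size_t lines = SKETCH_PAGE_SIZE / LINE_SIZE;
    size_t i;

    /*
     * One iteration per pipeline step.  Line j's source is requested
     * 2*PREFETCH_DIST steps before it is copied (stage A vs. stage D),
     * and its destination line PREFETCH_DIST steps before (stage B).
     */
    for (i = 0; i < lines + 2 * PREFETCH_DIST; i++) {
        if (i < lines)                                        /* p[A]: prefetch source line */
            __builtin_prefetch((const char *)src + i * LINE_SIZE, 0);

        if (i >= PREFETCH_DIST && i - PREFETCH_DIST < lines)  /* p[B]: prefetch destination line */
            __builtin_prefetch((char *)dst + (i - PREFETCH_DIST) * LINE_SIZE, 1);

        if (i >= 2 * PREFETCH_DIST) {                         /* p[D]: copy one 128-byte line */
            size_t j = i - 2 * PREFETCH_DIST;
            memcpy((char *)dst + j * LINE_SIZE,
                   (const char *)src + j * LINE_SIZE, LINE_SIZE);
        }
    }
}

The real routine gets the same overlap without explicit bounds checks: br.ctop rotates the predicate registers p[A], p[B], p[C], p[D], switching each per-line piece of work on and off as the pipeline fills and drains.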