author | Jeff Garzik <jeff@garzik.org> | 2006-12-03 22:22:41 -0500 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-12-03 22:22:41 -0500 |
commit | d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (patch) | |
tree | e6adbc42541498306728490a4978afe116131299 /drivers/char/ftape/compressor | |
parent | 2b5f6dcce5bf94b9b119e9ed8d537098ec61c3d2 (diff) |
Remove long-unmaintained ftape driver subsystem.
It's bitrotten, long unmaintained, long hidden under BROKEN_ON_SMP,
etc. As scheduled in feature-removal-schedule.txt, and ack'd several
times on lkml.
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/char/ftape/compressor')
-rw-r--r-- | drivers/char/ftape/compressor/Makefile | 31 |
-rw-r--r-- | drivers/char/ftape/compressor/lzrw3.c | 743 |
-rw-r--r-- | drivers/char/ftape/compressor/lzrw3.h | 253 |
-rw-r--r-- | drivers/char/ftape/compressor/zftape-compress.c | 1203 |
-rw-r--r-- | drivers/char/ftape/compressor/zftape-compress.h | 83 |
5 files changed, 0 insertions, 2313 deletions
diff --git a/drivers/char/ftape/compressor/Makefile b/drivers/char/ftape/compressor/Makefile
deleted file mode 100644
index 1fbd6c4019db..000000000000
--- a/drivers/char/ftape/compressor/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@ | |||
1 | # | ||
2 | # Copyright (C) 1997 Claus-Justus Heine. | ||
3 | # | ||
4 | # This program is free software; you can redistribute it and/or modify | ||
5 | # it under the terms of the GNU General Public License as published by | ||
6 | # the Free Software Foundation; either version 2, or (at your option) | ||
7 | # any later version. | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, | ||
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | # GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License | ||
15 | # along with this program; see the file COPYING. If not, write to | ||
16 | # the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | # | ||
18 | # $Source: /homes/cvs/ftape-stacked/ftape/compressor/Makefile,v $ | ||
19 | # $Revision: 1.1 $ | ||
20 | # $Date: 1997/10/05 19:12:28 $ | ||
21 | # | ||
22 | # Makefile for the optional compressor for the zftape VFS | ||
23 | # interface to the QIC-40/80/3010/3020 floppy-tape driver for | ||
24 | # Linux. | ||
25 | # | ||
26 | |||
27 | obj-$(CONFIG_ZFT_COMPRESSOR) += zft-compressor.o | ||
28 | |||
29 | zft-compressor-objs := zftape-compress.o lzrw3.o | ||
30 | |||
31 | CFLAGS_lzrw3.o := -O6 -funroll-all-loops | ||
diff --git a/drivers/char/ftape/compressor/lzrw3.c b/drivers/char/ftape/compressor/lzrw3.c
deleted file mode 100644
index a032a0ee2a99..000000000000
--- a/drivers/char/ftape/compressor/lzrw3.c
+++ /dev/null
@@ -1,743 +0,0 @@ | |||
1 | /* | ||
2 | * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.c,v $ | ||
3 | * $Revision: 1.1 $ | ||
4 | * $Date: 1997/10/05 19:12:29 $ | ||
5 | * | ||
6 | * Implementation of Ross Williams' lzrw3 algorithm. Adaptation for zftape. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include "../compressor/lzrw3.h" /* Defines single exported function "compress". */ | ||
11 | |||
12 | /******************************************************************************/ | ||
13 | /* */ | ||
14 | /* LZRW3.C */ | ||
15 | /* */ | ||
16 | /******************************************************************************/ | ||
17 | /* */ | ||
18 | /* Author : Ross Williams. */ | ||
19 | /* Date : 30-Jun-1991. */ | ||
20 | /* Release : 1. */ | ||
21 | /* */ | ||
22 | /******************************************************************************/ | ||
23 | /* */ | ||
24 | /* This file contains an implementation of the LZRW3 data compression */ | ||
25 | /* algorithm in C. */ | ||
26 | /* */ | ||
27 | /* The algorithm is a general purpose compression algorithm that runs fast */ | ||
28 | /* and gives reasonable compression. The algorithm is a member of the Lempel */ | ||
29 | /* Ziv family of algorithms and bases its compression on the presence in the */ | ||
30 | /* data of repeated substrings. */ | ||
31 | /* */ | ||
32 | /* This algorithm is unpatented and the code is public domain. As the */ | ||
33 | /* algorithm is based on the LZ77 class of algorithms, it is unlikely to be */ | ||
34 | /* the subject of a patent challenge. */ | ||
35 | /* */ | ||
36 | /* Unlike the LZRW1 and LZRW1-A algorithms, the LZRW3 algorithm is */ | ||
37 | /* deterministic and is guaranteed to yield the same compressed */ | ||
38 | /* representation for a given file each time it is run. */ | ||
39 | /* */ | ||
40 | /* The LZRW3 algorithm was originally designed and implemented */ | ||
41 | /* by Ross Williams on 31-Dec-1990. */ | ||
42 | /* */ | ||
43 | /* Here are the results of applying this code, compiled under THINK C 4.0 */ | ||
44 | /* and running on a Mac-SE (8MHz 68000), to the standard calgary corpus. */ | ||
45 | /* */ | ||
46 | /* +----------------------------------------------------------------+ */ | ||
47 | /* | DATA COMPRESSION TEST | */ | ||
48 | /* | ===================== | */ | ||
49 | /* | Time of run : Sun 30-Jun-1991 09:31PM | */ | ||
50 | /* | Timing accuracy : One part in 100 | */ | ||
51 | /* | Context length : 262144 bytes (= 256.0000K) | */ | ||
52 | /* | Test suite : Calgary Corpus Suite | */ | ||
53 | /* | Files in suite : 14 | */ | ||
54 | /* | Algorithm : LZRW3 | */ | ||
55 | /* | Note: All averages are calculated from the un-rounded values. | */ | ||
56 | /* +----------------------------------------------------------------+ */ | ||
57 | /* | File Name Length CxB ComLen %Remn Bits Com K/s Dec K/s | */ | ||
58 | /* | ---------- ------ --- ------ ----- ---- ------- ------- | */ | ||
59 | /* | rpus:Bib.D 111261 1 55033 49.5 3.96 19.46 32.27 | */ | ||
60 | /* | us:Book1.D 768771 3 467962 60.9 4.87 17.03 31.07 | */ | ||
61 | /* | us:Book2.D 610856 3 317102 51.9 4.15 19.39 34.15 | */ | ||
62 | /* | rpus:Geo.D 102400 1 82424 80.5 6.44 11.65 18.18 | */ | ||
63 | /* | pus:News.D 377109 2 205670 54.5 4.36 17.14 27.47 | */ | ||
64 | /* | pus:Obj1.D 21504 1 13027 60.6 4.85 13.40 18.95 | */ | ||
65 | /* | pus:Obj2.D 246814 1 116286 47.1 3.77 19.31 30.10 | */ | ||
66 | /* | s:Paper1.D 53161 1 27522 51.8 4.14 18.60 31.15 | */ | ||
67 | /* | s:Paper2.D 82199 1 45160 54.9 4.40 18.45 32.84 | */ | ||
68 | /* | rpus:Pic.D 513216 2 122388 23.8 1.91 35.29 51.05 | */ | ||
69 | /* | us:Progc.D 39611 1 19669 49.7 3.97 18.87 30.64 | */ | ||
70 | /* | us:Progl.D 71646 1 28247 39.4 3.15 24.34 40.66 | */ | ||
71 | /* | us:Progp.D 49379 1 19377 39.2 3.14 23.91 39.23 | */ | ||
72 | /* | us:Trans.D 93695 1 33481 35.7 2.86 25.48 40.37 | */ | ||
73 | /* +----------------------------------------------------------------+ */ | ||
74 | /* | Average 224401 1 110953 50.0 4.00 20.17 32.72 | */ | ||
75 | /* +----------------------------------------------------------------+ */ | ||
76 | /* */ | ||
77 | /******************************************************************************/ | ||
78 | |||
79 | /******************************************************************************/ | ||
80 | |||
81 | /* The following structure is returned by the "compress" function below when */ | ||
82 | /* the user asks the function to return identifying information. */ | ||
83 | /* The most important field in the record is the working memory field which */ | ||
84 | /* tells the calling program how much working memory should be passed to */ | ||
85 | /* "compress" when it is called to perform a compression or decompression. */ | ||
86 | /* LZRW3 uses the same amount of memory during compression and decompression. */ | ||
87 | /* For more information on this structure see "compress.h". */ | ||
88 | |||
89 | #define U(X) ((ULONG) X) | ||
90 | #define SIZE_P_BYTE (U(sizeof(UBYTE *))) | ||
91 | #define SIZE_WORD (U(sizeof(UWORD ))) | ||
92 | #define ALIGNMENT_FUDGE (U(16)) | ||
93 | #define MEM_REQ ( U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE ) | ||
94 | |||
95 | static struct compress_identity identity = | ||
96 | { | ||
97 | U(0x032DDEA8), /* Algorithm identification number. */ | ||
98 | MEM_REQ, /* Working memory (bytes) required. */ | ||
99 | "LZRW3", /* Name of algorithm. */ | ||
100 | "1.0", /* Version number of algorithm. */ | ||
101 | "31-Dec-1990", /* Date of algorithm. */ | ||
102 | "Public Domain", /* Copyright notice. */ | ||
103 | "Ross N. Williams", /* Author of algorithm. */ | ||
104 | "Renaissance Software", /* Affiliation of author. */ | ||
105 | "Public Domain" /* Vendor of algorithm. */ | ||
106 | }; | ||
107 | |||
108 | LOCAL void compress_compress (UBYTE *,UBYTE *,ULONG,UBYTE *, LONG *); | ||
109 | LOCAL void compress_decompress(UBYTE *,UBYTE *,LONG, UBYTE *, ULONG *); | ||
110 | |||
111 | /******************************************************************************/ | ||
112 | |||
113 | /* This function is the only function exported by this module. */ | ||
114 | /* Depending on its first parameter, the function can be requested to */ | ||
115 | /* compress a block of memory, decompress a block of memory, or to identify */ | ||
116 | /* itself. For more information, see the specification file "compress.h". */ | ||
117 | |||
118 | EXPORT void lzrw3_compress( | ||
119 | UWORD action, /* Action to be performed. */ | ||
120 | UBYTE *wrk_mem, /* Address of working memory we can use.*/ | ||
121 | UBYTE *src_adr, /* Address of input data. */ | ||
122 | LONG src_len, /* Length of input data. */ | ||
123 | UBYTE *dst_adr, /* Address to put output data. */ | ||
124 | void *p_dst_len /* Address of longword for length of output data.*/ | ||
125 | ) | ||
126 | { | ||
127 | switch (action) | ||
128 | { | ||
129 | case COMPRESS_ACTION_IDENTITY: | ||
130 | *((struct compress_identity **)p_dst_len)= &identity; | ||
131 | break; | ||
132 | case COMPRESS_ACTION_COMPRESS: | ||
133 | compress_compress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len); | ||
134 | break; | ||
135 | case COMPRESS_ACTION_DECOMPRESS: | ||
136 | compress_decompress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len); | ||
137 | break; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /******************************************************************************/ | ||
142 | /* */ | ||
143 | /* BRIEF DESCRIPTION OF THE LZRW3 ALGORITHM */ | ||
144 | /* ======================================== */ | ||
145 | /* The LZRW3 algorithm is identical to the LZRW1-A algorithm except that */ | ||
146 | /* instead of transmitting history offsets, it transmits hash table indexes. */ | ||
147 | /* In order to decode the indexes, the decompressor must maintain an */ | ||
148 | /* identical hash table. Copy items are straightforward: when the decompressor */ | ||
149 | /* receives a copy item, it simply looks up the hash table to translate the */ | ||
150 | /* index into a pointer into the data already decompressed. To update the */ | ||
151 | /* hash table, it replaces the same table entry with a pointer to the start */ | ||
152 | /* of the newly decoded phrase. The tricky part is with literal items, for at */ | ||
153 | /* the time that the decompressor receives a literal item the decompressor */ | ||
154 | /* does not have the three bytes in the Ziv (that the compressor has) to */ | ||
155 | /* perform the three-byte hash. To solve this problem, in LZRW3, both the */ | ||
156 | /* compressor and decompressor are wired up so that they "buffer" these */ | ||
157 | /* literals and update their hash tables only when three bytes are available. */ | ||
158 | /* This makes the maximum buffering 2 bytes. */ | ||
159 | /* */ | ||
160 | /* Replacement of offsets by hash table indexes yields a few percent extra */ | ||
161 | /* compression at the cost of some speed. LZRW3 is slower than LZRW1, LZRW1-A */ | ||
162 | /* and LZRW2, but yields better compression. */ | ||
163 | /* */ | ||
164 | /* Extra compression could be obtained by using a hash table of depth two. */ | ||
165 | /* However, increasing the depth above one incurs a significant decrease in */ | ||
166 | /* compression speed which was not considered worthwhile. Another reason for */ | ||
167 | /* keeping the depth down to one was to allow easy comparison with the */ | ||
168 | /* LZRW1-A and LZRW2 algorithms so as to demonstrate the exact effect of the */ | ||
169 | /* use of direct hash indexes. */ | ||
170 | /* */ | ||
171 | /* +---+ */ | ||
172 | /* |___|4095 */ | ||
173 | /* |___| */ | ||
174 | /* +---------------------*_|<---+ /----+---\ */ | ||
175 | /* | |___| +---|Hash | */ | ||
176 | /* | |___| |Function| */ | ||
177 | /* | |___| \--------/ */ | ||
178 | /* | |___|0 ^ */ | ||
179 | /* | +---+ | */ | ||
180 | /* | Hash +-----+ */ | ||
181 | /* | Table | */ | ||
182 | /* | --- */ | ||
183 | /* v ^^^ */ | ||
184 | /* +-------------------------------------|----------------+ */ | ||
185 | /* |||||||||||||||||||||||||||||||||||||||||||||||||||||||| */ | ||
186 | /* +-------------------------------------|----------------+ */ | ||
187 | /* | |1......18| | */ | ||
188 | /* |<------- Lempel=History ------------>|<--Ziv-->| | */ | ||
189 | /* | (=bytes already processed) |<-Still to go-->| */ | ||
190 | /* |<-------------------- INPUT BLOCK ------------------->| */ | ||
191 | /* */ | ||
192 | /* The diagram above for LZRW3 looks almost identical to the diagram for */ | ||
193 | /* LZRW1. The difference is that in LZRW3, the compressor transmits hash */ | ||
194 | /* table indices instead of Lempel offsets. For this to work, the */ | ||
195 | /* decompressor must maintain a hash table as well as the compressor and both */ | ||
196 | /* compressor and decompressor must "buffer" literals, as the decompressor */ | ||
197 | /* cannot hash phrases commencing with a literal until another two bytes have */ | ||
198 | /* arrived. */ | ||
199 | /* */ | ||
200 | /* LZRW3 Algorithm Execution Summary */ | ||
201 | /* --------------------------------- */ | ||
202 | /* 1. Hash the first three bytes of the Ziv to yield a hash table index h. */ | ||
203 | /* 2. Look up the hash table yielding history pointer p. */ | ||
204 | /* 3. Match where p points with the Ziv. If there is a match of three or */ | ||
205 | /* more bytes, code those bytes (in the Ziv) as a copy item, otherwise */ | ||
206 | /* code the next byte in the Ziv as a literal item. */ | ||
207 | /* 4. Update the hash table as possible subject to the constraint that only */ | ||
208 | /* phrases commencing three bytes back from the Ziv can be hashed and */ | ||
209 | /* entered into the hash table. (This enables the decompressor to keep */ | ||
210 | /* pace). See the description and code for more details. */ | ||
211 | /* */ | ||
212 | /******************************************************************************/ | ||
213 | /* */ | ||
214 | /* DEFINITION OF COMPRESSED FILE FORMAT */ | ||
215 | /* ==================================== */ | ||
216 | /* * A compressed file consists of a COPY FLAG followed by a REMAINDER. */ | ||
217 | /* * The copy flag CF uses up four bytes with the first byte being the */ | ||
218 | /* least significant. */ | ||
219 | /* * If CF=1, then the compressed file represents the remainder of the file */ | ||
220 | /* exactly. Otherwise CF=0 and the remainder of the file consists of zero */ | ||
221 | /* or more GROUPS, each of which represents one or more bytes. */ | ||
222 | /* * Each group consists of two bytes of CONTROL information followed by */ | ||
223 | /* sixteen ITEMs except for the last group which can contain from one */ | ||
224 | /* to sixteen items. */ | ||
225 | /* * An item can be either a LITERAL item or a COPY item. */ | ||
226 | /* * Each item corresponds to a bit in the control bytes. */ | ||
227 | /* * The first control byte corresponds to the first 8 items in the group */ | ||
228 | /* with bit 0 corresponding to the first item in the group and bit 7 to */ | ||
229 | /* the eighth item in the group. */ | ||
230 | /* * The second control byte corresponds to the second 8 items in the group */ | ||
231 | /* with bit 0 corresponding to the ninth item in the group and bit 7 to */ | ||
232 | /* the sixteenth item in the group. */ | ||
233 | /* * A zero bit in a control word means that the corresponding item is a */ | ||
234 | /* literal item. A one bit corresponds to a copy item. */ | ||
235 | /* * A literal item consists of a single byte which represents itself. */ | ||
236 | /* * A copy item consists of two bytes that represent from 3 to 18 bytes. */ | ||
237 | /* * The first byte in a copy item will be denoted C1. */ | ||
238 | /* * The second byte in a copy item will be denoted C2. */ | ||
239 | /* * Bits will be selected using square brackets. */ | ||
240 | /* For example: C1[0..3] is the low nibble of the first byte */ | ||
241 | /* of copy item C1. */ | ||
242 | /* * The LENGTH of a copy item is defined to be C1[0..3]+3 which is a number */ | ||
243 | /* in the range [3,18]. */ | ||
244 | /* * The INDEX of a copy item is defined to be C1[4..7]*256+C2[0..7] which */ | ||
245 | /* is a number in the range [0,4095]. */ | ||
246 | /* * A copy item represents the sequence of bytes */ | ||
247 | /* text[POS-OFFSET..POS-OFFSET+LENGTH-1] where */ | ||
248 | /* text is the entire text of the uncompressed string. */ | ||
249 | /* POS is the index in the text of the character following the */ | ||
250 | /* string represented by all the items preceding the item */ | ||
251 | /* being defined. */ | ||
252 | /* OFFSET is obtained from INDEX by looking up the hash table. */ | ||
253 | /* */ | ||
254 | /******************************************************************************/ | ||
255 | |||
256 | /* The following #define defines the length of the copy flag that appears at */ | ||
257 | /* the start of the compressed file. The value of four bytes was chosen */ | ||
258 | /* because the fast_copy routine on my Macintosh runs faster if the source */ | ||
259 | /* and destination blocks are relatively longword aligned. */ | ||
260 | /* The actual flag data appears in the first byte. The rest are zeroed so as */ | ||
261 | /* to normalize the compressed representation (i.e. not non-deterministic). */ | ||
262 | #define FLAG_BYTES 4 | ||
263 | |||
264 | /* The following #defines define the meaning of the values of the copy */ | ||
265 | /* flag at the start of the compressed file. */ | ||
266 | #define FLAG_COMPRESS 0 /* Signals that output was result of compression. */ | ||
267 | #define FLAG_COPY 1 /* Signals that output was simply copied over. */ | ||
268 | |||
269 | /* The 68000 microprocessor (on which this algorithm was originally developed */ | ||
270 | /* is fussy about non-aligned arrays of words. To avoid these problems the */ | ||
271 | /* following macro can be used to "waste" from 0 to 3 bytes so as to align */ | ||
272 | /* the argument pointer. */ | ||
273 | #define ULONG_ALIGN_UP(X) ((((ULONG)X)+sizeof(ULONG)-1)&~(sizeof(ULONG)-1)) | ||
274 | |||
275 | |||
276 | /* The following constant defines the maximum length of an uncompressed item. */ | ||
277 | /* This definition must not be changed; its value is hardwired into the code. */ | ||
278 | /* The longest number of bytes that can be spanned by a single item is 18 */ | ||
279 | /* for the longest copy item. */ | ||
280 | #define MAX_RAW_ITEM (18) | ||
281 | |||
282 | /* The following constant defines the maximum length of an uncompressed group.*/ | ||
283 | /* This definition must not be changed; its value is hardwired into the code. */ | ||
284 | /* A group contains at most 16 items which explains this definition. */ | ||
285 | #define MAX_RAW_GROUP (16*MAX_RAW_ITEM) | ||
286 | |||
287 | /* The following constant defines the maximum length of a compressed group. */ | ||
288 | /* This definition must not be changed; its value is hardwired into the code. */ | ||
289 | /* A compressed group consists of two control bytes followed by up to 16 */ | ||
290 | /* compressed items each of which can have a maximum length of two bytes. */ | ||
291 | #define MAX_CMP_GROUP (2+16*2) | ||
292 | |||
293 | /* The following constant defines the number of entries in the hash table. */ | ||
294 | /* This definition must not be changed; its value is hardwired into the code. */ | ||
295 | #define HASH_TABLE_LENGTH (4096) | ||
296 | |||
297 | /* LZRW3, unlike LZRW1(-A), must initialize its hash table so as to enable */ | ||
298 | /* the compressor and decompressor to stay in step maintaining identical hash */ | ||
299 | /* tables. In an early version of the algorithm, the tables were simply */ | ||
300 | /* initialized to zero and a check for zero was included just before the */ | ||
301 | /* matching code. However, this test costs time. A better solution is to */ | ||
302 | /* initialize all the entries in the hash table to point to a constant */ | ||
303 | /* string. The decompressor does the same. This solution requires no extra */ | ||
304 | /* test. The contents of the string do not matter so long as the string is */ | ||
305 | /* the same for the compressor and decompressor and contains at least */ | ||
306 | /* MAX_RAW_ITEM bytes. I chose consecutive decimal digits because they do not */ | ||
307 | /* have white space problems (e.g. there is no chance that the compiler will */ | ||
308 | /* replace more than one space by a TAB) and because they make the length of */ | ||
309 | /* the string obvious by inspection. */ | ||
310 | #define START_STRING_18 ((UBYTE *) "123456789012345678") | ||
311 | |||
312 | /* In this algorithm, hash values have to be calculated at more than one */ | ||
313 | /* point. The following macro neatens the code up for this. */ | ||
314 | #define HASH(PTR) \ | ||
315 | (((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF) | ||
316 | |||
317 | /******************************************************************************/ | ||
318 | |||
319 | /* Input : Hand over the required amount of working memory in p_wrk_mem. */ | ||
320 | /* Input : Specify input block using p_src_first and src_len. */ | ||
321 | /* Input : Point p_dst_first to the start of the output zone (OZ). */ | ||
322 | /* Input : Point p_dst_len to a ULONG to receive the output length. */ | ||
323 | /* Input : Input block and output zone must not overlap. */ | ||
324 | /* Output : Length of output block written to *p_dst_len. */ | ||
325 | /* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May */ | ||
326 | /* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/ | ||
327 | /* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES. */ | ||
328 | LOCAL void compress_compress(UBYTE *p_wrk_mem, | ||
329 | UBYTE *p_src_first, ULONG src_len, | ||
330 | UBYTE *p_dst_first, LONG *p_dst_len) | ||
331 | { | ||
332 | /* p_src and p_dst step through the source and destination blocks. */ | ||
333 | register UBYTE *p_src = p_src_first; | ||
334 | register UBYTE *p_dst = p_dst_first; | ||
335 | |||
336 | /* The following variables are never modified and are used in the */ | ||
337 | /* calculations that determine when the main loop terminates. */ | ||
338 | UBYTE *p_src_post = p_src_first+src_len; | ||
339 | UBYTE *p_dst_post = p_dst_first+src_len; | ||
340 | UBYTE *p_src_max1 = p_src_first+src_len-MAX_RAW_ITEM; | ||
341 | UBYTE *p_src_max16 = p_src_first+src_len-MAX_RAW_ITEM*16; | ||
342 | |||
343 | /* The variables 'p_control' and 'control' are used to buffer control bits. */ | ||
344 | /* Before each group is processed, the next two bytes of the output block */ | ||
345 | /* are set aside for the control word for the group about to be processed. */ | ||
346 | /* 'p_control' is set to point to the first byte of that word. Meanwhile, */ | ||
347 | /* 'control' buffers the control bits being generated during the processing */ | ||
348 | /* of the group. Instead of having a counter to keep track of how many items */ | ||
349 | /* have been processed (=the number of bits in the control word), at the */ | ||
350 | /* start of each group, the top word of 'control' is filled with 1 bits. */ | ||
351 | /* As 'control' is shifted for each item, the 1 bits in the top word are */ | ||
352 | /* absorbed or destroyed. When they all run out (i.e. when the top word is */ | ||
353 | /* all zero bits), we know that we are at the end of a group. */ | ||
354 | # define TOPWORD 0xFFFF0000 | ||
355 | UBYTE *p_control; | ||
356 | register ULONG control=TOPWORD; | ||
357 | |||
358 | /* The variable 'hash' always points to the first element of the hash table. */ | ||
359 | UBYTE **hash= (UBYTE **) ULONG_ALIGN_UP(p_wrk_mem); | ||
360 | |||
361 | /* The following two variables represent the literal buffer. p_h1 points to */ | ||
362 | /* the hash table entry corresponding to the youngest literal. p_h2 points */ | ||
363 | /* to the hash table entry corresponding to the second youngest literal. */ | ||
364 | /* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending */ | ||
365 | /* literal. The variables are initialized to zero meaning an empty "buffer". */ | ||
366 | UBYTE **p_h1=NULL; | ||
367 | UBYTE **p_h2=NULL; | ||
368 | |||
369 | /* To start, we write the flag bytes. Being optimistic, we set the flag to */ | ||
370 | /* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the */ | ||
371 | /* algorithm deterministic. */ | ||
372 | *p_dst++=FLAG_COMPRESS; | ||
373 | {UWORD i; for (i=2;i<=FLAG_BYTES;i++) *p_dst++=0;} | ||
374 | |||
375 | /* Reserve the first word of output as the control word for the first group. */ | ||
376 | /* Note: This is undone at the end if the input block is empty. */ | ||
377 | p_control=p_dst; p_dst+=2; | ||
378 | |||
379 | /* Initialize all elements of the hash table to point to a constant string. */ | ||
380 | /* Use of an unrolled loop speeds this up considerably. */ | ||
381 | {UWORD i; UBYTE **p_h=hash; | ||
382 | # define ZH *p_h++=START_STRING_18 | ||
383 | for (i=0;i<256;i++) /* 256=HASH_TABLE_LENGTH/16. */ | ||
384 | {ZH;ZH;ZH;ZH; | ||
385 | ZH;ZH;ZH;ZH; | ||
386 | ZH;ZH;ZH;ZH; | ||
387 | ZH;ZH;ZH;ZH;} | ||
388 | } | ||
389 | |||
390 | /* The main loop processes either 1 or 16 items per iteration. As its */ | ||
391 | /* termination logic is complicated, I have opted for an infinite loop */ | ||
392 | /* structure containing 'break' and 'goto' statements. */ | ||
393 | while (TRUE) | ||
394 | {/* Begin main processing loop. */ | ||
395 | |||
396 | /* Note: All the variables here except unroll should be defined within */ | ||
397 | /* the inner loop. Unfortunately the loop hasn't got a block. */ | ||
398 | register UBYTE *p; /* Scans through targ phrase during matching. */ | ||
399 | register UBYTE *p_ziv= NULL ; /* Points to first byte of current Ziv. */ | ||
400 | register UWORD unroll; /* Loop counter for unrolled inner loop. */ | ||
401 | register UWORD index; /* Index of current hash table entry. */ | ||
402 | register UBYTE **p_h0 = NULL ; /* Pointer to current hash table entry. */ | ||
403 | |||
404 | /* Test for overrun and jump to overrun code if necessary. */ | ||
405 | if (p_dst>p_dst_post) | ||
406 | goto overrun; | ||
407 | |||
408 | /* The following cascade of if statements efficiently catches and deals */ | ||
409 | /* with varying degrees of closeness to the end of the input block. */ | ||
410 | /* When we get very close to the end, we stop updating the table and */ | ||
411 | /* code the remaining bytes as literals. This makes the code simpler. */ | ||
412 | unroll=16; | ||
413 | if (p_src>p_src_max16) | ||
414 | { | ||
415 | unroll=1; | ||
416 | if (p_src>p_src_max1) | ||
417 | { | ||
418 | if (p_src==p_src_post) | ||
419 | break; | ||
420 | else | ||
421 | goto literal; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | /* This inner unrolled loop processes 'unroll' (whose value is either 1 */ | ||
426 | /* or 16) items. I have chosen to implement this loop with labels and */ | ||
427 | /* gotos to heighten the ease with which the loop may be implemented with */ | ||
428 | /* a single decrement and branch instruction in assembly language and */ | ||
429 | /* also because the labels act as highly readable place markers. */ | ||
430 | /* (Also because we jump into the loop for endgame literals (see above)). */ | ||
431 | |||
432 | begin_unrolled_loop: | ||
433 | |||
434 | /* To process the next phrase, we hash the next three bytes and use */ | ||
435 | /* the resultant hash table index to look up the hash table. A pointer */ | ||
436 | /* to the entry is stored in p_h0 so as to avoid an array lookup. The */ | ||
437 | /* hash table entry *p_h0 is looked up yielding a pointer p to a */ | ||
438 | /* potential match of the Ziv in the history. */ | ||
439 | index=HASH(p_src); | ||
440 | p_h0=&hash[index]; | ||
441 | p=*p_h0; | ||
442 | |||
443 | /* Having looked up the candidate position, we are in a position to */ | ||
444 | /* attempt a match. The match loop has been unrolled using the PS */ | ||
445 | /* macro so that failure within the first three bytes automatically */ | ||
446 | /* results in the literal branch being taken. The coding is simple. */ | ||
447 | /* p_ziv saves p_src so we can let p_src wander. */ | ||
448 | # define PS *p++!=*p_src++ | ||
449 | p_ziv=p_src; | ||
450 | if (PS || PS || PS) | ||
451 | { | ||
452 | /* Literal. */ | ||
453 | |||
454 | /* Code the literal byte as itself and a zero control bit. */ | ||
455 | p_src=p_ziv; literal: *p_dst++=*p_src++; control&=0xFFFEFFFF; | ||
456 | |||
457 | /* We have just coded a literal. If we had two pending ones, that */ | ||
458 | /* makes three and we can update the hash table. */ | ||
459 | if (p_h2!=0) | ||
460 | {*p_h2=p_ziv-2;} | ||
461 | |||
462 | /* In any case, rotate the hash table pointers for next time. */ | ||
463 | p_h2=p_h1; p_h1=p_h0; | ||
464 | |||
465 | } | ||
466 | else | ||
467 | { | ||
468 | /* Copy */ | ||
469 | |||
470 | /* Match up to 15 remaining bytes using an unrolled loop and code. */ | ||
471 | #if 0 | ||
472 | PS || PS || PS || PS || PS || PS || PS || PS || | ||
473 | PS || PS || PS || PS || PS || PS || PS || p_src++; | ||
474 | #else | ||
475 | if ( | ||
476 | !( PS || PS || PS || PS || PS || PS || PS || PS || | ||
477 | PS || PS || PS || PS || PS || PS || PS ) | ||
478 | ) p_src++; | ||
479 | #endif | ||
480 | *p_dst++=((index&0xF00)>>4)|(--p_src-p_ziv-3); | ||
481 | *p_dst++=index&0xFF; | ||
482 | |||
483 | /* As we have just coded three bytes, we are now in a position to */ | ||
484 | /* update the hash table with the literal bytes that were pending */ | ||
485 | /* upon the arrival of extra context bytes. */ | ||
486 | if (p_h1!=0) | ||
487 | { | ||
488 | if (p_h2) | ||
489 | {*p_h2=p_ziv-2; p_h2=NULL;} | ||
490 | *p_h1=p_ziv-1; p_h1=NULL; | ||
491 | } | ||
492 | |||
493 | /* In any case, we can update the hash table based on the current */ | ||
494 | /* position as we just coded at least three bytes in a copy item. */ | ||
495 | *p_h0=p_ziv; | ||
496 | |||
497 | } | ||
498 | control>>=1; | ||
499 | |||
500 | /* This loop is all set up for a decrement and jump instruction! */ | ||
501 | #ifndef linux | ||
502 | end_unrolled_loop: if (--unroll) goto begin_unrolled_loop; | ||
503 | #else | ||
504 | /* end_unrolled_loop: */ if (--unroll) goto begin_unrolled_loop; | ||
505 | #endif | ||
506 | |||
507 | /* At this point it will nearly always be the end of a group in which */ | ||
508 | /* case, we have to do some control-word processing. However, near the */ | ||
509 | /* end of the input block, the inner unrolled loop is only executed once. */ | ||
510 | /* This necessitates the 'if' test. */ | ||
511 | if ((control&TOPWORD)==0) | ||
512 | { | ||
513 | /* Write the control word to the place we saved for it in the output. */ | ||
514 | *p_control++= control &0xFF; | ||
515 | *p_control = (control>>8) &0xFF; | ||
516 | |||
517 | /* Reserve the next word in the output block for the control word */ | ||
518 | /* for the group about to be processed. */ | ||
519 | p_control=p_dst; p_dst+=2; | ||
520 | |||
521 | /* Reset the control bits buffer. */ | ||
522 | control=TOPWORD; | ||
523 | } | ||
524 | |||
525 | } /* End main processing loop. */ | ||
526 | |||
527 | /* After the main processing loop has executed, all the input bytes have */ | ||
528 | /* been processed. However, the control word has still to be written to the */ | ||
529 | /* word reserved for it in the output at the start of the most recent group. */ | ||
530 | /* Before writing, the control word has to be shifted so that all the bits */ | ||
531 | /* are in the right place. The "empty" bit positions are filled with 1s */ | ||
532 | /* which partially fill the top word. */ | ||
533 | while(control&TOPWORD) control>>=1; | ||
534 | *p_control++= control &0xFF; | ||
535 | *p_control++=(control>>8) &0xFF; | ||
536 | |||
537 | /* If the last group contained no items, delete the control word too. */ | ||
538 | if (p_control==p_dst) p_dst-=2; | ||
539 | |||
540 | /* Write the length of the output block to the dst_len parameter and return. */ | ||
541 | *p_dst_len=p_dst-p_dst_first; | ||
542 | return; | ||
543 | |||
544 | /* Jump here as soon as an overrun is detected. An overrun is defined to */ | ||
545 | /* have occurred if p_dst>p_dst_first+src_len. That is, the moment the */ | ||
546 | /* length of the output written so far exceeds the length of the input block.*/ | ||
547 | /* The algorithm checks for overruns at least at the end of each group */ | ||
548 | /* which means that the maximum overrun is MAX_CMP_GROUP bytes. */ | ||
549 | /* Once an overrun occurs, the only thing to do is to set the copy flag and */ | ||
550 | /* copy the input over. */ | ||
551 | overrun: | ||
552 | #if 0 | ||
553 | *p_dst_first=FLAG_COPY; | ||
554 | fast_copy(p_src_first,p_dst_first+FLAG_BYTES,src_len); | ||
555 | *p_dst_len=src_len+FLAG_BYTES; | ||
556 | #else | ||
557 | fast_copy(p_src_first,p_dst_first,src_len); | ||
558 | *p_dst_len= -src_len; /* return a negative number to indicate uncompressed data */ | ||
559 | #endif | ||
560 | } | ||
561 | |||
562 | /******************************************************************************/ | ||
563 | |||
564 | /* Input : Hand over the required amount of working memory in p_wrk_mem. */ | ||
565 | /* Input : Specify input block using p_src_first and src_len. */ | ||
566 | /* Input : Point p_dst_first to the start of the output zone. */ | ||
567 | /* Input : Point p_dst_len to a ULONG to receive the output length. */ | ||
568 | /* Input : Input block and output zone must not overlap. User knows */ | ||
569 | /* Input : upperbound on output block length from earlier compression. */ | ||
570 | /* Input : In any case, maximum expansion possible is nine times. */ | ||
571 | /* Output : Length of output block written to *p_dst_len. */ | ||
572 | /* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */ | ||
573 | /* Output : Writes only in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */ | ||
574 | LOCAL void compress_decompress( UBYTE *p_wrk_mem, | ||
575 | UBYTE *p_src_first, LONG src_len, | ||
576 | UBYTE *p_dst_first, ULONG *p_dst_len) | ||
577 | { | ||
578 | /* Byte pointers p_src and p_dst scan through the input and output blocks. */ | ||
579 | register UBYTE *p_src = p_src_first+FLAG_BYTES; | ||
580 | register UBYTE *p_dst = p_dst_first; | ||
581 | /* we need to avoid a SEGV when trying to uncompress corrupt data */ | ||
582 | register UBYTE *p_dst_post = p_dst_first + *p_dst_len; | ||
583 | |||
584 | /* The following two variables are never modified and are used to control */ | ||
585 | /* the main loop. */ | ||
586 | UBYTE *p_src_post = p_src_first+src_len; | ||
587 | UBYTE *p_src_max16 = p_src_first+src_len-(MAX_CMP_GROUP-2); | ||
588 | |||
589 | /* The hash table is the only resident of the working memory. The hash table */ | ||
590 | /* contains HASH_TABLE_LENGTH=4096 pointers to positions in the history. To */ | ||
591 | /* keep Macintoshes happy, it is longword aligned. */ | ||
592 | UBYTE **hash = (UBYTE **) ULONG_ALIGN_UP(p_wrk_mem); | ||
593 | |||
594 | /* The variable 'control' is used to buffer the control bits which appear in */ | ||
595 | /* groups of 16 bits (control words) at the start of each compressed group. */ | ||
596 | /* When each group is read, bit 16 of the register is set to one. Whenever */ | ||
597 | /* a new bit is needed, the register is shifted right. When the value of the */ | ||
598 | /* register becomes 1, we know that we have reached the end of a group. */ | ||
599 | /* Initializing the register to 1 thus instructs the code to follow that it */ | ||
600 | /* should read a new control word immediately. */ | ||
601 | register ULONG control=1; | ||
602 | |||
603 | /* The value of 'literals' is always in the range 0..3. It is the number of */ | ||
604 | /* consecutive literal items just seen. We have to record this number so as */ | ||
605 | /* to know when to update the hash table. When literals gets to 3, there */ | ||
606 | /* have been three consecutive literals and we can update at the position of */ | ||
607 | /* the oldest of the three. */ | ||
608 | register UWORD literals=0; | ||
609 | |||
610 | /* Check the leading copy flag to see if the compressor chose to use a copy */ | ||
611 | /* operation instead of a compression operation. If a copy operation was */ | ||
612 | /* used, then all we need to do is copy the data over, set the output length */ | ||
613 | /* and return. */ | ||
614 | #if 0 | ||
615 | if (*p_src_first==FLAG_COPY) | ||
616 | { | ||
617 | fast_copy(p_src_first+FLAG_BYTES,p_dst_first,src_len-FLAG_BYTES); | ||
618 | *p_dst_len=src_len-FLAG_BYTES; | ||
619 | return; | ||
620 | } | ||
621 | #else | ||
622 | if ( src_len < 0 ) | ||
623 | { | ||
624 | fast_copy(p_src_first,p_dst_first,-src_len ); | ||
625 | *p_dst_len = (ULONG)-src_len; | ||
626 | return; | ||
627 | } | ||
628 | #endif | ||
629 | |||
630 | /* Initialize all elements of the hash table to point to a constant string. */ | ||
631 | /* Use of an unrolled loop speeds this up considerably. */ | ||
632 | {UWORD i; UBYTE **p_h=hash; | ||
633 | # define ZJ *p_h++=START_STRING_18 | ||
634 | for (i=0;i<256;i++) /* 256=HASH_TABLE_LENGTH/16. */ | ||
635 | {ZJ;ZJ;ZJ;ZJ; | ||
636 | ZJ;ZJ;ZJ;ZJ; | ||
637 | ZJ;ZJ;ZJ;ZJ; | ||
638 | ZJ;ZJ;ZJ;ZJ;} | ||
639 | } | ||
640 | |||
641 | /* The outer loop processes either 1 or 16 items per iteration depending on */ | ||
642 | /* how close p_src is to the end of the input block. */ | ||
643 | while (p_src!=p_src_post) | ||
644 | {/* Start of outer loop */ | ||
645 | |||
646 | register UWORD unroll; /* Counts unrolled loop executions. */ | ||
647 | |||
648 | /* When 'control' has the value 1, it means that the 16 buffered control */ | ||
649 | /* bits that were read in at the start of the current group have all been */ | ||
650 | /* shifted out and that all that is left is the 1 bit that was injected */ | ||
651 | /* into bit 16 at the start of the current group. When we reach the end */ | ||
652 | /* of a group, we have to load a new control word and inject a new 1 bit. */ | ||
653 | if (control==1) | ||
654 | { | ||
655 | control=0x10000|*p_src++; | ||
656 | control|=(*p_src++)<<8; | ||
657 | } | ||
658 | |||
659 | /* If it is possible that we are within 16 groups from the end of the */ | ||
660 | /* input, execute the unrolled loop only once, else process a whole group */ | ||
661 | /* of 16 items by looping 16 times. */ | ||
662 | unroll= p_src<=p_src_max16 ? 16 : 1; | ||
663 | |||
664 | /* This inner loop processes one phrase (item) per iteration. */ | ||
665 | while (unroll--) | ||
666 | { /* Begin unrolled inner loop. */ | ||
667 | |||
668 | /* Process a literal or copy item depending on the next control bit. */ | ||
669 | if (control&1) | ||
670 | { | ||
671 | /* Copy item. */ | ||
672 | |||
673 | register UBYTE *p; /* Points to place from which to copy. */ | ||
674 | register UWORD lenmt; /* Length of copy item minus three. */ | ||
675 | register UBYTE **p_hte; /* Pointer to current hash table entry.*/ | ||
676 | register UBYTE *p_ziv=p_dst; /* Pointer to start of current Ziv. */ | ||
677 | |||
678 | /* Read and dismantle the copy word. Work out from where to copy. */ | ||
679 | lenmt=*p_src++; | ||
680 | p_hte=&hash[((lenmt&0xF0)<<4)|*p_src++]; | ||
681 | p=*p_hte; | ||
682 | lenmt&=0xF; | ||
683 | |||
684 | /* Now perform the copy using a half unrolled loop. */ | ||
685 | *p_dst++=*p++; | ||
686 | *p_dst++=*p++; | ||
687 | *p_dst++=*p++; | ||
688 | while (lenmt--) | ||
689 | *p_dst++=*p++; | ||
690 | |||
691 | /* Because we have just received 3 or more bytes in a copy item */ | ||
692 | /* (whose bytes we have just installed in the output), we are now */ | ||
693 | /* in a position to flush all the pending literal hashings that had */ | ||
694 | /* been postponed for lack of bytes. */ | ||
695 | if (literals>0) | ||
696 | { | ||
697 | register UBYTE *r=p_ziv-literals; | ||
698 | hash[HASH(r)]=r; | ||
699 | if (literals==2) | ||
700 | {r++; hash[HASH(r)]=r;} | ||
701 | literals=0; | ||
702 | } | ||
703 | |||
704 | /* In any case, we can immediately update the hash table with the */ | ||
705 | /* current position. We don't need to do a HASH(...) to work out */ | ||
706 | /* where to put the pointer, as the compressor just told us!!! */ | ||
707 | *p_hte=p_ziv; | ||
708 | |||
709 | } | ||
710 | else | ||
711 | { | ||
712 | /* Literal item. */ | ||
713 | |||
714 | /* Copy over the literal byte. */ | ||
715 | *p_dst++=*p_src++; | ||
716 | |||
717 | /* If we now have three literals waiting to be hashed into the hash */ | ||
718 | /* table, we can do one of them now (because there are three). */ | ||
719 | if (++literals == 3) | ||
720 | {register UBYTE *p=p_dst-3; hash[HASH(p)]=p; literals=2;} | ||
721 | } | ||
722 | |||
723 | /* Shift the control buffer so the next control bit is in bit 0. */ | ||
724 | control>>=1; | ||
725 | #if 1 | ||
726 | if (p_dst > p_dst_post) | ||
727 | { | ||
728 | /* Shit: we tried to decompress corrupt data */ | ||
729 | *p_dst_len = 0; | ||
730 | return; | ||
731 | } | ||
732 | #endif | ||
733 | } /* End unrolled inner loop. */ | ||
734 | |||
735 | } /* End of outer loop */ | ||
736 | |||
737 | /* Write the length of the decompressed data before returning. */ | ||
738 | *p_dst_len=p_dst-p_dst_first; | ||
739 | } | ||
740 | |||
741 | /******************************************************************************/ | ||
742 | /* End of LZRW3.C */ | ||
743 | /******************************************************************************/ | ||
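As a reference for the format comments in the file just removed, here is a small standalone sketch (not part of the original driver; the sample bytes are invented) that unpacks a two-byte copy item the same way the decompressor's inline code does, recovering the 3..18 byte length and the 0..4095 hash-table index:

```c
#include <stdio.h>

/*
 * Hypothetical demonstration only: decode one copy item <C1,C2> as defined in
 * the "DEFINITION OF COMPRESSED FILE FORMAT" comment of lzrw3.c:
 *   LENGTH = C1[0..3] + 3         (3..18 bytes)
 *   INDEX  = C1[4..7]*256 + C2    (hash table slot, 0..4095)
 */
int main(void)
{
	unsigned char c1 = 0xA7, c2 = 0x3C;   /* invented example copy item */

	unsigned int length = (c1 & 0x0F) + 3;
	unsigned int index  = ((c1 & 0xF0) << 4) | c2;

	/* The decompressor performs the same arithmetic in place:
	 *   lenmt = *p_src++;
	 *   p_hte = &hash[((lenmt & 0xF0) << 4) | *p_src++];
	 *   lenmt &= 0xF;
	 */
	printf("copy item 0x%02X 0x%02X -> length %u, hash index %u\n",
	       c1, c2, length, index);
	return 0;
}
```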
diff --git a/drivers/char/ftape/compressor/lzrw3.h b/drivers/char/ftape/compressor/lzrw3.h
deleted file mode 100644
index 533feba47526..000000000000
--- a/drivers/char/ftape/compressor/lzrw3.h
+++ /dev/null
@@ -1,253 +0,0 @@ | |||
1 | #ifndef _LZRW3_H | ||
2 | #define _LZRW3_H | ||
3 | /* | ||
4 | * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.h,v $ | ||
5 | * $Revision: 1.1 $ | ||
6 | * $Date: 1997/10/05 19:12:30 $ | ||
7 | * | ||
8 | * include files for lzrw3. Only slightly modified from the original | ||
9 | * version. Assembles the three include files compress.h, port.h and | ||
10 | * fastcopy.h from the original lzrw3 package. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/string.h> | ||
16 | |||
17 | /******************************************************************************/ | ||
18 | /* */ | ||
19 | /* COMPRESS.H */ | ||
20 | /* */ | ||
21 | /******************************************************************************/ | ||
22 | /* */ | ||
23 | /* Author : Ross Williams. */ | ||
24 | /* Date : December 1989. */ | ||
25 | /* */ | ||
26 | /* This header file defines the interface to a set of functions called */ | ||
27 | /* 'compress', each member of which implements a particular data compression */ | ||
28 | /* algorithm. */ | ||
29 | /* */ | ||
30 | /* Normally in C programming, for each .H file, there is a corresponding .C */ | ||
31 | /* file that implements the functions promised in the .H file. */ | ||
32 | /* Here, there are many .C files corresponding to this header file. */ | ||
33 | /* Each conforming implementation file contains a single function */ | ||
34 | /* called 'compress' that implements a single data compression */ | ||
35 | /* algorithm that conforms with the interface specified in this header file. */ | ||
36 | /* Only one algorithm can be linked in at a time in this organization. */ | ||
37 | /* */ | ||
38 | /******************************************************************************/ | ||
39 | /* */ | ||
40 | /* DEFINITION OF FUNCTION COMPRESS */ | ||
41 | /* =============================== */ | ||
42 | /* */ | ||
43 | /* Summary of Function Compress */ | ||
44 | /* ---------------------------- */ | ||
45 | /* The action that 'compress' takes depends on its first argument called */ | ||
46 | /* 'action'. The function provides three actions: */ | ||
47 | /* */ | ||
48 | /* - Return information about the algorithm. */ | ||
49 | /* - Compress a block of memory. */ | ||
50 | /* - Decompress a block of memory. */ | ||
51 | /* */ | ||
52 | /* Parameters */ | ||
53 | /* ---------- */ | ||
54 | /* See the formal C definition later for a description of the parameters. */ | ||
55 | /* */ | ||
56 | /* Constants */ | ||
57 | /* --------- */ | ||
58 | /* COMPRESS_OVERRUN: The constant COMPRESS_OVERRUN defines by how many bytes */ | ||
59 | /* an algorithm is allowed to expand a block during a compression operation. */ | ||
60 | /* */ | ||
61 | /* Although compression algorithms usually compress data, there will always */ | ||
62 | /* be data that a given compressor will expand (this can be proven). */ | ||
63 | /* Fortunately, the degree of expansion can be limited to a single bit, by */ | ||
64 | /* copying over the input data if the data gets bigger during compression. */ | ||
65 | /* To allow for this possibility, the first bit of a compressed */ | ||
66 | /* representation can be used as a flag indicating whether the */ | ||
67 | /* input data was copied over, or truly compressed. In practice, the first */ | ||
68 | /* byte would be used to store this bit so as to maintain byte alignment. */ | ||
69 | /* */ | ||
70 | /* Unfortunately, in general, the only way to tell if an algorithm will */ | ||
71 | /* expand a particular block of data is to run the algorithm on the data. */ | ||
72 | /* If the algorithm does not continuously monitor how many output bytes it */ | ||
73 | /* has written, it might write an output block far larger than the input */ | ||
74 | /* block before realizing that it has done so. */ | ||
75 | /* On the other hand, continuous checks on output length are inefficient. */ | ||
76 | /* */ | ||
77 | /* To cater for all these problems, this interface definition: */ | ||
78 | /* > Allows a compression algorithm to return an output block that is up to */ | ||
79 | /* COMPRESS_OVERRUN bytes longer than the input block. */ | ||
80 | /* > Allows a compression algorithm to write up to COMPRESS_OVERRUN bytes */ | ||
81 | /* more than the length of the input block to the memory of the output */ | ||
82 | /* block regardless of the length of the output block eventually returned. */ | ||
83 | /* This allows an algorithm to overrun the length of the input block in the */ | ||
84 | /* output block by up to COMPRESS_OVERRUN bytes between expansion checks. */ | ||
85 | /* */ | ||
86 | /* The problem does not arise for decompression. */ | ||
87 | /* */ | ||
88 | /* Identity Action */ | ||
89 | /* --------------- */ | ||
90 | /* > action must be COMPRESS_ACTION_IDENTITY. */ | ||
91 | /* > p_dst_len must point to a longword to receive a longword address. */ | ||
92 | /* > The value of the other parameters does not matter. */ | ||
93 | /* > After execution, the longword that p_dst_len points to will be a pointer */ | ||
94 | /* to a structure of type compress_identity. */ | ||
95 | /* Thus, for example, after the call, (*p_dst_len)->memory will return the */ | ||
96 | /* number of bytes of working memory that the algorithm requires to run. */ | ||
97 | /* > The values of the identity structure returned are fixed constant */ | ||
98 | /* attributes of the algorithm and must not vary from call to call. */ | ||
99 | /* */ | ||
100 | /* Common Requirements for Compression and Decompression Actions */ | ||
101 | /* ------------------------------------------------------------- */ | ||
102 | /* > wrk_mem must point to an unused block of memory of a length specified in */ | ||
103 | /* the algorithm's identity block. The identity block can be obtained by */ | ||
104 | /* making a separate call to compress, specifying the identity action. */ | ||
105 | /* > The INPUT BLOCK is defined to be Memory[src_addr,src_addr+src_len-1]. */ | ||
106 | /* > dst_len will be used to denote *p_dst_len. */ | ||
107 | /* > dst_len is not read by compress, only written. */ | ||
108 | /* > The value of dst_len is defined only upon termination. */ | ||
109 | /* > The OUTPUT BLOCK is defined to be Memory[dst_addr,dst_addr+dst_len-1]. */ | ||
110 | /* */ | ||
111 | /* Compression Action */ | ||
112 | /* ------------------ */ | ||
113 | /* > action must be COMPRESS_ACTION_COMPRESS. */ | ||
114 | /* > src_len must be in the range [0,COMPRESS_MAX_ORG]. */ | ||
115 | /* > The OUTPUT ZONE is defined to be */ | ||
116 | /* Memory[dst_addr,dst_addr+src_len-1+COMPRESS_OVERRUN]. */ | ||
117 | /* > The function can modify any part of the output zone regardless of the */ | ||
118 | /* final length of the output block. */ | ||
119 | /* > The input block and the output zone must not overlap. */ | ||
120 | /* > dst_len will be in the range [0,src_len+COMPRESS_OVERRUN]. */ | ||
121 | /* > dst_len will be in the range [0,COMPRESS_MAX_COM] (from prev fact). */ | ||
122 | /* > The output block will consist of a representation of the input block. */ | ||
123 | /* */ | ||
124 | /* Decompression Action */ | ||
125 | /* -------------------- */ | ||
126 | /* > action must be COMPRESS_ACTION_DECOMPRESS. */ | ||
127 | /* > The input block must be the result of an earlier compression operation. */ | ||
128 | /* > If the previous fact is true, the following facts must also be true: */ | ||
129 | /* > src_len will be in the range [0,COMPRESS_MAX_COM]. */ | ||
130 | /* > dst_len will be in the range [0,COMPRESS_MAX_ORG]. */ | ||
131 | /* > The input and output blocks must not overlap. */ | ||
132 | /* > Only the output block is modified. */ | ||
133 | /* > Upon termination, the output block will consist of the bytes contained */ | ||
134 | /* in the input block passed to the earlier compression operation. */ | ||
135 | /* */ | ||
136 | /******************************************************************************/ | ||
137 | |||
138 | /******************************************************************************/ | ||
139 | /* */ | ||
140 | /* PORT.H */ | ||
141 | /* */ | ||
142 | /******************************************************************************/ | ||
143 | /* */ | ||
144 | /* This module contains macro definitions and types that are likely to */ | ||
145 | /* change between computers. */ | ||
146 | /* */ | ||
147 | /******************************************************************************/ | ||
148 | |||
149 | #ifndef DONE_PORT /* Only do this if not previously done. */ | ||
150 | |||
151 | #ifdef THINK_C | ||
152 | #define UBYTE unsigned char /* Unsigned byte */ | ||
153 | #define UWORD unsigned int /* Unsigned word (2 bytes) */ | ||
154 | #define ULONG unsigned long /* Unsigned word (4 bytes) */ | ||
155 | #define BOOL unsigned char /* Boolean */ | ||
156 | #define FOPEN_BINARY_READ "rb" /* Mode string for binary reading. */ | ||
157 | #define FOPEN_BINARY_WRITE "wb" /* Mode string for binary writing. */ | ||
158 | #define FOPEN_TEXT_APPEND "a" /* Mode string for text appending. */ | ||
159 | #define REAL double /* Used for floating point stuff. */ | ||
160 | #endif | ||
161 | #if defined(LINUX) || defined(linux) | ||
162 | #define UBYTE __u8 /* Unsigned byte */ | ||
163 | #define UWORD __u16 /* Unsigned word (2 bytes) */ | ||
164 | #define ULONG __u32 /* Unsigned word (4 bytes) */ | ||
165 | #define LONG __s32 /* Signed word (4 bytes) */ | ||
166 | #define BOOL is not used here /* Boolean */ | ||
167 | #define FOPEN_BINARY_READ not used /* Mode string for binary reading. */ | ||
168 | #define FOPEN_BINARY_WRITE not used /* Mode string for binary writing. */ | ||
169 | #define FOPEN_TEXT_APPEND not used /* Mode string for text appending. */ | ||
170 | #define REAL not used /* Used for floating point stuff. */ | ||
171 | #ifndef TRUE | ||
172 | #define TRUE 1 | ||
173 | #endif | ||
174 | #endif | ||
175 | |||
176 | #define DONE_PORT /* Don't do all this again. */ | ||
177 | #define MALLOC_FAIL NULL /* Failure status from malloc() */ | ||
178 | #define LOCAL static /* For non-exported routines. */ | ||
179 | #define EXPORT /* Signals exported function. */ | ||
180 | #define then /* Useful for aligning ifs. */ | ||
181 | |||
182 | #endif | ||
183 | |||
184 | /******************************************************************************/ | ||
185 | /* End of PORT.H */ | ||
186 | /******************************************************************************/ | ||
187 | |||
188 | #define COMPRESS_ACTION_IDENTITY 0 | ||
189 | #define COMPRESS_ACTION_COMPRESS 1 | ||
190 | #define COMPRESS_ACTION_DECOMPRESS 2 | ||
191 | |||
192 | #define COMPRESS_OVERRUN 1024 | ||
193 | #define COMPRESS_MAX_COM 0x70000000 | ||
194 | #define COMPRESS_MAX_ORG (COMPRESS_MAX_COM-COMPRESS_OVERRUN) | ||
195 | |||
196 | #define COMPRESS_MAX_STRLEN 255 | ||
197 | |||
198 | /* The following structure provides information about the algorithm. */ | ||
199 | /* > The top bit of id must be zero. The remaining bits must be chosen by */ | ||
200 | /* the author of the algorithm by tossing a coin 31 times. */ | ||
201 | /* > The amount of memory requested by the algorithm is specified in bytes */ | ||
202 | /* and must be in the range [0,0x70000000]. */ | ||
203 | /* > All strings s must be such that strlen(s)<=COMPRESS_MAX_STRLEN. */ | ||
204 | struct compress_identity | ||
205 | { | ||
206 | ULONG id; /* Identifying number of algorithm. */ | ||
207 | ULONG memory; /* Number of bytes of working memory required. */ | ||
208 | |||
209 | char *name; /* Name of algorithm. */ | ||
210 | char *version; /* Version number. */ | ||
211 | char *date; /* Date of release of this version. */ | ||
212 | char *copyright; /* Copyright message. */ | ||
213 | |||
214 | char *author; /* Author of algorithm. */ | ||
215 | char *affiliation; /* Affiliation of author. */ | ||
216 | char *vendor; /* Where the algorithm can be obtained. */ | ||
217 | }; | ||
218 | |||
219 | void lzrw3_compress( /* Single function interface to compression algorithm. */ | ||
220 | UWORD action, /* Action to be performed. */ | ||
221 | UBYTE *wrk_mem, /* Working memory temporarily given to routine to use. */ | ||
222 | UBYTE *src_adr, /* Address of input data. */ | ||
223 | LONG src_len, /* Length of input data. */ | ||
224 | UBYTE *dst_adr, /* Address of output data. */ | ||
225 | void *p_dst_len /* Pointer to a longword where routine will write: */ | ||
226 | /* If action=..IDENTITY => Adr of id structure. */ | ||
227 | /* If action=..COMPRESS => Length of output data. */ | ||
228 | /* If action=..DECOMPRESS => Length of output data. */ | ||
229 | ); | ||
230 | |||
231 | /******************************************************************************/ | ||
232 | /* End of COMPRESS.H */ | ||
233 | /******************************************************************************/ | ||
234 | |||
235 | |||
236 | /******************************************************************************/ | ||
237 | /* fast_copy.h */ | ||
238 | /******************************************************************************/ | ||
239 | |||
240 | /* This function copies a block of memory very quickly. */ | ||
241 | /* The exact speed depends on the relative alignment of the blocks of memory. */ | ||
242 | /* PRE : 0<=src_len<=(2^32)-1 . */ | ||
243 | /* PRE : Source and destination blocks must not overlap. */ | ||
244 | /* POST : MEM[dst_adr,dst_adr+src_len-1]=MEM[src_adr,src_adr+src_len-1]. */ | ||
245 | /* POST : MEM[dst_adr,dst_adr+src_len-1] is the only memory changed. */ | ||
246 | |||
247 | #define fast_copy(src,dst,len) memcpy(dst,src,len) | ||
248 | |||
249 | /******************************************************************************/ | ||
250 | /* End of fast_copy.h */ | ||
251 | /******************************************************************************/ | ||
252 | |||
253 | #endif | ||
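To make the calling convention above concrete, here is a hedged sketch of how a client of this header might drive lzrw3_compress(): query the identity record for the working-memory size, allocate that much scratch space, then perform the compression. The helper name, the vmalloc() allocation, and the error handling are assumptions for illustration, not code from zftape (which keeps its own preallocated zftc_wrk_mem buffer):

```c
#include <linux/errno.h>
#include <linux/vmalloc.h>

#include "../compressor/lzrw3.h"

/*
 * Illustrative helper (hypothetical, not from the removed driver): compress
 * src_len bytes from src into dst.  dst must cover the output zone, i.e.
 * src_len + COMPRESS_OVERRUN bytes.  Returns the length written to dst; a
 * negative dst_len means the block was stored uncompressed (the overrun path
 * of this zftape-modified lzrw3.c returns -src_len in that case).
 */
static LONG example_lzrw3_pack(__u8 *src, LONG src_len, __u8 *dst)
{
	struct compress_identity *id;
	void *wrk_mem;
	LONG dst_len;

	/* Step 1: the identity action reports, among other things, the
	 * amount of working memory the algorithm needs (id->memory). */
	lzrw3_compress(COMPRESS_ACTION_IDENTITY, NULL, NULL, 0, NULL, &id);

	wrk_mem = vmalloc(id->memory);
	if (!wrk_mem)
		return -ENOMEM;

	/* Step 2: hand the working memory back in and compress. */
	lzrw3_compress(COMPRESS_ACTION_COMPRESS, wrk_mem,
		       src, src_len, dst, &dst_len);

	vfree(wrk_mem);
	return dst_len;
}
```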
diff --git a/drivers/char/ftape/compressor/zftape-compress.c b/drivers/char/ftape/compressor/zftape-compress.c
deleted file mode 100644
index 65ffc0be3df9..000000000000
--- a/drivers/char/ftape/compressor/zftape-compress.c
+++ /dev/null
@@ -1,1203 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1994-1997 Claus-Justus Heine | ||
3 | |||
4 | This program is free software; you can redistribute it and/or | ||
5 | modify it under the terms of the GNU General Public License as | ||
6 | published by the Free Software Foundation; either version 2, or (at | ||
7 | your option) any later version. | ||
8 | |||
9 | This program is distributed in the hope that it will be useful, but | ||
10 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | General Public License for more details. | ||
13 | |||
14 | You should have received a copy of the GNU General Public License | ||
15 | along with this program; see the file COPYING. If not, write to | ||
16 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, | ||
17 | USA. | ||
18 | |||
19 | * | ||
20 | * This file implements a "generic" interface between the | ||
21 | * zftape-driver and a compression algorithm. The | ||
22 | * compression algorithm currently used is LZ77; I use the | ||
23 | * implementation lzrw3 by Ross N. Williams (Renaissance | ||
24 | * Software). The compression program itself is in the files | ||
25 | * lzrw3.c and lzrw3.h. To adopt another compression algorithm, | ||
26 | * the functions zft_compress() and zft_uncompress() must be | ||
27 | * changed appropriately. See below. | ||
28 | */ | ||
29 | |||
30 | #include <linux/errno.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/module.h> | ||
33 | |||
34 | #include <linux/zftape.h> | ||
35 | |||
36 | #include <asm/uaccess.h> | ||
37 | |||
38 | #include "../zftape/zftape-init.h" | ||
39 | #include "../zftape/zftape-eof.h" | ||
40 | #include "../zftape/zftape-ctl.h" | ||
41 | #include "../zftape/zftape-write.h" | ||
42 | #include "../zftape/zftape-read.h" | ||
43 | #include "../zftape/zftape-rw.h" | ||
44 | #include "../compressor/zftape-compress.h" | ||
45 | #include "../zftape/zftape-vtbl.h" | ||
46 | #include "../compressor/lzrw3.h" | ||
47 | |||
48 | /* | ||
49 | * global variables | ||
50 | */ | ||
51 | |||
52 | /* I handle the allocation of this buffer as a special case, because | ||
53 | * its size varies depending on the length of the inserted tape. | ||
54 | */ | ||
55 | |||
56 | /* local variables | ||
57 | */ | ||
58 | static void *zftc_wrk_mem = NULL; | ||
59 | static __u8 *zftc_buf = NULL; | ||
60 | static void *zftc_scratch_buf = NULL; | ||
61 | |||
62 | /* compression statistics | ||
63 | */ | ||
64 | static unsigned int zftc_wr_uncompressed = 0; | ||
65 | static unsigned int zftc_wr_compressed = 0; | ||
66 | static unsigned int zftc_rd_uncompressed = 0; | ||
67 | static unsigned int zftc_rd_compressed = 0; | ||
68 | |||
69 | /* forward */ | ||
70 | static int zftc_write(int *write_cnt, | ||
71 | __u8 *dst_buf, const int seg_sz, | ||
72 | const __u8 __user *src_buf, const int req_len, | ||
73 | const zft_position *pos, const zft_volinfo *volume); | ||
74 | static int zftc_read(int *read_cnt, | ||
75 | __u8 __user *dst_buf, const int to_do, | ||
76 | const __u8 *src_buf, const int seg_sz, | ||
77 | const zft_position *pos, const zft_volinfo *volume); | ||
78 | static int zftc_seek(unsigned int new_block_pos, | ||
79 | zft_position *pos, const zft_volinfo *volume, | ||
80 | __u8 *buffer); | ||
81 | static void zftc_lock (void); | ||
82 | static void zftc_reset (void); | ||
83 | static void zftc_cleanup(void); | ||
84 | static void zftc_stats (void); | ||
85 | |||
86 | /* compressed segment. This conforms to QIC-80-MC, Revision K. | ||
87 | * | ||
88 | * Rev. K applies to tapes with `fixed length format' which is | ||
89 | * indicated by format codes 2, 3 and 5. See below for format codes 4 and 6 | ||
90 | * | ||
91 | * 2 bytes: offset of compression segment structure | ||
92 | * 29k > offset >= 29k-18: data from previous segment ends in this | ||
93 | * segment and no compressed block starts | ||
94 | * in this segment | ||
95 | * offset == 0: data from previous segment occupies entire | ||
96 | * segment and continues in next segment | ||
97 | * n bytes: remainder from previous segment | ||
98 | * | ||
99 | * Rev. K: | ||
100 | * 4 bytes: file set byte offset | ||
101 | * Post Rev. K and QIC-3010/3020: | ||
102 | * 8 bytes: file set byte offset | ||
103 | * 2 bytes: byte count N (amount of data following) | ||
104 | * bit 15 is set if the data is uncompressed, bit 15 is | ||
105 | * clear if the data is compressed | ||
106 | * N bytes: data (as much as specified in the byte count) | ||
107 | * 2 bytes: byte count N_1 of next cluster | ||
108 | * N_1 bytes: data of next cluster | ||
109 | * 2 bytes: byte count N_2 of next cluster | ||
110 | * N_2 bytes: ... | ||
111 | * | ||
112 | * Note that the `N' byte count accounts only for the bytes that lie in | ||
113 | * the current segment if the cluster spans into the next segment. | ||
114 | */ | ||
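A hedged sketch of walking the layout just described (not from the original file; GET2()/GET4()/GET8() are assumed to be zftape's little-endian accessors, and buff, seg_sz and qic113 stand for a raw segment, its size and the format-code flag):

    unsigned int off = GET2(buff, 0);        /* 0: previous cluster fills the whole segment */
    if (off != 0 && off <= seg_sz) {
            /* bytes [2, off) hold the remainder of the previous cluster */
            if (seg_sz - off > 18) {         /* room for a new cluster header */
                    __s64 foffs;             /* file set byte offset of the new cluster */
                    __u16 count;             /* bytes of that cluster in this segment */
                    if (qic113) {
                            foffs = GET8(buff, off);
                            off += sizeof(__s64);
                    } else {
                            foffs = (__s64)GET4(buff, off);
                            off += sizeof(__u32);
                    }
                    count = GET2(buff, off); /* bit 15 set: stored uncompressed */
                    off  += sizeof(__u16);   /* cluster data starts here */
            }
    }

get_cseg() and get_next_cluster() below perform this walk with the full set of sanity checks.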
115 | |||
116 | typedef struct | ||
117 | { | ||
118 | int cmpr_pos; /* actual position in compression buffer */ | ||
119 | int cmpr_sz; /* what is left in the compression buffer | ||
120 | * when copying the compressed data to the | ||
121 | * deblock buffer | ||
122 | */ | ||
123 | unsigned int first_block; /* location of header information in | ||
124 | * this segment | ||
125 | */ | ||
126 | unsigned int count; /* amount of data of current block | ||
127 | * contained in current segment | ||
128 | */ | ||
129 | unsigned int offset; /* offset in current segment */ | ||
130 | unsigned int spans:1; /* might continue in next segment */ | ||
131 | unsigned int uncmpr; /* 0x8000 if this block contains | ||
132 | * uncompressed data | ||
133 | */ | ||
134 | __s64 foffs; /* file set byte offset, same as in | ||
135 | * compression map segment | ||
136 | */ | ||
137 | } cmpr_info; | ||
138 | |||
139 | static cmpr_info cseg; /* static data. Must be kept up to date and shared by | ||
140 | * read, write and seek functions | ||
141 | */ | ||
142 | |||
143 | #define DUMP_CMPR_INFO(level, msg, info) \ | ||
144 | TRACE(level, msg "\n" \ | ||
145 | KERN_INFO "cmpr_pos : %d\n" \ | ||
146 | KERN_INFO "cmpr_sz : %d\n" \ | ||
147 | KERN_INFO "first_block: %d\n" \ | ||
148 | KERN_INFO "count : %d\n" \ | ||
149 | KERN_INFO "offset : %d\n" \ | ||
150 | KERN_INFO "spans : %d\n" \ | ||
151 | KERN_INFO "uncmpr : 0x%04x\n" \ | ||
152 | KERN_INFO "foffs : " LL_X, \ | ||
153 | (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block, \ | ||
154 | (info)->count, (info)->offset, (info)->spans == 1, \ | ||
155 | (info)->uncmpr, LL((info)->foffs)) | ||
156 | |||
157 | /* dispatch compression segment info, return error code | ||
158 | * | ||
159 | * afterwards, cseg->offset points to start of data of the NEXT | ||
160 | * compressed block, and cseg->count contains the amount of data | ||
161 | * left in the actual compressed block. cseg->spans is set to 1 if | ||
162 | * the block is continued in the following segment. Otherwise it is | ||
163 | * set to 0. | ||
164 | */ | ||
165 | static int get_cseg (cmpr_info *cinfo, const __u8 *buff, | ||
166 | const unsigned int seg_sz, | ||
167 | const zft_volinfo *volume) | ||
168 | { | ||
169 | TRACE_FUN(ft_t_flow); | ||
170 | |||
171 | cinfo->first_block = GET2(buff, 0); | ||
172 | if (cinfo->first_block == 0) { /* data spans to next segment */ | ||
173 | cinfo->count = seg_sz - sizeof(__u16); | ||
174 | cinfo->offset = seg_sz; | ||
175 | cinfo->spans = 1; | ||
176 | } else { /* cluster definitely ends in this segment */ | ||
177 | if (cinfo->first_block > seg_sz) { | ||
178 | /* data corrupted */ | ||
179 | TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n" | ||
180 | KERN_INFO "segment size: %d\n" | ||
181 | KERN_INFO "first block : %d", | ||
182 | seg_sz, cinfo->first_block); | ||
183 | } | ||
184 | cinfo->count = cinfo->first_block - sizeof(__u16); | ||
185 | cinfo->offset = cinfo->first_block; | ||
186 | cinfo->spans = 0; | ||
187 | } | ||
188 | /* now get the offset the first block should have in the | ||
189 | * uncompressed data stream. | ||
190 | * | ||
191 | * For this magic `18' refer to CRF-3 standard or QIC-80MC, | ||
192 | * Rev. K. | ||
193 | */ | ||
194 | if ((seg_sz - cinfo->offset) > 18) { | ||
195 | if (volume->qic113) { /* > revision K */ | ||
196 | TRACE(ft_t_data_flow, "New QIC-113 compliance"); | ||
197 | cinfo->foffs = GET8(buff, cinfo->offset); | ||
198 | cinfo->offset += sizeof(__s64); | ||
199 | } else { | ||
200 | TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version"); | ||
201 | cinfo->foffs = (__s64)GET4(buff, cinfo->offset); | ||
202 | cinfo->offset += sizeof(__u32); | ||
203 | } | ||
204 | } | ||
205 | if (cinfo->foffs > volume->size) { | ||
206 | TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n" | ||
207 | KERN_INFO "offset in current volume: %d\n" | ||
208 | KERN_INFO "size of current volume : %d", | ||
209 | (int)(cinfo->foffs>>10), (int)(volume->size>>10)); | ||
210 | } | ||
211 | if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) { | ||
212 | TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n" | ||
213 | KERN_INFO "block size : %d\n" | ||
214 | KERN_INFO "data record: %d", | ||
215 | volume->blk_sz, cinfo->cmpr_pos + cinfo->count); | ||
216 | } | ||
217 | DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo); | ||
218 | TRACE_EXIT 0; | ||
219 | } | ||
220 | |||
221 | /* This one is called, when a new cluster starts in same segment. | ||
222 | * | ||
223 | * Note: if this is the first cluster in the current segment, we must | ||
224 | * not check whether there are more than 18 bytes available because | ||
225 | * this have already been done in get_cseg() and there may be less | ||
226 | * than 18 bytes available due to header information. | ||
227 | * | ||
228 | */ | ||
229 | static void get_next_cluster(cmpr_info *cluster, const __u8 *buff, | ||
230 | const int seg_sz, const int finish) | ||
231 | { | ||
232 | TRACE_FUN(ft_t_flow); | ||
233 | |||
234 | if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) { | ||
235 | cluster->count = GET2(buff, cluster->offset); | ||
236 | cluster->uncmpr = cluster->count & 0x8000; | ||
237 | cluster->count -= cluster->uncmpr; | ||
238 | cluster->offset += sizeof(__u16); | ||
239 | cluster->foffs = 0; | ||
240 | if ((cluster->offset + cluster->count) < seg_sz) { | ||
241 | cluster->spans = 0; | ||
242 | } else if (cluster->offset + cluster->count == seg_sz) { | ||
243 | cluster->spans = !finish; | ||
244 | } else { | ||
245 | /* either an error or a volume written by an | ||
246 | * old version. If this is a data error, then we'll | ||
247 | * catch it later. | ||
248 | */ | ||
249 | TRACE(ft_t_data_flow, "Either error or old volume"); | ||
250 | cluster->spans = 1; | ||
251 | cluster->count = seg_sz - cluster->offset; | ||
252 | } | ||
253 | } else { | ||
254 | cluster->count = 0; | ||
255 | cluster->spans = 0; | ||
256 | cluster->foffs = 0; | ||
257 | } | ||
258 | DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster); | ||
259 | TRACE_EXIT; | ||
260 | } | ||
261 | |||
262 | static void zftc_lock(void) | ||
263 | { | ||
264 | } | ||
265 | |||
266 | /* this function is needed for zftape_reset_position in zftape-io.c | ||
267 | */ | ||
268 | static void zftc_reset(void) | ||
269 | { | ||
270 | TRACE_FUN(ft_t_flow); | ||
271 | |||
272 | memset((void *)&cseg, '\0', sizeof(cseg)); | ||
273 | zftc_stats(); | ||
274 | TRACE_EXIT; | ||
275 | } | ||
276 | |||
277 | static int cmpr_mem_initialized = 0; | ||
278 | static unsigned int alloc_blksz = 0; | ||
279 | |||
280 | static int zft_allocate_cmpr_mem(unsigned int blksz) | ||
281 | { | ||
282 | TRACE_FUN(ft_t_flow); | ||
283 | |||
284 | if (cmpr_mem_initialized && blksz == alloc_blksz) { | ||
285 | TRACE_EXIT 0; | ||
286 | } | ||
287 | TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE), | ||
288 | zftc_cleanup()); | ||
289 | TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN), | ||
290 | zftc_cleanup()); | ||
291 | alloc_blksz = blksz; | ||
292 | TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN), | ||
293 | zftc_cleanup()); | ||
294 | cmpr_mem_initialized = 1; | ||
295 | TRACE_EXIT 0; | ||
296 | } | ||
297 | |||
298 | static void zftc_cleanup(void) | ||
299 | { | ||
300 | TRACE_FUN(ft_t_flow); | ||
301 | |||
302 | zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE); | ||
303 | zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN); | ||
304 | zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN); | ||
305 | cmpr_mem_initialized = alloc_blksz = 0; | ||
306 | TRACE_EXIT; | ||
307 | } | ||
308 | |||
309 | /***************************************************************************** | ||
310 | * * | ||
311 | * The following two functions "ftape_compress()" and * | ||
312 | * "ftape_uncompress()" are the interface to the actual compression * | ||
313 | * algorithm (i.e. they are calling the "compress()" function from * | ||
314 | * the lzrw3 package for now). These routines could quite easily be * | ||
315 | * changed to adopt another compression algorithm instead of lzrw3, * | ||
316 | * which currently is used. * | ||
317 | * * | ||
318 | *****************************************************************************/ | ||
319 | |||
320 | /* called by zft_compress_write() to perform the compression. Must | ||
321 | * return the size of the compressed data. | ||
322 | * | ||
323 | * NOTE: The size of the compressed data should not exceed the size of | ||
324 | * the uncompressed data. Most compression algorithms have means | ||
325 | * to store data unchanged if the "compressed" data amount would | ||
326 | * exceed the original one. Mostly this is done by storing some | ||
327 | * flag-bytes in front of the compressed data to indicate if it | ||
328 | * is compressed or not. Thus the worst compression result | ||
329 | * length is the original length plus those flag-bytes. | ||
330 | * | ||
331 | * We don't want that, as the QIC-80 standard provides a means | ||
332 | * of marking uncompressed blocks by simply setting bit 15 of | ||
333 | * the compressed block's length. Thus a compressed block can | ||
334 | * have at most a length of 2^15-1 bytes. The QIC-80 standard | ||
335 | * restricts the block-length even further, allowing only 29k - | ||
336 | * 6 bytes. | ||
337 | * | ||
338 | * Currently, the maximum blocksize used by zftape is 28k. | ||
339 | * | ||
340 | * In short: don't exceed the length of the input-package, set | ||
341 | * bit 15 of the compressed size to 1 if you have copied data | ||
342 | * instead of compressing it. | ||
343 | */ | ||
344 | static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer) | ||
345 | { | ||
346 | __s32 compressed_sz; | ||
347 | TRACE_FUN(ft_t_flow); | ||
348 | |||
349 | |||
350 | lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem, | ||
351 | in_buffer, in_sz, out_buffer, &compressed_sz); | ||
352 | if (TRACE_LEVEL >= ft_t_info) { | ||
353 | /* the compiler will optimize this away when | ||
354 | * compiled with NO_TRACE_AT_ALL option | ||
355 | */ | ||
356 | TRACE(ft_t_data_flow, "\n" | ||
357 | KERN_INFO "before compression: %d bytes\n" | ||
358 | KERN_INFO "after compresison : %d bytes", | ||
359 | in_sz, | ||
360 | (int)(compressed_sz < 0 | ||
361 | ? -compressed_sz : compressed_sz)); | ||
362 | /* for statistical purposes | ||
363 | */ | ||
364 | zftc_wr_compressed += (compressed_sz < 0 | ||
365 | ? -compressed_sz : compressed_sz); | ||
366 | zftc_wr_uncompressed += in_sz; | ||
367 | } | ||
368 | TRACE_EXIT (int)compressed_sz; | ||
369 | } | ||
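A hedged sketch (not from the original file; in_buf, blk_sz and out_buf are illustrative names) of how a caller interprets the sign convention described in the NOTE above; this mirrors what zftc_write() does further down before writing the 2-byte cluster header:

    int sz = zft_compress(in_buf, blk_sz, out_buf);
    __u16 count;

    if (sz < 0)
            count = (__u16)(-sz) | 0x8000;  /* data was copied verbatim; bit 15 marks it */
    else
            count = (__u16)sz;              /* genuinely compressed data */
    /* `count' is what PUT2() stores in the cluster header on tape */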
370 | |||
371 | /* called by zft_compress_read() to decompress the data. Must | ||
372 | * return the size of the decompressed data for sanity checks | ||
373 | * (compared with zft_blk_sz) | ||
374 | * | ||
375 | * NOTE: Read the note for zft_compress() above! If bit 15 of the | ||
376 | * parameter in_sz is set, then the data in in_buffer isn't | ||
377 | * compressed, which must be handled by the un-compression | ||
378 | * algorithm. (I changed lzrw3 to handle this.) | ||
379 | * | ||
380 | * The parameter max_out_sz is needed to prevent buffer overruns when | ||
381 | * uncompressing corrupt data. | ||
382 | */ | ||
383 | static unsigned int zft_uncompress(__u8 *in_buffer, | ||
384 | int in_sz, | ||
385 | __u8 *out_buffer, | ||
386 | unsigned int max_out_sz) | ||
387 | { | ||
388 | TRACE_FUN(ft_t_flow); | ||
389 | |||
390 | lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem, | ||
391 | in_buffer, (__s32)in_sz, | ||
392 | out_buffer, (__u32 *)&max_out_sz); | ||
393 | |||
394 | if (TRACE_LEVEL >= ft_t_info) { | ||
395 | TRACE(ft_t_data_flow, "\n" | ||
396 | KERN_INFO "before decompression: %d bytes\n" | ||
397 | KERN_INFO "after decompression : %d bytes", | ||
398 | in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz); | ||
399 | /* for statistical purposes | ||
400 | */ | ||
401 | zftc_rd_compressed += in_sz < 0 ? -in_sz : in_sz; | ||
402 | zftc_rd_uncompressed += max_out_sz; | ||
403 | } | ||
404 | TRACE_EXIT (unsigned int)max_out_sz; | ||
405 | } | ||
406 | |||
407 | /* print some statistics about the efficiency of the compression to | ||
408 | * the kernel log | ||
409 | */ | ||
410 | static void zftc_stats(void) | ||
411 | { | ||
412 | TRACE_FUN(ft_t_flow); | ||
413 | |||
414 | if (TRACE_LEVEL < ft_t_info) { | ||
415 | TRACE_EXIT; | ||
416 | } | ||
417 | if (zftc_wr_uncompressed != 0) { | ||
418 | if (zftc_wr_compressed > (1<<14)) { | ||
419 | TRACE(ft_t_info, "compression statistics (writing):\n" | ||
420 | KERN_INFO " compr./uncmpr. : %3d %%", | ||
421 | (((zftc_wr_compressed>>10) * 100) | ||
422 | / (zftc_wr_uncompressed>>10))); | ||
423 | } else { | ||
424 | TRACE(ft_t_info, "compression statistics (writing):\n" | ||
425 | KERN_INFO " compr./uncmpr. : %3d %%", | ||
426 | ((zftc_wr_compressed * 100) | ||
427 | / zftc_wr_uncompressed)); | ||
428 | } | ||
429 | } | ||
430 | if (zftc_rd_uncompressed != 0) { | ||
431 | if (zftc_rd_compressed > (1<<14)) { | ||
432 | TRACE(ft_t_info, "compression statistics (reading):\n" | ||
433 | KERN_INFO " compr./uncmpr. : %3d %%", | ||
434 | (((zftc_rd_compressed>>10) * 100) | ||
435 | / (zftc_rd_uncompressed>>10))); | ||
436 | } else { | ||
437 | TRACE(ft_t_info, "compression statistics (reading):\n" | ||
438 | KERN_INFO " compr./uncmpr. : %3d %%", | ||
439 | ((zftc_rd_compressed * 100) | ||
440 | / zftc_rd_uncompressed)); | ||
441 | } | ||
442 | } | ||
443 | /* only print it once: */ | ||
444 | zftc_wr_uncompressed = | ||
445 | zftc_wr_compressed = | ||
446 | zftc_rd_uncompressed = | ||
447 | zftc_rd_compressed = 0; | ||
448 | TRACE_EXIT; | ||
449 | } | ||
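The shift by 10 in the branches above is presumably there to keep the percentage arithmetic within 32 bits: once the byte counters grow large, `compressed * 100' could overflow, so both counters are reduced to kilobytes first. A stand-alone sketch of the same computation (assumes uncompressed != 0, which the callers above already check):

    static unsigned int cmpr_percent(unsigned int compressed, unsigned int uncompressed)
    {
            if (compressed > (1 << 14))     /* large counters: divide in KiB units */
                    return ((compressed >> 10) * 100) / (uncompressed >> 10);
            return (compressed * 100) / uncompressed;
    }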
450 | |||
451 | /* start new compressed block | ||
452 | */ | ||
453 | static int start_new_cseg(cmpr_info *cluster, | ||
454 | char *dst_buf, | ||
455 | const zft_position *pos, | ||
456 | const unsigned int blk_sz, | ||
457 | const char *src_buf, | ||
458 | const int this_segs_sz, | ||
459 | const int qic113) | ||
460 | { | ||
461 | int size_left; | ||
462 | int cp_cnt; | ||
463 | int buf_pos; | ||
464 | TRACE_FUN(ft_t_flow); | ||
465 | |||
466 | size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz; | ||
467 | TRACE(ft_t_data_flow,"\n" | ||
468 | KERN_INFO "segment size : %d\n" | ||
469 | KERN_INFO "compressed_sz: %d\n" | ||
470 | KERN_INFO "size_left : %d", | ||
471 | this_segs_sz, cluster->cmpr_sz, size_left); | ||
472 | if (size_left > 18) { /* start a new cluster */ | ||
473 | cp_cnt = cluster->cmpr_sz; | ||
474 | cluster->cmpr_sz = 0; | ||
475 | buf_pos = cp_cnt + sizeof(__u16); | ||
476 | PUT2(dst_buf, 0, buf_pos); | ||
477 | |||
478 | if (qic113) { | ||
479 | __s64 foffs = pos->volume_pos; | ||
480 | if (cp_cnt) foffs += (__s64)blk_sz; | ||
481 | |||
482 | TRACE(ft_t_data_flow, "new style QIC-113 header"); | ||
483 | PUT8(dst_buf, buf_pos, foffs); | ||
484 | buf_pos += sizeof(__s64); | ||
485 | } else { | ||
486 | __u32 foffs = (__u32)pos->volume_pos; | ||
487 | if (cp_cnt) foffs += (__u32)blk_sz; | ||
488 | |||
489 | TRACE(ft_t_data_flow, "old style QIC-80MC header"); | ||
490 | PUT4(dst_buf, buf_pos, foffs); | ||
491 | buf_pos += sizeof(__u32); | ||
492 | } | ||
493 | } else if (size_left >= 0) { | ||
494 | cp_cnt = cluster->cmpr_sz; | ||
495 | cluster->cmpr_sz = 0; | ||
496 | buf_pos = cp_cnt + sizeof(__u16); | ||
497 | PUT2(dst_buf, 0, buf_pos); | ||
498 | /* zero unused part of segment. */ | ||
499 | memset(dst_buf + buf_pos, '\0', size_left); | ||
500 | buf_pos = this_segs_sz; | ||
501 | } else { /* need entire segment and more space */ | ||
502 | PUT2(dst_buf, 0, 0); | ||
503 | cp_cnt = this_segs_sz - sizeof(__u16); | ||
504 | cluster->cmpr_sz -= cp_cnt; | ||
505 | buf_pos = this_segs_sz; | ||
506 | } | ||
507 | memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt); | ||
508 | cluster->cmpr_pos += cp_cnt; | ||
509 | TRACE_EXIT buf_pos; | ||
510 | } | ||
511 | |||
512 | /* return-value: the number of bytes removed from the user-buffer | ||
513 | * `src_buf' or error code | ||
514 | * | ||
515 | * int *write_cnt : how much actually has been moved to the | ||
516 | * dst_buf. Need not be initialized when | ||
517 | * function returns with an error code | ||
518 | * (negative return value) | ||
519 | * __u8 *dst_buf : kernel space buffer where the data has to | ||
520 | * be copied to. The contents of this buffer | ||
521 | * go to a specific segment. | ||
522 | * const int seg_sz : the size of the segment dst_buf will be | ||
523 | * copied to. | ||
524 | * const zft_position *pos : struct containing the coordinates in | ||
525 | * the current volume (byte position, | ||
526 | * segment id of current segment etc) | ||
527 | * const zft_volinfo *volume: information about the current volume, | ||
528 | * size etc. | ||
529 | * const __u8 *src_buf : user space buffer that contains the | ||
530 | * data the user wants to be written to | ||
531 | * tape. | ||
532 | * const int req_len : the amount of data the user wants to be | ||
533 | * written to tape. | ||
534 | */ | ||
535 | static int zftc_write(int *write_cnt, | ||
536 | __u8 *dst_buf, const int seg_sz, | ||
537 | const __u8 __user *src_buf, const int req_len, | ||
538 | const zft_position *pos, const zft_volinfo *volume) | ||
539 | { | ||
540 | int req_len_left = req_len; | ||
541 | int result; | ||
542 | int len_left; | ||
543 | int buf_pos_write = pos->seg_byte_pos; | ||
544 | TRACE_FUN(ft_t_flow); | ||
545 | |||
546 | /* Note: we do not unlock the module because | ||
547 | * there are some values cached in that `cseg' variable. We | ||
548 | * don't want to use this information when being | ||
549 | * unloaded by kerneld even when the tape is full or when we | ||
550 | * cannot allocate enough memory. | ||
551 | */ | ||
552 | if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) { | ||
553 | TRACE_EXIT -ENOSPC; | ||
554 | } | ||
555 | if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) { | ||
556 | /* should we unlock the module? But it shouldn't | ||
557 | * be locked anyway ... | ||
558 | */ | ||
559 | TRACE_EXIT -ENOMEM; | ||
560 | } | ||
561 | if (buf_pos_write == 0) { /* fill a new segment */ | ||
562 | *write_cnt = buf_pos_write = start_new_cseg(&cseg, | ||
563 | dst_buf, | ||
564 | pos, | ||
565 | volume->blk_sz, | ||
566 | zftc_buf, | ||
567 | seg_sz, | ||
568 | volume->qic113); | ||
569 | if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) { | ||
570 | req_len_left -= result = volume->blk_sz; | ||
571 | cseg.cmpr_pos = 0; | ||
572 | } else { | ||
573 | result = 0; | ||
574 | } | ||
575 | } else { | ||
576 | *write_cnt = result = 0; | ||
577 | } | ||
578 | |||
579 | len_left = seg_sz - buf_pos_write; | ||
580 | while ((req_len_left > 0) && (len_left > 18)) { | ||
581 | /* now we have some size left for a new compressed | ||
582 | * block. We know, that the compression buffer is | ||
583 | * empty (else there wouldn't be any space left). | ||
584 | */ | ||
585 | if (copy_from_user(zftc_scratch_buf, src_buf + result, | ||
586 | volume->blk_sz) != 0) { | ||
587 | TRACE_EXIT -EFAULT; | ||
588 | } | ||
589 | req_len_left -= volume->blk_sz; | ||
590 | cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz, | ||
591 | zftc_buf); | ||
592 | if (cseg.cmpr_sz < 0) { | ||
593 | cseg.uncmpr = 0x8000; | ||
594 | cseg.cmpr_sz = -cseg.cmpr_sz; | ||
595 | } else { | ||
596 | cseg.uncmpr = 0; | ||
597 | } | ||
598 | /* increment "result" iff we copied the entire | ||
599 | * compressed block to the zft_deblock_buf | ||
600 | */ | ||
601 | len_left -= sizeof(__u16); | ||
602 | if (len_left >= cseg.cmpr_sz) { | ||
603 | len_left -= cseg.count = cseg.cmpr_sz; | ||
604 | cseg.cmpr_pos = cseg.cmpr_sz = 0; | ||
605 | result += volume->blk_sz; | ||
606 | } else { | ||
607 | cseg.cmpr_sz -= | ||
608 | cseg.cmpr_pos = | ||
609 | cseg.count = len_left; | ||
610 | len_left = 0; | ||
611 | } | ||
612 | PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count); | ||
613 | buf_pos_write += sizeof(__u16); | ||
614 | memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count); | ||
615 | buf_pos_write += cseg.count; | ||
616 | *write_cnt += cseg.count + sizeof(__u16); | ||
617 | FT_SIGNAL_EXIT(_DONT_BLOCK); | ||
618 | } | ||
619 | /* erase the remainder of the segment if less than 18 bytes | ||
620 | * left (18 bytes is due to the QIC-80 standard) | ||
621 | */ | ||
622 | if (len_left <= 18) { | ||
623 | memset(dst_buf + buf_pos_write, '\0', len_left); | ||
624 | (*write_cnt) += len_left; | ||
625 | } | ||
626 | TRACE(ft_t_data_flow, "returning %d", result); | ||
627 | TRACE_EXIT result; | ||
628 | } | ||
629 | |||
630 | /* out: | ||
631 | * | ||
632 | * int *read_cnt: the number of bytes we removed from the zft_deblock_buf | ||
633 | * (result) | ||
634 | * int *to_do : the remaining size of the read-request. | ||
635 | * | ||
636 | * in: | ||
637 | * | ||
638 | * char *buff : buff is the address of the upper part of the user | ||
639 | * buffer, that hasn't been filled with data yet. | ||
640 | * | ||
641 | * int buf_pos_read : copy from _ftape_read() | ||
642 | * int buf_len_read : copy of buf_len_rd from _ftape_read() | ||
643 | * char *zft_deblock_buf: zft_deblock_buf | ||
644 | * unsigned short blk_sz: the block size valid for this volume, may differ | ||
645 | * from zft_blk_sz. | ||
646 | * int finish: if != 0 means that this is the last segment belonging | ||
647 | * to this volume | ||
648 | * returns the amount of data actually copied to the user-buffer | ||
649 | * | ||
650 | * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to | ||
651 | * be set to 0 | ||
652 | */ | ||
653 | static int zftc_read (int *read_cnt, | ||
654 | __u8 __user *dst_buf, const int to_do, | ||
655 | const __u8 *src_buf, const int seg_sz, | ||
656 | const zft_position *pos, const zft_volinfo *volume) | ||
657 | { | ||
658 | int uncompressed_sz; | ||
659 | int result = 0; | ||
660 | int remaining = to_do; | ||
661 | TRACE_FUN(ft_t_flow); | ||
662 | |||
663 | TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),); | ||
664 | if (pos->seg_byte_pos == 0) { | ||
665 | /* new segment just read | ||
666 | */ | ||
667 | TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume), | ||
668 | *read_cnt = 0); | ||
669 | memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16), | ||
670 | cseg.count); | ||
671 | cseg.cmpr_pos += cseg.count; | ||
672 | *read_cnt = cseg.offset; | ||
673 | DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg); | ||
674 | } else { | ||
675 | *read_cnt = 0; | ||
676 | } | ||
677 | /* loop and uncompress until user buffer full or | ||
678 | * deblock-buffer empty | ||
679 | */ | ||
680 | TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d", | ||
681 | cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt); | ||
682 | while ((cseg.spans == 0) && (remaining > 0)) { | ||
683 | if (cseg.cmpr_pos != 0) { /* cmpr buf is not empty */ | ||
684 | uncompressed_sz = | ||
685 | zft_uncompress(zftc_buf, | ||
686 | cseg.uncmpr == 0x8000 ? | ||
687 | -cseg.cmpr_pos : cseg.cmpr_pos, | ||
688 | zftc_scratch_buf, | ||
689 | volume->blk_sz); | ||
690 | if (uncompressed_sz != volume->blk_sz) { | ||
691 | *read_cnt = 0; | ||
692 | TRACE_ABORT(-EIO, ft_t_warn, | ||
693 | "Uncompressed blk (%d) != blk size (%d)", | ||
694 | uncompressed_sz, volume->blk_sz); | ||
695 | } | ||
696 | if (copy_to_user(dst_buf + result, | ||
697 | zftc_scratch_buf, | ||
698 | uncompressed_sz) != 0 ) { | ||
699 | TRACE_EXIT -EFAULT; | ||
700 | } | ||
701 | remaining -= uncompressed_sz; | ||
702 | result += uncompressed_sz; | ||
703 | cseg.cmpr_pos = 0; | ||
704 | } | ||
705 | if (remaining > 0) { | ||
706 | get_next_cluster(&cseg, src_buf, seg_sz, | ||
707 | volume->end_seg == pos->seg_pos); | ||
708 | if (cseg.count != 0) { | ||
709 | memcpy(zftc_buf, src_buf + cseg.offset, | ||
710 | cseg.count); | ||
711 | cseg.cmpr_pos = cseg.count; | ||
712 | cseg.offset += cseg.count; | ||
713 | *read_cnt += cseg.count + sizeof(__u16); | ||
714 | } else { | ||
715 | remaining = 0; | ||
716 | } | ||
717 | } | ||
718 | TRACE(ft_t_data_flow, "\n" | ||
719 | KERN_INFO "compressed_sz: %d\n" | ||
720 | KERN_INFO "compos : %d\n" | ||
721 | KERN_INFO "*read_cnt : %d", | ||
722 | cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt); | ||
723 | } | ||
724 | if (seg_sz - cseg.offset <= 18) { | ||
725 | *read_cnt += seg_sz - cseg.offset; | ||
726 | TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt); | ||
727 | } | ||
728 | TRACE(ft_t_data_flow, "\n" | ||
729 | KERN_INFO "segment size : %d\n" | ||
730 | KERN_INFO "read count : %d\n" | ||
731 | KERN_INFO "buf_pos_read : %d\n" | ||
732 | KERN_INFO "remaining : %d", | ||
733 | seg_sz, *read_cnt, pos->seg_byte_pos, | ||
734 | seg_sz - *read_cnt - pos->seg_byte_pos); | ||
735 | TRACE(ft_t_data_flow, "returning: %d", result); | ||
736 | TRACE_EXIT result; | ||
737 | } | ||
738 | |||
739 | /* seeks to the new data position, sometimes reading in a segment. | ||
740 | * | ||
741 | * start_seg and end_seg give the boundaries of the current volume | ||
742 | * blk_sz is the blk_sz of the current volume as stored in the | ||
743 | * volume label | ||
744 | * | ||
745 | * We don't allow blocksizes less than 1024 bytes, therefore we don't need | ||
746 | * a 64 bit argument for new_block_pos. | ||
747 | */ | ||
748 | |||
749 | static int seek_in_segment(const unsigned int to_do, cmpr_info *c_info, | ||
750 | const char *src_buf, const int seg_sz, | ||
751 | const int seg_pos, const zft_volinfo *volume); | ||
752 | static int slow_seek_forward_until_error(const unsigned int distance, | ||
753 | cmpr_info *c_info, zft_position *pos, | ||
754 | const zft_volinfo *volume, __u8 *buf); | ||
755 | static int search_valid_segment(unsigned int segment, | ||
756 | const unsigned int end_seg, | ||
757 | const unsigned int max_foffs, | ||
758 | zft_position *pos, cmpr_info *c_info, | ||
759 | const zft_volinfo *volume, __u8 *buf); | ||
760 | static int slow_seek_forward(unsigned int dest, cmpr_info *c_info, | ||
761 | zft_position *pos, const zft_volinfo *volume, | ||
762 | __u8 *buf); | ||
763 | static int compute_seg_pos(unsigned int dest, zft_position *pos, | ||
764 | const zft_volinfo *volume); | ||
765 | |||
766 | #define ZFT_SLOW_SEEK_THRESHOLD 10 /* segments */ | ||
767 | #define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */ | ||
768 | #define ZFT_FAST_SEEK_BACKUP 10 /* segments */ | ||
769 | |||
770 | static int zftc_seek(unsigned int new_block_pos, | ||
771 | zft_position *pos, const zft_volinfo *volume, __u8 *buf) | ||
772 | { | ||
773 | unsigned int dest; | ||
774 | int limit; | ||
775 | int distance; | ||
776 | int result = 0; | ||
777 | int seg_dist; | ||
778 | int new_seg; | ||
779 | int old_seg = 0; | ||
780 | int fast_seek_trials = 0; | ||
781 | TRACE_FUN(ft_t_flow); | ||
782 | |||
783 | if (new_block_pos == 0) { | ||
784 | pos->seg_pos = volume->start_seg; | ||
785 | pos->seg_byte_pos = 0; | ||
786 | pos->volume_pos = 0; | ||
787 | zftc_reset(); | ||
788 | TRACE_EXIT 0; | ||
789 | } | ||
790 | dest = new_block_pos * (volume->blk_sz >> 10); | ||
791 | distance = dest - (pos->volume_pos >> 10); | ||
792 | while (distance != 0) { | ||
793 | seg_dist = compute_seg_pos(dest, pos, volume); | ||
794 | TRACE(ft_t_noise, "\n" | ||
795 | KERN_INFO "seg_dist: %d\n" | ||
796 | KERN_INFO "distance: %d\n" | ||
797 | KERN_INFO "dest : %d\n" | ||
798 | KERN_INFO "vpos : %d\n" | ||
799 | KERN_INFO "seg_pos : %d\n" | ||
800 | KERN_INFO "trials : %d", | ||
801 | seg_dist, distance, dest, | ||
802 | (unsigned int)(pos->volume_pos>>10), pos->seg_pos, | ||
803 | fast_seek_trials); | ||
804 | if (distance > 0) { | ||
805 | if (seg_dist < 0) { | ||
806 | TRACE(ft_t_bug, "BUG: distance %d > 0, " | ||
807 | "segment difference %d < 0", | ||
808 | distance, seg_dist); | ||
809 | result = -EIO; | ||
810 | break; | ||
811 | } | ||
812 | new_seg = pos->seg_pos + seg_dist; | ||
813 | if (new_seg > volume->end_seg) { | ||
814 | new_seg = volume->end_seg; | ||
815 | } | ||
816 | if (old_seg == new_seg || /* loop */ | ||
817 | seg_dist <= ZFT_SLOW_SEEK_THRESHOLD || | ||
818 | fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) { | ||
819 | TRACE(ft_t_noise, "starting slow seek:\n" | ||
820 | KERN_INFO "fast seek failed too often: %s\n" | ||
821 | KERN_INFO "near target position : %s\n" | ||
822 | KERN_INFO "looping between two segs : %s", | ||
823 | (fast_seek_trials >= | ||
824 | ZFT_FAST_SEEK_MAX_TRIALS) | ||
825 | ? "yes" : "no", | ||
826 | (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD) | ||
827 | ? "yes" : "no", | ||
828 | (old_seg == new_seg) | ||
829 | ? "yes" : "no"); | ||
830 | result = slow_seek_forward(dest, &cseg, | ||
831 | pos, volume, buf); | ||
832 | break; | ||
833 | } | ||
834 | old_seg = new_seg; | ||
835 | limit = volume->end_seg; | ||
836 | fast_seek_trials ++; | ||
837 | for (;;) { | ||
838 | result = search_valid_segment(new_seg, limit, | ||
839 | volume->size, | ||
840 | pos, &cseg, | ||
841 | volume, buf); | ||
842 | if (result == 0 || result == -EINTR) { | ||
843 | break; | ||
844 | } | ||
845 | if (new_seg == volume->start_seg) { | ||
846 | result = -EIO; /* set error | ||
847 | * condition | ||
848 | */ | ||
849 | break; | ||
850 | } | ||
851 | limit = new_seg; | ||
852 | new_seg -= ZFT_FAST_SEEK_BACKUP; | ||
853 | if (new_seg < volume->start_seg) { | ||
854 | new_seg = volume->start_seg; | ||
855 | } | ||
856 | } | ||
857 | if (result < 0) { | ||
858 | TRACE(ft_t_warn, | ||
859 | "Couldn't find a readable segment"); | ||
860 | break; | ||
861 | } | ||
862 | } else /* if (distance < 0) */ { | ||
863 | if (seg_dist > 0) { | ||
864 | TRACE(ft_t_bug, "BUG: distance %d < 0, " | ||
865 | "segment difference %d >0", | ||
866 | distance, seg_dist); | ||
867 | result = -EIO; | ||
868 | break; | ||
869 | } | ||
870 | new_seg = pos->seg_pos + seg_dist; | ||
871 | if (fast_seek_trials > 0 && seg_dist == 0) { | ||
872 | /* this avoids sticking to the same | ||
873 | * segment all the time. On the other hand: | ||
874 | * if we got here for the first time, and the | ||
875 | * deblock_buffer still contains a valid | ||
876 | * segment, then there is no need to skip to | ||
877 | * the previous segment if the desired position | ||
878 | * is inside this segment. | ||
879 | */ | ||
880 | new_seg --; | ||
881 | } | ||
882 | if (new_seg < volume->start_seg) { | ||
883 | new_seg = volume->start_seg; | ||
884 | } | ||
885 | limit = pos->seg_pos; | ||
886 | fast_seek_trials ++; | ||
887 | for (;;) { | ||
888 | result = search_valid_segment(new_seg, limit, | ||
889 | pos->volume_pos, | ||
890 | pos, &cseg, | ||
891 | volume, buf); | ||
892 | if (result == 0 || result == -EINTR) { | ||
893 | break; | ||
894 | } | ||
895 | if (new_seg == volume->start_seg) { | ||
896 | result = -EIO; /* set error | ||
897 | * condition | ||
898 | */ | ||
899 | break; | ||
900 | } | ||
901 | limit = new_seg; | ||
902 | new_seg -= ZFT_FAST_SEEK_BACKUP; | ||
903 | if (new_seg < volume->start_seg) { | ||
904 | new_seg = volume->start_seg; | ||
905 | } | ||
906 | } | ||
907 | if (result < 0) { | ||
908 | TRACE(ft_t_warn, | ||
909 | "Couldn't find a readable segment"); | ||
910 | break; | ||
911 | } | ||
912 | } | ||
913 | distance = dest - (pos->volume_pos >> 10); | ||
914 | } | ||
915 | TRACE_EXIT result; | ||
916 | } | ||
917 | |||
918 | |||
919 | /* advance inside the given segment by at most to_do kilobytes; | ||
920 | * returns the number of kilobytes moved | ||
921 | */ | ||
922 | |||
923 | static int seek_in_segment(const unsigned int to_do, | ||
924 | cmpr_info *c_info, | ||
925 | const char *src_buf, | ||
926 | const int seg_sz, | ||
927 | const int seg_pos, | ||
928 | const zft_volinfo *volume) | ||
929 | { | ||
930 | int result = 0; | ||
931 | int blk_sz = volume->blk_sz >> 10; | ||
932 | int remaining = to_do; | ||
933 | TRACE_FUN(ft_t_flow); | ||
934 | |||
935 | if (c_info->offset == 0) { | ||
936 | /* new segment just read | ||
937 | */ | ||
938 | TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),); | ||
939 | c_info->cmpr_pos += c_info->count; | ||
940 | DUMP_CMPR_INFO(ft_t_noise, "", c_info); | ||
941 | } | ||
942 | /* loop and uncompress until user buffer full or | ||
943 | * deblock-buffer empty | ||
944 | */ | ||
945 | TRACE(ft_t_noise, "compressed_sz: %d, compos : %d", | ||
946 | c_info->cmpr_sz, c_info->cmpr_pos); | ||
947 | while (c_info->spans == 0 && remaining > 0) { | ||
948 | if (c_info->cmpr_pos != 0) { /* cmpr buf is not empty */ | ||
949 | result += blk_sz; | ||
950 | remaining -= blk_sz; | ||
951 | c_info->cmpr_pos = 0; | ||
952 | } | ||
953 | if (remaining > 0) { | ||
954 | get_next_cluster(c_info, src_buf, seg_sz, | ||
955 | volume->end_seg == seg_pos); | ||
956 | if (c_info->count != 0) { | ||
957 | c_info->cmpr_pos = c_info->count; | ||
958 | c_info->offset += c_info->count; | ||
959 | } else { | ||
960 | break; | ||
961 | } | ||
962 | } | ||
963 | /* Allow escape from this loop on signal! | ||
964 | */ | ||
965 | FT_SIGNAL_EXIT(_DONT_BLOCK); | ||
966 | DUMP_CMPR_INFO(ft_t_noise, "", c_info); | ||
967 | TRACE(ft_t_noise, "to_do: %d", remaining); | ||
968 | } | ||
969 | if (seg_sz - c_info->offset <= 18) { | ||
970 | c_info->offset = seg_sz; | ||
971 | } | ||
972 | TRACE(ft_t_noise, "\n" | ||
973 | KERN_INFO "segment size : %d\n" | ||
974 | KERN_INFO "buf_pos_read : %d\n" | ||
975 | KERN_INFO "remaining : %d", | ||
976 | seg_sz, c_info->offset, | ||
977 | seg_sz - c_info->offset); | ||
978 | TRACE_EXIT result; | ||
979 | } | ||
980 | |||
981 | static int slow_seek_forward_until_error(const unsigned int distance, | ||
982 | cmpr_info *c_info, | ||
983 | zft_position *pos, | ||
984 | const zft_volinfo *volume, | ||
985 | __u8 *buf) | ||
986 | { | ||
987 | unsigned int remaining = distance; | ||
988 | int seg_sz; | ||
989 | int seg_pos; | ||
990 | int result; | ||
991 | TRACE_FUN(ft_t_flow); | ||
992 | |||
993 | seg_pos = pos->seg_pos; | ||
994 | do { | ||
995 | TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf, | ||
996 | FT_RD_AHEAD),); | ||
997 | /* now we have the contents of the actual segment in | ||
998 | * the deblock buffer | ||
999 | */ | ||
1000 | TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf, | ||
1001 | seg_sz, seg_pos,volume),); | ||
1002 | remaining -= result; | ||
1003 | pos->volume_pos += result<<10; | ||
1004 | pos->seg_pos = seg_pos; | ||
1005 | pos->seg_byte_pos = c_info->offset; | ||
1006 | seg_pos ++; | ||
1007 | if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) { | ||
1008 | pos->seg_pos ++; | ||
1009 | pos->seg_byte_pos = 0; | ||
1010 | c_info->offset = 0; | ||
1011 | } | ||
1012 | /* Allow escape from this loop on signal! | ||
1013 | */ | ||
1014 | FT_SIGNAL_EXIT(_DONT_BLOCK); | ||
1015 | TRACE(ft_t_noise, "\n" | ||
1016 | KERN_INFO "remaining: %d\n" | ||
1017 | KERN_INFO "seg_pos: %d\n" | ||
1018 | KERN_INFO "end_seg: %d\n" | ||
1019 | KERN_INFO "result: %d", | ||
1020 | remaining, seg_pos, volume->end_seg, result); | ||
1021 | } while (remaining > 0 && seg_pos <= volume->end_seg); | ||
1022 | TRACE_EXIT 0; | ||
1023 | } | ||
1024 | |||
1025 | /* return segment id of next segment containing valid data, -EIO otherwise | ||
1026 | */ | ||
1027 | static int search_valid_segment(unsigned int segment, | ||
1028 | const unsigned int end_seg, | ||
1029 | const unsigned int max_foffs, | ||
1030 | zft_position *pos, | ||
1031 | cmpr_info *c_info, | ||
1032 | const zft_volinfo *volume, | ||
1033 | __u8 *buf) | ||
1034 | { | ||
1035 | cmpr_info tmp_info; | ||
1036 | int seg_sz; | ||
1037 | TRACE_FUN(ft_t_flow); | ||
1038 | |||
1039 | memset(&tmp_info, 0, sizeof(cmpr_info)); | ||
1040 | while (segment <= end_seg) { | ||
1041 | FT_SIGNAL_EXIT(_DONT_BLOCK); | ||
1042 | TRACE(ft_t_noise, | ||
1043 | "Searching readable segment between %d and %d", | ||
1044 | segment, end_seg); | ||
1045 | seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD); | ||
1046 | if ((seg_sz > 0) && | ||
1047 | (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) && | ||
1048 | (tmp_info.foffs != 0 || segment == volume->start_seg)) { | ||
1049 | if ((tmp_info.foffs>>10) > max_foffs) { | ||
1050 | TRACE_ABORT(-EIO, ft_t_noise, "\n" | ||
1051 | KERN_INFO "cseg.foff: %d\n" | ||
1052 | KERN_INFO "dest : %d", | ||
1053 | (int)(tmp_info.foffs >> 10), | ||
1054 | max_foffs); | ||
1055 | } | ||
1056 | DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info); | ||
1057 | *c_info = tmp_info; | ||
1058 | pos->seg_pos = segment; | ||
1059 | pos->volume_pos = c_info->foffs; | ||
1060 | pos->seg_byte_pos = c_info->offset; | ||
1061 | TRACE(ft_t_noise, "found segment at %d", segment); | ||
1062 | TRACE_EXIT 0; | ||
1063 | } | ||
1064 | segment++; | ||
1065 | } | ||
1066 | TRACE_EXIT -EIO; | ||
1067 | } | ||
1068 | |||
1069 | static int slow_seek_forward(unsigned int dest, | ||
1070 | cmpr_info *c_info, | ||
1071 | zft_position *pos, | ||
1072 | const zft_volinfo *volume, | ||
1073 | __u8 *buf) | ||
1074 | { | ||
1075 | unsigned int distance; | ||
1076 | int result = 0; | ||
1077 | TRACE_FUN(ft_t_flow); | ||
1078 | |||
1079 | distance = dest - (pos->volume_pos >> 10); | ||
1080 | while ((distance > 0) && | ||
1081 | (result = slow_seek_forward_until_error(distance, | ||
1082 | c_info, | ||
1083 | pos, | ||
1084 | volume, | ||
1085 | buf)) < 0) { | ||
1086 | if (result == -EINTR) { | ||
1087 | break; | ||
1088 | } | ||
1089 | TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos); | ||
1090 | /* the failing segment is either pos->seg_pos or | ||
1091 | * pos->seg_pos + 1. There is no need to further try | ||
1092 | * that segment, because ftape_read_segment() already | ||
1093 | * has tried very hard to read it. So we start with the | ||
1094 | * following segment, which is pos->seg_pos + 1 | ||
1095 | */ | ||
1096 | if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest, | ||
1097 | pos, c_info, | ||
1098 | volume, buf) < 0) { | ||
1099 | TRACE(ft_t_noise, "search_valid_segment() failed"); | ||
1100 | result = -EIO; | ||
1101 | break; | ||
1102 | } | ||
1103 | distance = dest - (pos->volume_pos >> 10); | ||
1104 | result = 0; | ||
1105 | TRACE(ft_t_noise, "segment: %d", pos->seg_pos); | ||
1106 | /* found valid segment, retry the seek */ | ||
1107 | } | ||
1108 | TRACE_EXIT result; | ||
1109 | } | ||
1110 | |||
1111 | static int compute_seg_pos(const unsigned int dest, | ||
1112 | zft_position *pos, | ||
1113 | const zft_volinfo *volume) | ||
1114 | { | ||
1115 | int segment; | ||
1116 | int distance = dest - (pos->volume_pos >> 10); | ||
1117 | unsigned int raw_size; | ||
1118 | unsigned int virt_size; | ||
1119 | unsigned int factor; | ||
1120 | TRACE_FUN(ft_t_flow); | ||
1121 | |||
1122 | if (distance >= 0) { | ||
1123 | raw_size = volume->end_seg - pos->seg_pos + 1; | ||
1124 | virt_size = ((unsigned int)(volume->size>>10) | ||
1125 | - (unsigned int)(pos->volume_pos>>10) | ||
1126 | + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1); | ||
1127 | virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS; | ||
1128 | if (virt_size == 0 || raw_size == 0) { | ||
1129 | TRACE_EXIT 0; | ||
1130 | } | ||
1131 | if (raw_size >= (1<<25)) { | ||
1132 | factor = raw_size/(virt_size>>7); | ||
1133 | } else { | ||
1134 | factor = (raw_size<<7)/virt_size; | ||
1135 | } | ||
1136 | segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS); | ||
1137 | segment = (segment * factor)>>7; | ||
1138 | } else { | ||
1139 | raw_size = pos->seg_pos - volume->start_seg + 1; | ||
1140 | virt_size = ((unsigned int)(pos->volume_pos>>10) | ||
1141 | + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1); | ||
1142 | virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS; | ||
1143 | if (virt_size == 0 || raw_size == 0) { | ||
1144 | TRACE_EXIT 0; | ||
1145 | } | ||
1146 | if (raw_size >= (1<<25)) { | ||
1147 | factor = raw_size/(virt_size>>7); | ||
1148 | } else { | ||
1149 | factor = (raw_size<<7)/virt_size; | ||
1150 | } | ||
1151 | segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS); | ||
1152 | } | ||
1153 | TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7); | ||
1154 | TRACE_EXIT segment; | ||
1155 | } | ||
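To make the 7-bit fixed-point factor above concrete (the numbers are purely illustrative): if 1000 raw segments remain in the volume (raw_size) while the remaining logical data would fit into 800 ideal, error-free segments (virt_size), then factor = (1000 << 7) / 800 = 160, and a forward distance worth 80 ideal segments is mapped to (80 * 160) >> 7 = 100 raw segments. The estimate simply stretches the jump by the ratio of physical to logical capacity; since it is only an approximation, zftc_seek() above falls back to search_valid_segment() and, eventually, to a slow segment-by-segment seek when the fast guesses loop or overshoot.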
1156 | |||
1157 | static struct zft_cmpr_ops cmpr_ops = { | ||
1158 | zftc_write, | ||
1159 | zftc_read, | ||
1160 | zftc_seek, | ||
1161 | zftc_lock, | ||
1162 | zftc_reset, | ||
1163 | zftc_cleanup | ||
1164 | }; | ||
1165 | |||
1166 | int zft_compressor_init(void) | ||
1167 | { | ||
1168 | TRACE_FUN(ft_t_flow); | ||
1169 | |||
1170 | #ifdef MODULE | ||
1171 | printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n"); | ||
1172 | if (TRACE_LEVEL >= ft_t_info) { | ||
1173 | printk( | ||
1174 | KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n" | ||
1175 | KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n"); | ||
1176 | } | ||
1177 | #else /* !MODULE */ | ||
1178 | /* print a short no-nonsense boot message */ | ||
1179 | printk(KERN_INFO "zftape compressor v1.00a 970514\n"); | ||
1180 | printk(KERN_INFO "For use with " FTAPE_VERSION "\n"); | ||
1181 | #endif /* MODULE */ | ||
1182 | TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init); | ||
1183 | TRACE(ft_t_info, "installing compressor for zftape ..."); | ||
1184 | TRACE_CATCH(zft_cmpr_register(&cmpr_ops),); | ||
1185 | TRACE_EXIT 0; | ||
1186 | } | ||
1187 | |||
1188 | #ifdef MODULE | ||
1189 | |||
1190 | MODULE_AUTHOR( | ||
1191 | "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de"); | ||
1192 | MODULE_DESCRIPTION( | ||
1193 | "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams"); | ||
1194 | MODULE_LICENSE("GPL"); | ||
1195 | |||
1196 | /* Called by modules package when installing the driver | ||
1197 | */ | ||
1198 | int init_module(void) | ||
1199 | { | ||
1200 | return zft_compressor_init(); | ||
1201 | } | ||
1202 | |||
1203 | #endif /* MODULE */ | ||
diff --git a/drivers/char/ftape/compressor/zftape-compress.h b/drivers/char/ftape/compressor/zftape-compress.h deleted file mode 100644 index f200741e33bf..000000000000 --- a/drivers/char/ftape/compressor/zftape-compress.h +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | #ifndef _ZFTAPE_COMPRESS_H | ||
2 | #define _ZFTAPE_COMPRESS_H | ||
3 | /* | ||
4 | * Copyright (c) 1994-1997 Claus-Justus Heine | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License as | ||
8 | published by the Free Software Foundation; either version 2, or (at | ||
9 | your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; see the file COPYING. If not, write to | ||
18 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, | ||
19 | USA. | ||
20 | |||
21 | * | ||
22 | * $Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.h,v $ | ||
23 | * $Revision: 1.1 $ | ||
24 | * $Date: 1997/10/05 19:12:32 $ | ||
25 | * | ||
26 | * This file contains macros and definitions for zftape's | ||
27 | * builtin compression code. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include "../zftape/zftape-buffers.h" | ||
32 | #include "../zftape/zftape-vtbl.h" | ||
33 | #include "../compressor/lzrw3.h" | ||
34 | |||
35 | /* CMPR_WRK_MEM_SIZE gives the size of the compression wrk_mem */ | ||
36 | /* I got these out of lzrw3.c */ | ||
37 | #define U(X) ((__u32) X) | ||
38 | #define SIZE_P_BYTE (U(sizeof(__u8 *))) | ||
39 | #define ALIGNMENT_FUDGE (U(16)) | ||
40 | |||
41 | #define CMPR_WRK_MEM_SIZE (U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE) | ||
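For concreteness (arithmetic added for illustration, not from the original header): with 4-byte pointers this works out to 4096 * 4 + 16 = 16400 bytes of working memory, and with 8-byte pointers to 4096 * 8 + 16 = 32784 bytes -- essentially lzrw3's 4096-entry hash table of byte pointers plus a small alignment fudge.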
42 | |||
43 | /* the maximum number of bytes by which the size of the "compressed" data | ||
44 | * can exceed the uncompressed data. As it is quite useless to compress | ||
45 | * data twice, it is sometimes more efficient to copy a block of data | ||
46 | * rather than feed it to the "compression" algorithm. In this case | ||
47 | * there are some flag bytes or the like preceding the "compressed" | ||
48 | * data. THAT MUST NOT BE THE CASE for the | ||
49 | * algorithm we use for this driver. Instead, the high bit 15 of | ||
50 | * compressed_size: | ||
51 | * | ||
52 | * compressed_size = ftape_compress() | ||
53 | * | ||
54 | * must be set in such a case. | ||
55 | * | ||
56 | * Nevertheless, as is the case for lzrw3, there may be an | ||
57 | * "intermediate" overrun that exceeds the amount of compressed | ||
58 | * data actually produced: during the algorithm we need, in the | ||
59 | * worst case, MAX_CMP_GROUP bytes more than the input size. | ||
60 | */ | ||
61 | #define MAX_CMP_GROUP (2+16*2) /* from lzrw3.c */ | ||
62 | |||
63 | #define CMPR_OVERRUN MAX_CMP_GROUP /* during compression */ | ||
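zft_allocate_cmpr_mem() in zftape-compress.c sizes its buffers with exactly this slack; a condensed sketch of that pattern (the real calls above are wrapped in TRACE_CATCH()):

    /* both the block buffer and the scratch buffer leave room for the
     * intermediate overrun described above */
    zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN);
    zft_vmalloc_always(&zftc_scratch_buf, blksz + CMPR_OVERRUN);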
64 | |||
65 | /****************************************************/ | ||
66 | |||
67 | #define CMPR_BUFFER_SIZE (MAX_BLOCK_SIZE + CMPR_OVERRUN) | ||
68 | |||
69 | /* the compression map stores the byte offset of compressed blocks within | ||
70 | * the current volume for cartridges with format codes 2, 3 and 5 | ||
71 | * (and old versions of zftape) and the offset measured in kilobytes for | ||
72 | * format codes 4 and 6. This gives us a possible max. size of a | ||
73 | * compressed volume of 1024*4GIG which should be enough. | ||
74 | */ | ||
75 | typedef __u32 CmprMap; | ||
76 | |||
77 | /* globals | ||
78 | */ | ||
79 | |||
80 | /* exported functions | ||
81 | */ | ||
82 | |||
83 | #endif /* _ZFTAPE_COMPRESS_H */ | ||