OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [char/] [ftape/] [compressor/] [zftape-compress.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *      Copyright (C) 1994-1997 Claus-Justus Heine
3
 
4
 This program is free software; you can redistribute it and/or
5
 modify it under the terms of the GNU General Public License as
6
 published by the Free Software Foundation; either version 2, or (at
7
 your option) any later version.
8
 
9
 This program is distributed in the hope that it will be useful, but
10
 WITHOUT ANY WARRANTY; without even the implied warranty of
11
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12
 General Public License for more details.
13
 
14
 You should have received a copy of the GNU General Public License
15
 along with this program; see the file COPYING.  If not, write to
16
 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17
 USA.
18
 
19
 *
20
 *     This file implements a "generic" interface between the *
21
 *     zftape-driver and a compression-algorithm. The *
22
 *     compression-algorithm currently used is a LZ77. I use the *
23
 *     implementation lzrw3 by Ross N. Williams (Renaissance *
24
 *     Software). The compression program itself is in the file
25
 *     lzrw3.c * and lzrw3.h.  To adopt another compression algorithm
26
 *     the functions * zft_compress() and zft_uncompress() must be
27
 *     changed * appropriately. See below.
28
 */
29
 
30
 char zftc_src[] ="$Source: /home/marcus/revision_ctrl_test/oc_cvs/cvs/or1k/linux/linux-2.4/drivers/char/ftape/compressor/zftape-compress.c,v $";
31
 char zftc_rev[] = "$Revision: 1.1.1.1 $";
32
 char zftc_dat[] = "$Date: 2004-04-15 02:02:26 $";
33
 
34
#include <linux/errno.h>
35
#include <linux/mm.h>
36
#include <linux/module.h>
37
 
38
#include <linux/zftape.h>
39
 
40
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,6)
41
#include <asm/uaccess.h>
42
#else
43
#include <asm/segment.h>
44
#endif
45
 
46
#include "../zftape/zftape-init.h"
47
#include "../zftape/zftape-eof.h"
48
#include "../zftape/zftape-ctl.h"
49
#include "../zftape/zftape-write.h"
50
#include "../zftape/zftape-read.h"
51
#include "../zftape/zftape-rw.h"
52
#include "../compressor/zftape-compress.h"
53
#include "../zftape/zftape-vtbl.h"
54
#include "../compressor/lzrw3.h"
55
 
56
/*
57
 *   global variables
58
 */
59
 
60
/* I handle the allocation of this buffer as a special case, because
 * its size varies depending on the tape length inserted.
 */
63
 
64
/* local variables
 */
/* Set while the compressor holds cached state in `cseg' and must not be
 * unloaded; cleared by zftc_reset().
 */
static int keep_module_locked = 1;

static void *zftc_wrk_mem = NULL;      /* scratch memory for lzrw3_compress() */
static __u8 *zftc_buf     = NULL;      /* holds (de)compressed block data     */
static void *zftc_scratch_buf  = NULL; /* staging buffer for user-space copies */

/* compression statistics, byte counters accumulated during read/write and
 * printed + reset by zftc_stats()
 */
static unsigned int zftc_wr_uncompressed = 0;
static unsigned int zftc_wr_compressed   = 0;
static unsigned int zftc_rd_uncompressed = 0;
static unsigned int zftc_rd_compressed   = 0;
78
 
79
/* forward */
80
static int  zftc_write(int *write_cnt,
81
                       __u8 *dst_buf, const int seg_sz,
82
                       const __u8 *src_buf, const int req_len,
83
                       const zft_position *pos, const zft_volinfo *volume);
84
static int  zftc_read(int *read_cnt,
85
                      __u8  *dst_buf, const int to_do,
86
                      const __u8 *src_buf, const int seg_sz,
87
                      const zft_position *pos, const zft_volinfo *volume);
88
static int  zftc_seek(unsigned int new_block_pos,
89
                      zft_position *pos, const zft_volinfo *volume,
90
                      __u8 *buffer);
91
static void zftc_lock   (void);
92
static void zftc_reset  (void);
93
static void zftc_cleanup(void);
94
static void zftc_stats      (void);
95
 
96
/* compressed segment. This conforms to QIC-80-MC, Revision K.
97
 *
98
 * Rev. K applies to tapes with `fixed length format' which is
99
 * indicated by format code 2,3 and 5. See below for format code 4 and 6
100
 *
101
 * 2 bytes: offset of compression segment structure
102
 *          29k > offset >= 29k-18: data from previous segment ends in this
103
 *                                  segment and no compressed block starts
104
 *                                  in this segment
105
 *                     offset == 0: data from previous segment occupies entire
106
 *                                  segment and continues in next segment
107
 * n bytes: remainder from previous segment
108
 *
109
 * Rev. K:
110
 * 4 bytes: 4 bytes: files set byte offset
111
 * Post Rev. K and QIC-3020/3020:
112
 * 8 bytes: 8 bytes: files set byte offset
113
 * 2 bytes: byte count N (amount of data following)
114
 *          bit 15 is set if data is compressed, bit 15 is not
115
 *          set if data is uncompressed
116
 * N bytes: data (as much as specified in the byte count)
117
 * 2 bytes: byte count N_1 of next cluster
118
 * N_1 bytes: data of next cluster
119
 * 2 bytes: byte count N_2 of next cluster
120
 * N_2 bytes: ...
121
 *
122
 * Note that the `N' byte count accounts only for the bytes that are in
 * the current segment if the cluster spans to the next segment.
124
 */
125
 
126
/* State of the compressed cluster currently being assembled (write path)
 * or consumed (read path).  Field semantics follow the QIC-80-MC Rev. K
 * segment layout described in the comment above.
 */
typedef struct
{
        int cmpr_pos;             /* actual position in compression buffer */
        int cmpr_sz;              /* what is left in the compression buffer
                                   * when copying the compressed data to the
                                   * deblock buffer
                                   */
        unsigned int first_block; /* location of header information in
                                   * this segment
                                   */
        unsigned int count;       /* amount of data of current block
                                   * contained in current segment
                                   */
        unsigned int offset;      /* offset in current segment */
        unsigned int spans:1;     /* might continue in next segment */
        unsigned int uncmpr;      /* 0x8000 if this block contains
                                   * uncompressed data
                                   */
        __s64 foffs;              /* file set byte offset, same as in
                                   * compression map segment
                                   */
} cmpr_info;
148
 
149
static cmpr_info cseg; /* static data. Must be kept up to date and shared by
                        * read, write and seek functions.  This cached state
                        * is why the module stays locked while in use.
                        */
152
 
153
/* Dump every field of a cmpr_info to the kernel log at trace level
 * `level', prefixed by `msg'.  LL_X / LL() are the 64-bit printk helpers
 * for the foffs field.
 */
#define DUMP_CMPR_INFO(level, msg, info)                                \
        TRACE(level, msg "\n"                                           \
              KERN_INFO "cmpr_pos   : %d\n"                             \
              KERN_INFO "cmpr_sz    : %d\n"                             \
              KERN_INFO "first_block: %d\n"                             \
              KERN_INFO "count      : %d\n"                             \
              KERN_INFO "offset     : %d\n"                             \
              KERN_INFO "spans      : %d\n"                             \
              KERN_INFO "uncmpr     : 0x%04x\n"                         \
              KERN_INFO "foffs      : " LL_X,                           \
              (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block,   \
              (info)->count, (info)->offset, (info)->spans == 1,        \
              (info)->uncmpr, LL((info)->foffs))
166
 
167
/*   dispatch compression segment info, return error code
168
 *
169
 *   afterwards, cseg->offset points to start of data of the NEXT
170
 *   compressed block, and cseg->count contains the amount of data
171
 *   left in the actual compressed block. cseg->spans is set to 1 if
172
 *   the block is continued in the following segment. Otherwise it is
173
 *   set to 0.
174
 */
175
static int get_cseg (cmpr_info *cinfo, const __u8 *buff,
                     const unsigned int seg_sz,
                     const zft_volinfo *volume)
{
        TRACE_FUN(ft_t_flow);

        /* First two bytes of the segment hold the offset of the first new
         * cluster header; 0 means the previous cluster occupies the whole
         * segment and continues in the next one.
         */
        cinfo->first_block = GET2(buff, 0);
        if (cinfo->first_block == 0) { /* data spans to next segment */
                cinfo->count  = seg_sz - sizeof(__u16);
                cinfo->offset = seg_sz;
                cinfo->spans = 1;
        } else { /* cluster definitely ends in this segment */
                if (cinfo->first_block > seg_sz) {
                        /* data corrupted */
                        TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
                                    KERN_INFO "segment size: %d\n"
                                    KERN_INFO "first block : %d",
                                    seg_sz, cinfo->first_block);
                }
                cinfo->count  = cinfo->first_block - sizeof(__u16);
                cinfo->offset = cinfo->first_block;
                cinfo->spans = 0;
        }
        /* now get the offset the first block should have in the
         * uncompressed data stream.
         *
         * For this magic `18' refer to CRF-3 standard or QIC-80MC,
         * Rev. K.
         */
        if ((seg_sz - cinfo->offset) > 18) {
                if (volume->qic113) { /* > revision K: 8-byte file offset */
                        TRACE(ft_t_data_flow, "New QIC-113 compliance");
                        cinfo->foffs = GET8(buff, cinfo->offset);
                        cinfo->offset += sizeof(__s64);
                } else { /* Rev. K: only a 4-byte file offset */
                        TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
                        cinfo->foffs   = (__s64)GET4(buff, cinfo->offset);
                        cinfo->offset += sizeof(__u32);
                }
        }
        /* sanity check: file offset must lie inside the volume */
        if (cinfo->foffs > volume->size) {
                TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
                            KERN_INFO "offset in current volume: %d\n"
                            KERN_INFO "size of current volume  : %d",
                            (int)(cinfo->foffs>>10), (int)(volume->size>>10));
        }
        /* sanity check: cluster data must not exceed one block */
        if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
                TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
                            KERN_INFO "block size : %d\n"
                            KERN_INFO "data record: %d",
                            volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
        }
        DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
        TRACE_EXIT 0;
}
230
 
231
/*  This one is called, when a new cluster starts in same segment.
232
 *
233
 *  Note: if this is the first cluster in the current segment, we must
234
 *  not check whether there are more than 18 bytes available because
235
 *  this have already been done in get_cseg() and there may be less
236
 *  than 18 bytes available due to header information.
237
 *
238
 */
239
static void get_next_cluster(cmpr_info *cluster, const __u8 *buff,
                             const int seg_sz, const int finish)
{
        TRACE_FUN(ft_t_flow);

        if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
                /* 2-byte count; bit 15 flags uncompressed data and is
                 * stripped from the count itself.
                 */
                cluster->count   = GET2(buff, cluster->offset);
                cluster->uncmpr  = cluster->count & 0x8000;
                cluster->count  -= cluster->uncmpr;
                cluster->offset += sizeof(__u16);
                cluster->foffs   = 0;
                if ((cluster->offset + cluster->count) < seg_sz) {
                        cluster->spans = 0;
                } else if (cluster->offset + cluster->count == seg_sz) {
                        /* exactly fills the segment: spans unless this is
                         * the volume's last segment
                         */
                        cluster->spans = !finish;
                } else {
                        /* either an error or a volume written by an
                         * old version. If this is a data error, then we'll
                         * catch it later.
                         */
                        TRACE(ft_t_data_flow, "Either error or old volume");
                        cluster->spans = 1;
                        cluster->count = seg_sz - cluster->offset;
                }
        } else {
                /* no room for another cluster header in this segment */
                cluster->count = 0;
                cluster->spans = 0;
                cluster->foffs = 0;
        }
        DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
        TRACE_EXIT;
}
271
 
272
static void zftc_lock(void)
273
{
274
#if LINUX_VERSION_CODE < KERNEL_VER(2,1,18)
275
        if (!MOD_IN_USE) {
276
                MOD_INC_USE_COUNT;
277
        }
278
#else
279
        MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
280
                            *  locking is done with can_unload()
281
                            */
282
#endif
283
        keep_module_locked = 1;
284
}
285
 
286
/*  this function is needed for zftape_reset_position in zftape-io.c
287
 */
288
static void zftc_reset(void)
289
{
290
        TRACE_FUN(ft_t_flow);
291
 
292
        memset((void *)&cseg, '\0', sizeof(cseg));
293
        zftc_stats();
294
#if LINUX_VERSION_CODE < KERNEL_VER(2,1,18)
295
        if (MOD_IN_USE) {
296
                MOD_DEC_USE_COUNT;
297
        }
298
#endif
299
        keep_module_locked = 0;
300
        TRACE_EXIT;
301
}
302
 
303
/* Buffers are (re-)allocated lazily and only when the block size changes;
 * alloc_blksz remembers the size the current buffers were sized for.
 */
static int cmpr_mem_initialized = 0;
static unsigned int alloc_blksz = 0;
305
 
306
/* Lazily allocate the work memory and the two block-sized buffers for
 * block size `blksz'.  Returns 0 on success, negative error code on
 * allocation failure (partially allocated buffers are freed again).
 */
static int zft_allocate_cmpr_mem(unsigned int blksz)
{
        TRACE_FUN(ft_t_flow);

        if (cmpr_mem_initialized && blksz == alloc_blksz) {
                TRACE_EXIT 0;
        }
        TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
                    zftc_cleanup());
        TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
                    zftc_cleanup());
        /* record the size now so a failing third allocation lets
         * zftc_cleanup() free zftc_buf with the correct size
         */
        alloc_blksz = blksz;
        TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
                    zftc_cleanup());
        cmpr_mem_initialized = 1;
        TRACE_EXIT 0;
}
323
 
324
static void zftc_cleanup(void)
325
{
326
        TRACE_FUN(ft_t_flow);
327
 
328
        zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
329
        zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
330
        zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
331
        cmpr_mem_initialized = alloc_blksz = 0;
332
        TRACE_EXIT;
333
}
334
 
335
/*****************************************************************************
336
 *                                                                           *
337
 *  The following two functions "ftape_compress()" and                       *
338
 *  "ftape_uncompress()" are the interface to the actual compression         *
339
 *  algorithm (i.e. they are calling the "compress()" function from          *
340
 *  the lzrw3 package for now). These routines could quite easily be         *
341
 *  changed to adopt another compression algorithm instead of lzrw3,         *
342
 *  which currently is used.                                                 *
343
 *                                                                           *
344
 *****************************************************************************/
345
 
346
/* called by zft_compress_write() to perform the compression. Must
347
 * return the size of the compressed data.
348
 *
349
 * NOTE: The size of the compressed data should not exceed the size of
350
 *       the uncompressed data. Most compression algorithms have means
351
 *       to store data unchanged if the "compressed" data amount would
352
 *       exceed the original one. Mostly this is done by storing some
353
 *       flag-bytes in front of the compressed data to indicate if it
354
 *       is compressed or not. Thus the worst compression result
355
 *       length is the original length plus those flag-bytes.
356
 *
357
 *       We don't want that, as the QIC-80 standard provides a means
358
 *       of marking uncompressed blocks by simply setting bit 15 of
359
 *       the compressed block's length. Thus a compessed block can
360
 *       have at most a length of 2^15-1 bytes. The QIC-80 standard
361
 *       restricts the block-length even further, allowing only 29k -
362
 *       6 bytes.
363
 *
364
 *       Currently, the maximum blocksize used by zftape is 28k.
365
 *
366
 *       In short: don't exceed the length of the input-package, set
367
 *       bit 15 of the compressed size to 1 if you have copied data
368
 *       instead of compressing it.
369
 */
370
static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
{
        __s32 compressed_sz;
        TRACE_FUN(ft_t_flow);


        /* lzrw3 reports a negative size when it stored the data verbatim
         * instead of compressing it.
         */
        lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
                       in_buffer, in_sz, out_buffer, &compressed_sz);
        if (TRACE_LEVEL >= ft_t_info) {
                /*  the compiler will optimize this away when
                 *  compiled with NO_TRACE_AT_ALL option
                 */
                int out_bytes = (int)(compressed_sz < 0
                                      ? -compressed_sz : compressed_sz);

                TRACE(ft_t_data_flow, "\n"
                      KERN_INFO "before compression: %d bytes\n"
                      KERN_INFO "after compresison : %d bytes",
                      in_sz, out_bytes);
                /*  for statistical purposes
                 */
                zftc_wr_compressed   += out_bytes;
                zftc_wr_uncompressed += in_sz;
        }
        TRACE_EXIT (int)compressed_sz;
}
396
 
397
/* called by zft_compress_read() to decompress the data. Must
398
 * return the size of the decompressed data for sanity checks
399
 * (compared with zft_blk_sz)
400
 *
401
 * NOTE: Read the note for zft_compress() above!  If bit 15 of the
402
 *       parameter in_sz is set, then the data in in_buffer isn't
403
 *       compressed, which must be handled by the un-compression
404
 *       algorithm. (I changed lzrw3 to handle this.)
405
 *
406
 *  The parameter max_out_sz is needed to prevent buffer overruns when
407
 *  uncompressing corrupt data.
408
 */
409
static unsigned int zft_uncompress(__u8 *in_buffer,
                                   int in_sz,
                                   __u8 *out_buffer,
                                   unsigned int max_out_sz)
{
        TRACE_FUN(ft_t_flow);

        /* lzrw3 writes the actual decompressed size back through the last
         * argument; a negative in_sz flags verbatim (uncompressed) input.
         */
        lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
                       in_buffer, (__s32)in_sz,
                       out_buffer, (__u32 *)&max_out_sz);

        if (TRACE_LEVEL >= ft_t_info) {
                int in_bytes = in_sz < 0 ? -in_sz : in_sz;

                TRACE(ft_t_data_flow, "\n"
                      KERN_INFO "before decompression: %d bytes\n"
                      KERN_INFO "after decompression : %d bytes",
                      in_bytes, (int)max_out_sz);
                /*  for statistical purposes
                 */
                zftc_rd_compressed   += in_bytes;
                zftc_rd_uncompressed += max_out_sz;
        }
        TRACE_EXIT (unsigned int)max_out_sz;
}
432
 
433
/* print some statistics about the efficiency of the compression to
434
 * the kernel log
435
 */
436
static void zftc_stats(void)
{
        TRACE_FUN(ft_t_flow);

        if (TRACE_LEVEL < ft_t_info) {
                TRACE_EXIT;
        }
        if (zftc_wr_uncompressed != 0) {
                if (zftc_wr_compressed > (1<<14)) {
                        /* large counters: shift both by 10 first so the
                         * "* 100" cannot overflow the unsigned int
                         */
                        TRACE(ft_t_info, "compression statistics (writing):\n"
                              KERN_INFO " compr./uncmpr.   : %3d %%",
                              (((zftc_wr_compressed>>10) * 100)
                               / (zftc_wr_uncompressed>>10)));
                } else {
                        TRACE(ft_t_info, "compression statistics (writing):\n"
                              KERN_INFO " compr./uncmpr.   : %3d %%",
                              ((zftc_wr_compressed * 100)
                               / zftc_wr_uncompressed));
                }
        }
        if (zftc_rd_uncompressed != 0) {
                if (zftc_rd_compressed > (1<<14)) {
                        /* same overflow guard as on the write path */
                        TRACE(ft_t_info, "compression statistics (reading):\n"
                              KERN_INFO " compr./uncmpr.   : %3d %%",
                              (((zftc_rd_compressed>>10) * 100)
                               / (zftc_rd_uncompressed>>10)));
                } else {
                        TRACE(ft_t_info, "compression statistics (reading):\n"
                              KERN_INFO " compr./uncmpr.   : %3d %%",
                              ((zftc_rd_compressed * 100)
                               / zftc_rd_uncompressed));
                }
        }
        /* only print it once: */
        zftc_wr_uncompressed =
                zftc_wr_compressed  =
                zftc_rd_uncompressed =
                zftc_rd_compressed   = 0;
        TRACE_EXIT;
}
476
 
477
/* start new compressed block
478
 */
479
/* Lay out the start of a fresh compressed segment in dst_buf: the 2-byte
 * first-block offset, the remainder of the previous cluster (if any) and
 * the file-offset header.  Returns the write position inside dst_buf at
 * which new cluster data may follow.
 */
static int start_new_cseg(cmpr_info *cluster,
                          char *dst_buf,
                          const zft_position *pos,
                          const unsigned int blk_sz,
                          const char *src_buf,
                          const int this_segs_sz,
                          const int qic113)
{
        int size_left;
        int cp_cnt;
        int buf_pos;
        TRACE_FUN(ft_t_flow);

        size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
        TRACE(ft_t_data_flow,"\n"
              KERN_INFO "segment size   : %d\n"
              KERN_INFO "compressed_sz: %d\n"
              KERN_INFO "size_left      : %d",
              this_segs_sz, cluster->cmpr_sz, size_left);
        if (size_left > 18) { /* start a new cluster */
                cp_cnt = cluster->cmpr_sz;
                cluster->cmpr_sz = 0;
                buf_pos = cp_cnt + sizeof(__u16);
                PUT2(dst_buf, 0, buf_pos);

                if (qic113) {
                        __s64 foffs = pos->volume_pos;
                        /* carried-over data belongs to the previous block */
                        if (cp_cnt) foffs += (__s64)blk_sz;

                        TRACE(ft_t_data_flow, "new style QIC-113 header");
                        PUT8(dst_buf, buf_pos, foffs);
                        buf_pos += sizeof(__s64);
                } else {
                        __u32 foffs = (__u32)pos->volume_pos;
                        if (cp_cnt) foffs += (__u32)blk_sz;

                        TRACE(ft_t_data_flow, "old style QIC-80MC header");
                        PUT4(dst_buf, buf_pos, foffs);
                        buf_pos += sizeof(__u32);
                }
        } else if (size_left >= 0) {
                /* remainder fits, but no room for a new cluster header */
                cp_cnt = cluster->cmpr_sz;
                cluster->cmpr_sz = 0;
                buf_pos = cp_cnt + sizeof(__u16);
                PUT2(dst_buf, 0, buf_pos);
                /* zero unused part of segment. */
                memset(dst_buf + buf_pos, '\0', size_left);
                buf_pos = this_segs_sz;
        } else { /* need entire segment and more space */
                PUT2(dst_buf, 0, 0);
                cp_cnt = this_segs_sz - sizeof(__u16);
                cluster->cmpr_sz  -= cp_cnt;
                buf_pos = this_segs_sz;
        }
        memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
        cluster->cmpr_pos += cp_cnt;
        TRACE_EXIT buf_pos;
}
537
 
538
/* return-value: the number of bytes removed from the user-buffer
539
 *               `src_buf' or error code
540
 *
541
 *  int *write_cnt           : how much actually has been moved to the
542
 *                             dst_buf. Need not be initialized when
543
 *                             function returns with an error code
544
 *                             (negativ return value)
545
 *  __u8 *dst_buf            : kernel space buffer where the has to be
546
 *                             copied to. The contents of this buffers
547
 *                             goes to a specific segment.
548
 *  const int seg_sz         : the size of the segment dst_buf will be
549
 *                             copied to.
550
 *  const zft_position *pos  : struct containing the coordinates in
551
 *                             the current volume (byte position,
552
 *                             segment id of current segment etc)
553
 *  const zft_volinfo *volume: information about the current volume,
554
 *                             size etc.
555
 *  const __u8 *src_buf      : user space buffer that contains the
556
 *                             data the user wants to be written to
557
 *                             tape.
558
 *  const int req_len        : the amount of data the user wants to be
559
 *                             written to tape.
560
 */
561
static int zftc_write(int *write_cnt,
                      __u8 *dst_buf, const int seg_sz,
                      const __u8 *src_buf, const int req_len,
                      const zft_position *pos, const zft_volinfo *volume)
{
        int req_len_left = req_len;
        int result;
        int len_left;
        int buf_pos_write = pos->seg_byte_pos;
        TRACE_FUN(ft_t_flow);

        keep_module_locked = 1;
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
        MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
                            *  locking is done with can_unload()
                            */
#else
        if (!MOD_IN_USE) {
                MOD_INC_USE_COUNT;
        }
#endif
        /* Note: we do not unlock the module because
         * there are some values cached in that `cseg' variable.  We
         * don't want to lose this information when being
         * unloaded by kerneld even when the tape is full or when we
         * cannot allocate enough memory.
         */
        if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
                TRACE_EXIT -ENOSPC;
        }
        if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
                /* should we unlock the module? But it shouldn't
                 * be locked anyway ...
                 */
                TRACE_EXIT -ENOMEM;
        }
        if (buf_pos_write == 0) { /* fill a new segment */
                *write_cnt = buf_pos_write = start_new_cseg(&cseg,
                                                            dst_buf,
                                                            pos,
                                                            volume->blk_sz,
                                                            zftc_buf,
                                                            seg_sz,
                                                            volume->qic113);
                if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
                        /* previous block fully flushed: account one whole
                         * user block as consumed
                         */
                        req_len_left -= result = volume->blk_sz;
                        cseg.cmpr_pos  = 0;
                } else {
                        result = 0;
                }
        } else {
                *write_cnt = result = 0;
        }

        len_left = seg_sz - buf_pos_write;
        while ((req_len_left > 0) && (len_left > 18)) {
                /* now we have some size left for a new compressed
                 * block.  We know, that the compression buffer is
                 * empty (else there wouldn't be any space left).
                 */
#if LINUX_VERSION_CODE > KERNEL_VER(2,1,3)
                if (copy_from_user(zftc_scratch_buf, src_buf + result,
                                   volume->blk_sz) != 0) {
                        TRACE_EXIT -EFAULT;
                }
#else
                TRACE_CATCH(verify_area(VERIFY_READ, src_buf + result,
                                        volume->blk_sz),);
                memcpy_fromfs(zftc_scratch_buf, src_buf + result,
                              volume->blk_sz);
#endif
                req_len_left -= volume->blk_sz;
                cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz,
                                            zftc_buf);
                /* negative size means the block was stored verbatim; flag
                 * it via bit 15 per QIC-80
                 */
                if (cseg.cmpr_sz < 0) {
                        cseg.uncmpr = 0x8000;
                        cseg.cmpr_sz = -cseg.cmpr_sz;
                } else {
                        cseg.uncmpr = 0;
                }
                /* increment "result" iff we copied the entire
                 * compressed block to the zft_deblock_buf
                 */
                len_left -= sizeof(__u16);
                if (len_left >= cseg.cmpr_sz) {
                        len_left -= cseg.count = cseg.cmpr_sz;
                        cseg.cmpr_pos = cseg.cmpr_sz = 0;
                        result += volume->blk_sz;
                } else {
                        /* block spans into the next segment; remember how
                         * much is left in the compression buffer
                         */
                        cseg.cmpr_sz       -=
                                cseg.cmpr_pos =
                                cseg.count    = len_left;
                        len_left = 0;
                }
                PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
                buf_pos_write += sizeof(__u16);
                memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
                buf_pos_write += cseg.count;
                *write_cnt    += cseg.count + sizeof(__u16);
                FT_SIGNAL_EXIT(_DONT_BLOCK);
        }
        /* erase the remainder of the segment if less than 18 bytes
         * left (18 bytes is due to the QIC-80 standard)
         */
        if (len_left <= 18) {
                memset(dst_buf + buf_pos_write, '\0', len_left);
                (*write_cnt) += len_left;
        }
        TRACE(ft_t_data_flow, "returning %d", result);
        TRACE_EXIT result;
}
672
 
673
/* Read and uncompress data from the current tape segment into a
 * user-space buffer.
 *
 * out:
 *
 * int *read_cnt: the number of bytes consumed from the deblock buffer
 *                (src_buf) by this call.
 *
 * in:
 *
 * __u8 *dst_buf        : user-space destination buffer; filled via
 *                        copy_to_user() (memcpy_tofs() on pre-2.1.4
 *                        kernels).
 * const int to_do      : remaining size of the read request in bytes.
 * const __u8 *src_buf  : deblock buffer holding the raw segment data.
 * const int seg_sz     : size of the current segment in bytes.
 * const zft_position *pos   : current position inside the volume.
 * const zft_volinfo *volume : volume description. volume->blk_sz is the
 *                             block size valid for this volume and may
 *                             differ from the global zft_blk_sz.
 *
 * Returns the number of bytes actually copied to the user buffer, or a
 * negative errno: -EIO when an uncompressed cluster doesn't match the
 * volume block size, -EFAULT when copy_to_user() fails; errors from
 * zft_allocate_cmpr_mem()/get_cseg() propagate via TRACE_CATCH.
 */
static int zftc_read (int *read_cnt,
                      __u8  *dst_buf, const int to_do,
                      const __u8 *src_buf, const int seg_sz,
                      const zft_position *pos, const zft_volinfo *volume)
{
        int uncompressed_sz;
        int result = 0;        /* bytes delivered to the user so far */
        int remaining = to_do; /* bytes still wanted by the caller */
        TRACE_FUN(ft_t_flow);
 
        /* Pin the module while tape data is in flight; released again by
         * zftc_lock()/init path (see keep_module_locked users).
         */
        keep_module_locked = 1;
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
        MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
                            *  locking is done with can_unload()
                            */
#else
        if (!MOD_IN_USE) {
                MOD_INC_USE_COUNT;
        }
#endif
        TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
        if (pos->seg_byte_pos == 0) {
                /* new segment just read: parse its cluster header and
                 * stash the first (possibly partial) cluster in zftc_buf
                 */
                TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
                            *read_cnt = 0);
                memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16),
                       cseg.count);
                cseg.cmpr_pos += cseg.count;
                *read_cnt      = cseg.offset;
                DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
        } else {
                *read_cnt = 0;
        }
        /* loop and uncompress until user buffer full or
         * deblock-buffer empty
         */
        TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
              cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
        while ((cseg.spans == 0) && (remaining > 0)) {
                if (cseg.cmpr_pos  != 0) { /* cmpr buf is not empty */
                        /* cseg.uncmpr == 0x8000 flags an uncompressible
                         * cluster stored raw; signalled to
                         * zft_uncompress() by a negative length.
                         */
                        uncompressed_sz =
                                zft_uncompress(zftc_buf,
                                               cseg.uncmpr == 0x8000 ?
                                               -cseg.cmpr_pos : cseg.cmpr_pos,
                                               zftc_scratch_buf,
                                               volume->blk_sz);
                        if (uncompressed_sz != volume->blk_sz) {
                                *read_cnt = 0;
                                TRACE_ABORT(-EIO, ft_t_warn,
                                      "Uncompressed blk (%d) != blk size (%d)",
                                      uncompressed_sz, volume->blk_sz);
                        }
#if LINUX_VERSION_CODE > KERNEL_VER(2,1,3)
                        if (copy_to_user(dst_buf + result,
                                         zftc_scratch_buf,
                                         uncompressed_sz) != 0 ) {
                                TRACE_EXIT -EFAULT;
                        }
#else
                        memcpy_tofs(dst_buf + result, zftc_scratch_buf,
                                    uncompressed_sz);
#endif
                        remaining      -= uncompressed_sz;
                        result     += uncompressed_sz;
                        cseg.cmpr_pos  = 0;
                }
                if (remaining > 0) {
                        /* fetch the next compressed cluster from the
                         * deblock buffer into zftc_buf
                         */
                        get_next_cluster(&cseg, src_buf, seg_sz,
                                         volume->end_seg == pos->seg_pos);
                        if (cseg.count != 0) {
                                memcpy(zftc_buf, src_buf + cseg.offset,
                                       cseg.count);
                                cseg.cmpr_pos = cseg.count;
                                cseg.offset  += cseg.count;
                                *read_cnt += cseg.count + sizeof(__u16);
                        } else {
                                /* segment exhausted */
                                remaining = 0;
                        }
                }
                TRACE(ft_t_data_flow, "\n"
                      KERN_INFO "compressed_sz: %d\n"
                      KERN_INFO "compos       : %d\n"
                      KERN_INFO "*read_cnt    : %d",
                      cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
        }
        /* swallow the trailing pad bytes (<= 18 bytes per the QIC-80
         * standard) so the caller sees the whole segment as consumed
         */
        if (seg_sz - cseg.offset <= 18) {
                *read_cnt += seg_sz - cseg.offset;
                TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
        }
        TRACE(ft_t_data_flow, "\n"
              KERN_INFO "segment size   : %d\n"
              KERN_INFO "read count     : %d\n"
              KERN_INFO "buf_pos_read   : %d\n"
              KERN_INFO "remaining      : %d",
                seg_sz, *read_cnt, pos->seg_byte_pos,
                seg_sz - *read_cnt - pos->seg_byte_pos);
        TRACE(ft_t_data_flow, "returning: %d", result);
        TRACE_EXIT result;
}
796
 
797
/* Seeking support: moves to a new data position inside the volume and
 * may read one or more segments while doing so.
 *
 * start_seg and end_seg give the boundaries of the current volume;
 * blk_sz is the blk_sz of the current volume as stored in the
 * volume label.
 *
 * We don't allow blocksizes less than 1024 bytes, therefore we don't need
 * a 64 bit argument for new_block_pos.
 */
 
static int seek_in_segment(const unsigned int to_do, cmpr_info  *c_info,
                           const char *src_buf, const int seg_sz,
                           const int seg_pos, const zft_volinfo *volume);
static int slow_seek_forward_until_error(const unsigned int distance,
                                         cmpr_info *c_info, zft_position *pos,
                                         const zft_volinfo *volume, __u8 *buf);
static int search_valid_segment(unsigned int segment,
                                const unsigned int end_seg,
                                const unsigned int max_foffs,
                                zft_position *pos, cmpr_info *c_info,
                                const zft_volinfo *volume, __u8 *buf);
static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
                             zft_position *pos, const zft_volinfo *volume,
                             __u8 *buf);
static int compute_seg_pos(unsigned int dest, zft_position *pos,
                           const zft_volinfo *volume);
 
/* Tuning knobs for zftc_seek(): fall back to a slow (cluster-walking)
 * seek when the target is near, give up on fast seeking after a number
 * of failed guesses, and back up a fixed amount when a guessed segment
 * turns out to be unreadable.
 */
#define ZFT_SLOW_SEEK_THRESHOLD  10 /* segments */
#define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
#define ZFT_FAST_SEEK_BACKUP     10 /* segments */
827
 
828
/* Seek the volume to block new_block_pos (in units of the volume block
 * size).  Position 0 rewinds to the start of the volume.  Otherwise the
 * loop alternates "fast" seeks -- compute_seg_pos() estimates the target
 * segment from the compression ratio observed so far -- with a slow,
 * cluster-walking forward seek when fast seeking loops, gets close to
 * the target, or has failed too often.  Returns 0 on success or a
 * negative errno.
 */
static int zftc_seek(unsigned int new_block_pos,
                     zft_position *pos, const zft_volinfo *volume, __u8 *buf)
{
        unsigned int dest;       /* target position in kilobytes */
        int limit;               /* upper segment bound for the search */
        int distance;            /* remaining distance in kilobytes */
        int result = 0;
        int seg_dist;            /* estimated segment distance */
        int new_seg;
        int old_seg = 0;         /* detects fast-seek loops */
        int fast_seek_trials = 0;
        TRACE_FUN(ft_t_flow);
 
        keep_module_locked = 1;
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
        MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
                            *  locking is done with can_unload()
                            */
#else
        if (!MOD_IN_USE) {
                MOD_INC_USE_COUNT;
        }
#endif
        if (new_block_pos == 0) {
                /* rewind: reset position and compression state */
                pos->seg_pos      = volume->start_seg;
                pos->seg_byte_pos = 0;
                pos->volume_pos   = 0;
                zftc_reset();
                TRACE_EXIT 0;
        }
        dest = new_block_pos * (volume->blk_sz >> 10);
        distance = dest - (pos->volume_pos >> 10);
        while (distance != 0) {
                seg_dist = compute_seg_pos(dest, pos, volume);
                TRACE(ft_t_noise, "\n"
                      KERN_INFO "seg_dist: %d\n"
                      KERN_INFO "distance: %d\n"
                      KERN_INFO "dest    : %d\n"
                      KERN_INFO "vpos    : %d\n"
                      KERN_INFO "seg_pos : %d\n"
                      KERN_INFO "trials  : %d",
                      seg_dist, distance, dest,
                      (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
                      fast_seek_trials);
                if (distance > 0) {
                        if (seg_dist < 0) {
                                TRACE(ft_t_bug, "BUG: distance %d > 0, "
                                      "segment difference %d < 0",
                                      distance, seg_dist);
                                result = -EIO;
                                break;
                        }
                        new_seg = pos->seg_pos + seg_dist;
                        if (new_seg > volume->end_seg) {
                                new_seg = volume->end_seg;
                        }
                        if (old_seg == new_seg || /* loop */
                            seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
                            fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
                                TRACE(ft_t_noise, "starting slow seek:\n"
                                   KERN_INFO "fast seek failed too often: %s\n"
                                   KERN_INFO "near target position      : %s\n"
                                   KERN_INFO "looping between two segs  : %s",
                                      (fast_seek_trials >=
                                       ZFT_FAST_SEEK_MAX_TRIALS)
                                      ? "yes" : "no",
                                      (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD)
                                      ? "yes" : "no",
                                      (old_seg == new_seg)
                                      ? "yes" : "no");
                                result = slow_seek_forward(dest, &cseg,
                                                           pos, volume, buf);
                                break;
                        }
                        old_seg = new_seg;
                        limit = volume->end_seg;
                        fast_seek_trials ++;
                        /* walk backwards in ZFT_FAST_SEEK_BACKUP steps
                         * until a readable segment is found
                         */
                        for (;;) {
                                result = search_valid_segment(new_seg, limit,
                                                              volume->size,
                                                              pos, &cseg,
                                                              volume, buf);
                                if (result == 0 || result == -EINTR) {
                                        break;
                                }
                                if (new_seg == volume->start_seg) {
                                        result = -EIO; /* set error
                                                        * condition
                                                        */
                                        break;
                                }
                                limit    = new_seg;
                                new_seg -= ZFT_FAST_SEEK_BACKUP;
                                if (new_seg < volume->start_seg) {
                                        new_seg = volume->start_seg;
                                }
                        }
                        if (result < 0) {
                                TRACE(ft_t_warn,
                                      "Couldn't find a readable segment");
                                break;
                        }
                } else /* if (distance < 0) */ {
                        if (seg_dist > 0) {
                                TRACE(ft_t_bug, "BUG: distance %d < 0, "
                                      "segment difference %d >0",
                                      distance, seg_dist);
                                result = -EIO;
                                break;
                        }
                        new_seg = pos->seg_pos + seg_dist;
                        if (fast_seek_trials > 0 && seg_dist == 0) {
                                /* this avoids sticking to the same
                                 * segment all the time. On the other hand:
                                 * if we got here for the first time, and the
                                 * deblock_buffer still contains a valid
                                 * segment, then there is no need to skip to
                                 * the previous segment if the desired position
                                 * is inside this segment.
                                 */
                                new_seg --;
                        }
                        if (new_seg < volume->start_seg) {
                                new_seg = volume->start_seg;
                        }
                        limit   = pos->seg_pos;
                        fast_seek_trials ++;
                        /* same backup strategy as in the forward case,
                         * but bounded by the current position
                         */
                        for (;;) {
                                result = search_valid_segment(new_seg, limit,
                                                              pos->volume_pos,
                                                              pos, &cseg,
                                                              volume, buf);
                                if (result == 0 || result == -EINTR) {
                                        break;
                                }
                                if (new_seg == volume->start_seg) {
                                        result = -EIO; /* set error
                                                        * condition
                                                        */
                                        break;
                                }
                                limit    = new_seg;
                                new_seg -= ZFT_FAST_SEEK_BACKUP;
                                if (new_seg < volume->start_seg) {
                                        new_seg = volume->start_seg;
                                }
                        }
                        if (result < 0) {
                                TRACE(ft_t_warn,
                                      "Couldn't find a readable segment");
                                break;
                        }
                }
                distance = dest - (pos->volume_pos >> 10);
        }
        TRACE_EXIT result;
}
985
 
986
 
987
/*  Advance inside the given segment by at most to_do kilobytes without
 *  uncompressing any data; returns the number of kilobytes moved.
 *  c_info tracks the cluster-parsing state across calls.
 */
 
static int seek_in_segment(const unsigned int to_do,
                           cmpr_info  *c_info,
                           const char *src_buf,
                           const int seg_sz,
                           const int seg_pos,
                           const zft_volinfo *volume)
{
        int result = 0;                    /* kilobytes skipped */
        int blk_sz = volume->blk_sz >> 10; /* block size in KB */
        int remaining = to_do;
        TRACE_FUN(ft_t_flow);
 
        if (c_info->offset == 0) {
                /* new segment just read: parse the segment header
                 */
                TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
                c_info->cmpr_pos += c_info->count;
                DUMP_CMPR_INFO(ft_t_noise, "", c_info);
        }
        /* loop and skip clusters until the distance is covered or the
         * deblock-buffer is empty
         */
        TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
              c_info->cmpr_sz, c_info->cmpr_pos);
        while (c_info->spans == 0 && remaining > 0) {
                if (c_info->cmpr_pos  != 0) { /* cmpr buf is not empty */
                        /* each buffered cluster expands to exactly one
                         * block; account for it without uncompressing
                         */
                        result       += blk_sz;
                        remaining    -= blk_sz;
                        c_info->cmpr_pos = 0;
                }
                if (remaining > 0) {
                        get_next_cluster(c_info, src_buf, seg_sz,
                                         volume->end_seg == seg_pos);
                        if (c_info->count != 0) {
                                c_info->cmpr_pos = c_info->count;
                                c_info->offset  += c_info->count;
                        } else {
                                break;
                        }
                }
                /*  Allow escape from this loop on signal!
                 */
                FT_SIGNAL_EXIT(_DONT_BLOCK);
                DUMP_CMPR_INFO(ft_t_noise, "", c_info);
                TRACE(ft_t_noise, "to_do: %d", remaining);
        }
        /* treat the trailing pad (<= 18 bytes, QIC-80) as consumed */
        if (seg_sz - c_info->offset <= 18) {
                c_info->offset = seg_sz;
        }
        TRACE(ft_t_noise, "\n"
              KERN_INFO "segment size   : %d\n"
              KERN_INFO "buf_pos_read   : %d\n"
              KERN_INFO "remaining      : %d",
              seg_sz, c_info->offset,
              seg_sz - c_info->offset);
        TRACE_EXIT result;
}
1048
 
1049
/* Walk forward segment by segment, skipping up to `distance` kilobytes,
 * until the distance is covered, the volume ends, or a segment fails to
 * read (errors propagate via TRACE_CATCH / FT_SIGNAL_EXIT).  Updates
 * *pos as it goes and returns 0 on normal termination.
 */
static int slow_seek_forward_until_error(const unsigned int distance,
                                         cmpr_info *c_info,
                                         zft_position *pos,
                                         const zft_volinfo *volume,
                                         __u8 *buf)
{
        unsigned int remaining = distance;
        int seg_sz;
        int seg_pos;
        int result;
        TRACE_FUN(ft_t_flow);
 
        seg_pos = pos->seg_pos;
        do {
                TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf,
                                                       FT_RD_AHEAD),);
                /* now we have the contents of the actual segment in
                 * the deblock buffer
                 */
                TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
                                                     seg_sz, seg_pos,volume),);
                remaining        -= result;
                pos->volume_pos  += result<<10;
                pos->seg_pos      = seg_pos;
                pos->seg_byte_pos = c_info->offset;
                seg_pos ++;
                /* segment fully consumed: advance the position to the
                 * start of the next segment (unless past the volume end)
                 */
                if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
                        pos->seg_pos ++;
                        pos->seg_byte_pos = 0;
                        c_info->offset = 0;
                }
                /*  Allow escape from this loop on signal!
                 */
                FT_SIGNAL_EXIT(_DONT_BLOCK);
                TRACE(ft_t_noise, "\n"
                      KERN_INFO "remaining:  %d\n"
                      KERN_INFO "seg_pos:    %d\n"
                      KERN_INFO "end_seg:    %d\n"
                      KERN_INFO "result:     %d",
                      remaining, seg_pos, volume->end_seg, result);
        } while (remaining > 0 && seg_pos <= volume->end_seg);
        TRACE_EXIT 0;
}
1092
 
1093
/* Scan segments in [segment, end_seg] for the next one containing valid
 * (parseable) compressed data and, on success, update *pos and *c_info
 * to point at it; returns 0 when found, -EIO otherwise.  A candidate
 * whose file offset lies beyond max_foffs (in KB) is rejected with -EIO.
 */
static int search_valid_segment(unsigned int segment,
                                const unsigned int end_seg,
                                const unsigned int max_foffs,
                                zft_position *pos,
                                cmpr_info *c_info,
                                const zft_volinfo *volume,
                                __u8 *buf)
{
        cmpr_info tmp_info; /* scratch state; committed only on success */
        int seg_sz;
        TRACE_FUN(ft_t_flow);
 
        memset(&tmp_info, 0, sizeof(cmpr_info));
        while (segment <= end_seg) {
                FT_SIGNAL_EXIT(_DONT_BLOCK);
                TRACE(ft_t_noise,
                      "Searching readable segment between %d and %d",
                      segment, end_seg);
                seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
                /* valid iff readable, header parses, and a file offset
                 * is recorded (the first segment legitimately has none)
                 */
                if ((seg_sz > 0) &&
                    (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
                    (tmp_info.foffs != 0 || segment == volume->start_seg)) {
                        if ((tmp_info.foffs>>10) > max_foffs) {
                                TRACE_ABORT(-EIO, ft_t_noise, "\n"
                                            KERN_INFO "cseg.foff: %d\n"
                                            KERN_INFO "dest     : %d",
                                            (int)(tmp_info.foffs >> 10),
                                            max_foffs);
                        }
                        DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
                        *c_info           = tmp_info;
                        pos->seg_pos      = segment;
                        pos->volume_pos   = c_info->foffs;
                        pos->seg_byte_pos = c_info->offset;
                        TRACE(ft_t_noise, "found segment at %d", segment);
                        TRACE_EXIT 0;
                }
                segment++;
        }
        TRACE_EXIT -EIO;
}
1136
 
1137
static int slow_seek_forward(unsigned int dest,
1138
                             cmpr_info *c_info,
1139
                             zft_position *pos,
1140
                             const zft_volinfo *volume,
1141
                             __u8 *buf)
1142
{
1143
        unsigned int distance;
1144
        int result = 0;
1145
        TRACE_FUN(ft_t_flow);
1146
 
1147
        distance = dest - (pos->volume_pos >> 10);
1148
        while ((distance > 0) &&
1149
               (result = slow_seek_forward_until_error(distance,
1150
                                                       c_info,
1151
                                                       pos,
1152
                                                       volume,
1153
                                                       buf)) < 0) {
1154
                if (result == -EINTR) {
1155
                        break;
1156
                }
1157
                TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
1158
                /* the failing segment is either pos->seg_pos or
1159
                 * pos->seg_pos + 1. There is no need to further try
1160
                 * that segment, because ftape_read_segment() already
1161
                 * has tried very much to read it. So we start with
1162
                 * following segment, which is pos->seg_pos + 1
1163
                 */
1164
                if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
1165
                                        pos, c_info,
1166
                                        volume, buf) < 0) {
1167
                        TRACE(ft_t_noise, "search_valid_segment() failed");
1168
                        result = -EIO;
1169
                        break;
1170
                }
1171
                distance = dest - (pos->volume_pos >> 10);
1172
                result = 0;
1173
                TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
1174
                /* found valid segment, retry the seek */
1175
        }
1176
        TRACE_EXIT result;
1177
}
1178
 
1179
static int compute_seg_pos(const unsigned int dest,
1180
                           zft_position *pos,
1181
                           const zft_volinfo *volume)
1182
{
1183
        int segment;
1184
        int distance = dest - (pos->volume_pos >> 10);
1185
        unsigned int raw_size;
1186
        unsigned int virt_size;
1187
        unsigned int factor;
1188
        TRACE_FUN(ft_t_flow);
1189
 
1190
        if (distance >= 0) {
1191
                raw_size  = volume->end_seg - pos->seg_pos + 1;
1192
                virt_size = ((unsigned int)(volume->size>>10)
1193
                             - (unsigned int)(pos->volume_pos>>10)
1194
                             + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1195
                virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1196
                if (virt_size == 0 || raw_size == 0) {
1197
                        TRACE_EXIT 0;
1198
                }
1199
                if (raw_size >= (1<<25)) {
1200
                        factor = raw_size/(virt_size>>7);
1201
                } else {
1202
                        factor = (raw_size<<7)/virt_size;
1203
                }
1204
                segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1205
                segment = (segment * factor)>>7;
1206
        } else {
1207
                raw_size  = pos->seg_pos - volume->start_seg + 1;
1208
                virt_size = ((unsigned int)(pos->volume_pos>>10)
1209
                             + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1210
                virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1211
                if (virt_size == 0 || raw_size == 0) {
1212
                        TRACE_EXIT 0;
1213
                }
1214
                if (raw_size >= (1<<25)) {
1215
                        factor = raw_size/(virt_size>>7);
1216
                } else {
1217
                        factor = (raw_size<<7)/virt_size;
1218
                }
1219
                segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1220
        }
1221
        TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
1222
        TRACE_EXIT segment;
1223
}
1224
 
1225
/* Operations table handed to zftape via zft_cmpr_register().
 * NOTE(review): positional initializer -- the order must match the
 * member order of struct zft_cmpr_ops in the zftape header; confirm
 * against <linux/zftape.h> before reordering.
 */
static struct zft_cmpr_ops cmpr_ops = {
        zftc_write,
        zftc_read,
        zftc_seek,
        zftc_lock,
        zftc_reset,
        zftc_cleanup
};
1233
 
1234
/* Announce the compressor and register its operations table with
 * zftape.  Returns 0 on success; a zft_cmpr_register() failure
 * propagates via TRACE_CATCH.
 */
int zft_compressor_init(void)
{
        TRACE_FUN(ft_t_flow);
 
#ifdef MODULE
        printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
        if (TRACE_LEVEL >= ft_t_info) {
                printk(
KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n"
KERN_INFO "Compiled for kernel version %s"
#ifdef MODVERSIONS
                " with versioned symbols"
#endif
                "\n", UTS_RELEASE);
        }
#else /* !MODULE */
        /* print a short no-nonsense boot message */
        printk("zftape compressor v1.00a 970514 for Linux " UTS_RELEASE "\n");
        printk("For use with " FTAPE_VERSION "\n");
#endif /* MODULE */
        TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
        TRACE(ft_t_info, "installing compressor for zftape ...");
        TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
        TRACE_EXIT 0;
}
1260
 
1261
#ifdef MODULE
 
/* Fix: the author string was missing the closing parenthesis after the
 * e-mail address.
 */
MODULE_AUTHOR(
        "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)");
MODULE_DESCRIPTION(
"Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
MODULE_LICENSE("GPL");
1268
 
1269
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
/* Module-unload gate (2.1.18+ kernels): refuse to unload while the
 * compressor is in active use.
 */
static int can_unload(void)
{
        if (keep_module_locked) {
                return -EBUSY;
        }
        return 0;
}
#endif
1275
 
1276
/* Called by modules package when installing the driver.
 * Hooks up the can_unload() gate (on 2.1.18+), registers the
 * compressor, and then drops the initial module lock.
 */
int init_module(void)
{
        int result;
 
#if LINUX_VERSION_CODE < KERNEL_VER(2,1,18)
        register_symtab(0); /* remove global ftape symbols */
#else
        if (!mod_member_present(&__this_module, can_unload))
                return -EBUSY;
        __this_module.can_unload = can_unload;
        EXPORT_NO_SYMBOLS;
#endif
        result = zft_compressor_init();
        /* allow unloading until the compressor is actually used */
        keep_module_locked = 0;
        return result;
}
1294
 
1295
/* Called by modules package when removing the driver.
 * Unregisters the compressor from zftape and releases the compression
 * buffers.
 */
void cleanup_module(void)
{
        TRACE_FUN(ft_t_flow);
 
        /* sanity check: zftape should hand back exactly our ops table */
        if (zft_cmpr_unregister() != &cmpr_ops) {
                TRACE(ft_t_info, "failed");
        } else {
                TRACE(ft_t_info, "successful");
        }
        zftc_cleanup();
        printk(KERN_INFO "zft-compressor successfully unloaded.\n");
        TRACE_EXIT;
}
#endif /* MODULE */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.