OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [mtd/] [mtdconcat.c] - Blame information for rev 1774

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * MTD device concatenation layer
3
 *
4
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
5
 *
6
 * This code is GPL
7
 *
8
 * $Id: mtdconcat.c,v 1.1.1.1 2004-04-15 01:51:44 phoenix Exp $
9
 */
10
 
11
#include <linux/module.h>
12
#include <linux/types.h>
13
#include <linux/kernel.h>
14
#include <linux/slab.h>
15
 
16
#include <linux/mtd/mtd.h>
17
#include <linux/mtd/concat.h>
18
 
19
/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 *
 */
struct mtd_concat {
        struct mtd_info mtd;            /* the "super" device presented to MTD users */
        int             num_subdev;     /* number of concatenated subdevices */
        struct mtd_info **subdev;       /* points into the trailing pointer array */
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
        ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))


/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * (Works because the mtd member is the first field of struct mtd_concat.)
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
44
 
45
 
46
/*
47
 * MTD methods which look up the relevant subdevice, translate the
48
 * effective address and pass through to the subdevice.
49
 */
50
 
51
/*
 * Read `len' bytes starting at the concatenated offset `from',
 * splitting the request across subdevices as necessary.
 * On success *retlen accumulates the total bytes read.
 * Returns 0 on success or the first subdevice error; -EINVAL if the
 * request could not be fully satisfied by the available subdevices.
 */
static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int ret = -EINVAL;
        int i;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t chunk, retsize;

                if (from >= subdev->size) {
                        /* request starts beyond this subdevice: rebase and skip */
                        from -= subdev->size;
                        continue;
                }

                /* clip the chunk to what this subdevice can serve */
                if (from + len > subdev->size)
                        chunk = subdev->size - from;
                else
                        chunk = len;

                ret = subdev->read(subdev, from, chunk, &retsize, buf);
                if (ret)
                        break;

                *retlen += retsize;
                len -= chunk;
                if (len == 0)
                        break;

                /* remainder continues at offset 0 of the next subdevice */
                ret = -EINVAL;
                buf += chunk;
                from = 0;
        }
        return ret;
}
94
 
95
/*
 * Write `len' bytes starting at the concatenated offset `to',
 * splitting the request across subdevices as necessary.
 * Fails with -EROFS if the super device, or any subdevice touched by
 * the range, is not writeable. On success *retlen accumulates the
 * total bytes written.
 */
static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int ret = -EINVAL;
        int i;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t chunk, retsize;

                if (to >= subdev->size) {
                        /* request starts beyond this subdevice: rebase and skip */
                        to -= subdev->size;
                        continue;
                }

                /* clip the chunk to what this subdevice can take */
                if (to + len > subdev->size)
                        chunk = subdev->size - to;
                else
                        chunk = len;

                if (!(subdev->flags & MTD_WRITEABLE))
                        ret = -EROFS;
                else
                        ret = subdev->write(subdev, to, chunk, &retsize, buf);

                if (ret)
                        break;

                *retlen += retsize;
                len -= chunk;
                if (len == 0)
                        break;

                /* remainder continues at offset 0 of the next subdevice */
                ret = -EINVAL;
                buf += chunk;
                to = 0;
        }
        return ret;
}
144
 
145
/*
 * Erase-completion callback: instr->priv carries the address of the
 * wait queue head that concat_dev_erase() is sleeping on; wake it.
 */
static void concat_erase_callback (struct erase_info *instr)
{
        wake_up((wait_queue_head_t *)instr->priv);
}
149
 
150
/*
 * Issue an erase on a single subdevice and sleep (uninterruptibly)
 * until it completes or fails. The erase_info's mtd/callback/priv
 * fields are overwritten to route completion back to our local wait
 * queue. Returns the subdevice's error, or -EIO if the erase ended
 * in MTD_ERASE_FAILED.
 */
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
        int err;
        wait_queue_head_t waitq;
        DECLARE_WAITQUEUE(wait, current);

        /*
         * This code was stol^H^H^H^Hinspired by mtdchar.c
         */
        init_waitqueue_head(&waitq);

        erase->mtd = mtd;
        erase->callback = concat_erase_callback;
        erase->priv = (unsigned long)&waitq;

        /*
         * FIXME: Allow INTERRUPTIBLE. Which means
         * not having the wait_queue head on the stack.
         */
        err = mtd->erase(mtd, erase);
        if (!err)
        {
                /*
                 * Set task state *before* re-checking erase->state so a
                 * wake-up between the check and schedule() is not lost.
                 */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
                if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED)
                        schedule();
                remove_wait_queue(&waitq, &wait);
                set_current_state(TASK_RUNNING);

                err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
        }
        return err;
}
183
 
184
/*
 * Erase the range described by instr on the concatenated device.
 *
 * Steps:
 *   1. validate bounds and erase-block alignment (using the super
 *      device's erase region map, so each sub-range need not be
 *      re-checked per subdevice),
 *   2. locate the first affected subdevice and rebase instr->addr to
 *      a subdevice-relative offset,
 *   3. erase subdevice by subdevice via concat_dev_erase(), each time
 *      clipping the length to the current subdevice.
 *
 * The caller's erase_info is never modified; completion is reported
 * by setting instr->state and invoking instr->callback, as the MTD
 * erase contract requires.
 */
static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_info *subdev;
        int i, err;
        u_int32_t length;
        struct erase_info *erase;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if(instr->addr > concat->mtd.size)
                return -EINVAL;

        if(instr->len + instr->addr > concat->mtd.size)
                return -EINVAL;

        /*
         * Check for proper erase block alignment of the to-be-erased area.
         * It is easier to do this based on the super device's erase
         * region info rather than looking at each particular sub-device
         * in turn.
         */
        if (!concat->mtd.numeraseregions)
        {       /* the easy case: device has uniform erase block size */
                if(instr->addr & (concat->mtd.erasesize - 1))
                        return -EINVAL;
                if(instr->len & (concat->mtd.erasesize - 1))
                        return -EINVAL;
        }
        else
        {       /* device has variable erase size */
                struct mtd_erase_region_info *erase_regions = concat->mtd.eraseregions;

                /*
                 * Find the erase region where the to-be-erased area begins:
                 * (scan past all regions starting at or below addr, then
                 * step back one)
                 */
                for(i = 0; i < concat->mtd.numeraseregions &&
                           instr->addr >= erase_regions[i].offset; i++)
                        ;
                --i;

                /*
                 * Now erase_regions[i] is the region in which the
                 * to-be-erased area begins. Verify that the starting
                 * offset is aligned to this region's erase size:
                 */
                if (instr->addr & (erase_regions[i].erasesize-1))
                        return -EINVAL;

                /*
                 * now find the erase region where the to-be-erased area ends:
                 */
                for(; i < concat->mtd.numeraseregions &&
                      (instr->addr + instr->len) >=  erase_regions[i].offset ; ++i)
                        ;
                --i;
                /*
                 * check if the ending offset is aligned to this region's erase size
                 */
                if ((instr->addr + instr->len) & (erase_regions[i].erasesize-1))
                        return -EINVAL;
        }

        /* make a local copy of instr to avoid modifying the caller's struct */
        erase = kmalloc(sizeof(struct erase_info),GFP_KERNEL);

        if (!erase)
                return -ENOMEM;

        *erase = *instr;
        length = instr->len;

        /*
         * find the subdevice where the to-be-erased area begins, adjust
         * starting offset to be relative to the subdevice start
         */
        for(i = 0; i < concat->num_subdev; i++)
        {
                subdev = concat->subdev[i];
                if(subdev->size <= erase->addr)
                        erase->addr -= subdev->size;
                else
                        break;
        }
        if(i >= concat->num_subdev)     /* must never happen since size */
                BUG();                  /* limit has been verified above */

        /* now do the erase: */
        err = 0;
        for(;length > 0; i++)    /* loop for all subevices affected by this request */
        {
                subdev = concat->subdev[i];             /* get current subdevice */

                /* limit length to subdevice's size: */
                if(erase->addr + length > subdev->size)
                        erase->len = subdev->size - erase->addr;
                else
                        erase->len = length;

                if (!(subdev->flags & MTD_WRITEABLE))
                {
                        err = -EROFS;
                        break;
                }
                length -= erase->len;
                if ((err = concat_dev_erase(subdev, erase)))
                {
                        if(err == -EINVAL)      /* sanity check: must never happen since */
                                BUG();          /* block alignment has been checked above */
                        break;
                }
                /*
                 * erase->addr specifies the offset of the area to be
                 * erased *within the current subdevice*. It can be
                 * non-zero only the first time through this loop, i.e.
                 * for the first subdevice where blocks need to be erased.
                 * All the following erases must begin at the start of the
                 * current subdevice, i.e. at offset zero.
                 */
                erase->addr = 0;
        }
        kfree(erase);
        if (err)
                return err;

        /* report completion to the caller via the standard MTD protocol */
        instr->state = MTD_ERASE_DONE;
        if (instr->callback)
                instr->callback(instr);
        return 0;
}
315
 
316
/*
 * Lock (write-protect) the given address range, distributing the
 * request over the affected subdevices. Returns 0 on success, the
 * first subdevice error otherwise, or -EINVAL for a bad range.
 */
static int concat_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, ret = -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t chunk;

                if (ofs >= subdev->size) {
                        /* range starts beyond this subdevice: rebase and skip */
                        ofs -= subdev->size;
                        continue;
                }

                /* clip the chunk to this subdevice's extent */
                if (ofs + len > subdev->size)
                        chunk = subdev->size - ofs;
                else
                        chunk = len;

                ret = subdev->lock(subdev, ofs, chunk);
                if (ret)
                        break;

                len -= chunk;
                if (len == 0)
                        break;

                /* remainder continues at offset 0 of the next subdevice */
                ret = -EINVAL;
                ofs = 0;
        }
        return ret;
}
356
 
357
/*
 * Unlock (remove write protection from) the given address range,
 * distributing the request over the affected subdevices.
 *
 * Fix: err is now initialised to -EINVAL, matching concat_lock(),
 * concat_read() and concat_write(). Previously it started at 0, so
 * falling out of the loop without ever reaching a subdevice (or with
 * the range not fully consumed) would wrongly report success.
 */
static int concat_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for(i = 0; i < concat->num_subdev; i++)
        {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size;

                if (ofs >= subdev->size)
                {
                        /* range starts beyond this subdevice: rebase and skip */
                        size  = 0;
                        ofs -= subdev->size;
                }
                else
                {
                        /* clip the chunk to this subdevice's extent */
                        if (ofs + len > subdev->size)
                                size = subdev->size - ofs;
                        else
                                size = len;

                        err = subdev->unlock(subdev, ofs, size);

                        if(err)
                                break;

                        len -= size;
                        if(len == 0)
                                break;

                        /* remainder continues at offset 0 of the next subdevice */
                        err = -EINVAL;
                        ofs = 0;
                }
        }
        return err;
}
397
 
398
static void concat_sync(struct mtd_info *mtd)
399
{
400
        struct mtd_concat *concat = CONCAT(mtd);
401
        int i;
402
 
403
        for(i = 0; i < concat->num_subdev; i++)
404
        {
405
                struct mtd_info *subdev = concat->subdev[i];
406
                subdev->sync(subdev);
407
        }
408
}
409
 
410
static int concat_suspend(struct mtd_info *mtd)
411
{
412
        struct mtd_concat *concat = CONCAT(mtd);
413
        int i, rc = 0;
414
 
415
        for(i = 0; i < concat->num_subdev; i++)
416
        {
417
                struct mtd_info *subdev = concat->subdev[i];
418
                if((rc = subdev->suspend(subdev)) < 0)
419
                        return rc;
420
        }
421
        return rc;
422
}
423
 
424
static void concat_resume(struct mtd_info *mtd)
425
{
426
        struct mtd_concat *concat = CONCAT(mtd);
427
        int i;
428
 
429
        for(i = 0; i < concat->num_subdev; i++)
430
        {
431
                struct mtd_info *subdev = concat->subdev[i];
432
                subdev->resume(subdev);
433
        }
434
}
435
 
436
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success (NULL on allocation failure or incompatible
 * subdevices). This function does _not_ register any devices: this
 * is the caller's responsibility.
 *
 * All subdevices must share type, oobblock, oobsize, ecctype and
 * eccsize; flags may differ only in MTD_WRITEABLE (the super device
 * becomes writeable if any subdevice is).
 */
struct mtd_info *mtd_concat_create(
        struct mtd_info *subdev[],      /* subdevices to concatenate */
        int num_devs,                   /* number of subdevices      */
        char *name)                     /* name for the new device   */
{
        int i;
        size_t size;
        struct mtd_concat *concat;
        u_int32_t max_erasesize, curr_erasesize;
        int num_erase_region;

        printk(KERN_NOTICE "Concatenating MTD devices:\n");
        for(i = 0; i < num_devs; i++)
                printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
        printk(KERN_NOTICE "into device \"%s\"\n", name);

        /* allocate the device structure (mtd_concat plus the trailing
         * subdevice pointer array, in one allocation) */
        size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
        concat = kmalloc (size, GFP_KERNEL);
        if(!concat)
        {
                printk ("memory allocation error while creating concatenated device \"%s\"\n",
                                name);
                return NULL;
        }
        memset(concat, 0, size);
        /* pointer array lives immediately after the struct itself */
        concat->subdev = (struct mtd_info **)(concat + 1);

        /*
         * Set up the new "super" device's MTD object structure, check for
         * incompatibilites between the subdevices.
         */
        concat->mtd.type      = subdev[0]->type;
        concat->mtd.flags     = subdev[0]->flags;
        concat->mtd.size      = subdev[0]->size;
        concat->mtd.erasesize = subdev[0]->erasesize;
        concat->mtd.oobblock  = subdev[0]->oobblock;
        concat->mtd.oobsize   = subdev[0]->oobsize;
        concat->mtd.ecctype   = subdev[0]->ecctype;
        concat->mtd.eccsize   = subdev[0]->eccsize;

        concat->subdev[0]   = subdev[0];

        for(i = 1; i < num_devs; i++)
        {
                if(concat->mtd.type != subdev[i]->type)
                {
                        kfree(concat);
                        printk ("Incompatible device type on \"%s\"\n", subdev[i]->name);
                        return NULL;
                }
                if(concat->mtd.flags != subdev[i]->flags)
                {       /*
                         * Expect all flags except MTD_WRITEABLE to be equal on
                         * all subdevices.
                         */
                        if((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE)
                        {
                                kfree(concat);
                                printk ("Incompatible device flags on \"%s\"\n", subdev[i]->name);
                                return NULL;
                        }
                        else    /* if writeable attribute differs, make super device writeable */
                                concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
                }
                concat->mtd.size += subdev[i]->size;
                if(concat->mtd.oobblock != subdev[i]->oobblock ||
                   concat->mtd.oobsize  != subdev[i]->oobsize  ||
                   concat->mtd.ecctype  != subdev[i]->ecctype  ||
                   concat->mtd.eccsize  != subdev[i]->eccsize)
                {
                        kfree(concat);
                        printk ("Incompatible OOB or ECC data on \"%s\"\n", subdev[i]->name);
                        return NULL;
                }
                concat->subdev[i] = subdev[i];

        }

        concat->num_subdev  = num_devs;
        concat->mtd.name    = name;

        /*
         * NOTE: for now, we do not provide any readv()/writev() methods
         *       because they are messy to implement and they are not
         *       used to a great extent anyway.
         */
        concat->mtd.erase   = concat_erase;
        concat->mtd.read    = concat_read;
        concat->mtd.write   = concat_write;
        concat->mtd.sync    = concat_sync;
        concat->mtd.lock    = concat_lock;
        concat->mtd.unlock  = concat_unlock;
        concat->mtd.suspend = concat_suspend;
        concat->mtd.resume  = concat_resume;


        /*
         * Combine the erase block size info of the subdevices:
         *
         * first, walk the map of the new device and see how
         * many changes in erase size we have
         */
        max_erasesize = curr_erasesize = subdev[0]->erasesize;
        num_erase_region = 1;
        for(i = 0; i < num_devs; i++)
        {
                if(subdev[i]->numeraseregions == 0)
                {       /* current subdevice has uniform erase size */
                        if(subdev[i]->erasesize != curr_erasesize)
                        {       /* if it differs from the last subdevice's erase size, count it */
                                ++num_erase_region;
                                curr_erasesize = subdev[i]->erasesize;
                                if(curr_erasesize > max_erasesize)
                                        max_erasesize = curr_erasesize;
                        }
                }
                else
                {       /* current subdevice has variable erase size */
                        int j;
                        for(j = 0; j < subdev[i]->numeraseregions; j++)
                        {       /* walk the list of erase regions, count any changes */
                                if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
                                {
                                        ++num_erase_region;
                                        curr_erasesize = subdev[i]->eraseregions[j].erasesize;
                                        if(curr_erasesize > max_erasesize)
                                                max_erasesize = curr_erasesize;
                                }
                        }
                }
        }

        if(num_erase_region == 1)
        {       /*
                 * All subdevices have the same uniform erase size.
                 * This is easy:
                 */
                concat->mtd.erasesize = curr_erasesize;
                concat->mtd.numeraseregions = 0;
        }
        else
        {       /*
                 * erase block size varies across the subdevices: allocate
                 * space to store the data describing the variable erase regions
                 */
                struct mtd_erase_region_info *erase_region_p;
                u_int32_t begin, position;

                concat->mtd.erasesize = max_erasesize;
                concat->mtd.numeraseregions = num_erase_region;
                concat->mtd.eraseregions = erase_region_p = kmalloc (
                     num_erase_region * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
                if(!erase_region_p)
                {
                        kfree(concat);
                        printk ("memory allocation error while creating erase region list"
                                " for device \"%s\"\n", name);
                        return NULL;
                }

                /*
                 * walk the map of the new device once more and fill in
                 * in erase region info:
                 * `begin' marks the start of the current homogeneous run,
                 * `position' is the running offset within the super device.
                 */
                curr_erasesize = subdev[0]->erasesize;
                begin = position = 0;
                for(i = 0; i < num_devs; i++)
                {
                        if(subdev[i]->numeraseregions == 0)
                        {       /* current subdevice has uniform erase size */
                                if(subdev[i]->erasesize != curr_erasesize)
                                {       /*
                                         *  fill in an mtd_erase_region_info structure for the area
                                         *  we have walked so far:
                                         */
                                        erase_region_p->offset    = begin;
                                        erase_region_p->erasesize = curr_erasesize;
                                        erase_region_p->numblocks = (position - begin) / curr_erasesize;
                                        begin = position;

                                        curr_erasesize = subdev[i]->erasesize;
                                        ++erase_region_p;
                                }
                                position += subdev[i]->size;
                        }
                        else
                        {       /* current subdevice has variable erase size */
                                int j;
                                for(j = 0; j < subdev[i]->numeraseregions; j++)
                                {       /* walk the list of erase regions, count any changes */
                                        if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
                                        {
                                                erase_region_p->offset    = begin;
                                                erase_region_p->erasesize = curr_erasesize;
                                                erase_region_p->numblocks = (position - begin) / curr_erasesize;
                                                begin = position;

                                                curr_erasesize = subdev[i]->eraseregions[j].erasesize;
                                                ++erase_region_p;
                                        }
                                        position += subdev[i]->eraseregions[j].numblocks * curr_erasesize;
                                }
                        }
                }
                /* Now write the final entry */
                erase_region_p->offset    = begin;
                erase_region_p->erasesize = curr_erasesize;
                erase_region_p->numblocks = (position - begin) / curr_erasesize;
        }

        return &concat->mtd;
}
655
 
656
/*
 * This function destroys an MTD object obtained from concat_mtd_devs()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        /* the erase region table is a separate allocation (see
         * mtd_concat_create); free it only when one was made */
        if(concat->mtd.numeraseregions)
                kfree(concat->mtd.eraseregions);
        kfree(concat);
}
667
 
668
 
669
EXPORT_SYMBOL(mtd_concat_create);
670
EXPORT_SYMBOL(mtd_concat_destroy);
671
 
672
 
673
MODULE_LICENSE("GPL");
674
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
675
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.