/* uClinux-2.0.x/drivers/block/md.c -- or1k_old/trunk, rev 1782
   https://opencores.org/ocsvn/or1k_old/or1k_old/trunk */

/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1994-96 Marc ZYNGIER
          <zyngier@ufr-info-p7.ibp.fr> or
          <maz@gloups.fdn.fr>

   A lot of inspiration came from hd.c ...

   kerneld support by Boris Tobotras <boris@xtalk.msk.su>

   RAID-1/RAID-5 extensions by:
        Ingo Molnar, Miguel de Icaza, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/malloc.h>
#include <linux/mm.h>
#include <linux/md.h>
#include <linux/hdreg.h>
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/smp_lock.h>
#ifdef CONFIG_KERNELD
#include <linux/kerneld.h>
#endif
#include <linux/errno.h>
/*
 * For kernel_thread()
 */
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>

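/*
 * MAJOR_NR has to be defined before <linux/blk.h> is pulled in: blk.h
 * keys its DEVICE_* macros (among them DEVICE_REQUEST, used in
 * md_init() below) off this define.
 */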
#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

#include <linux/blk.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

static struct hd_struct md_hd_struct[MAX_MD_DEV];
static int md_blocksizes[MAX_MD_DEV];
static struct md_thread md_threads[MAX_MD_THREADS];

int md_size[MAX_MD_DEV]={0, };

static void md_geninit (struct gendisk *);

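/*
 * The positional initializer below should line up with the 2.0-era
 * struct gendisk fields: major, major_name, minor_shift, max_p,
 * max_nr, init, part, sizes, nr_real, real_devices and next (worth
 * re-checking against <linux/genhd.h> of the matching tree).
 */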
static struct gendisk md_gendisk=
{
  MD_MAJOR,
  "md",
  0,
  1,
  MAX_MD_DEV,
  md_geninit,
  md_hd_struct,
  md_size,
  MAX_MD_DEV,
  NULL,
  NULL
};

static struct md_personality *pers[MAX_PERSONALITY]={NULL, };

struct md_dev md_dev[MAX_MD_DEV];

static struct gendisk *find_gendisk (kdev_t dev)
{
  struct gendisk *tmp=gendisk_head;

  while (tmp != NULL)
  {
    if (tmp->major==MAJOR(dev))
      return (tmp);

    tmp=tmp->next;
  }

  return (NULL);
}


char *partition_name (kdev_t dev)
{
  static char name[40];         /* This should be long
                                   enough for a device name ! */
  struct gendisk *hd = find_gendisk (dev);

  if (!hd)
  {
    sprintf (name, "[dev %s]", kdevname(dev));
    return (name);
  }

  return disk_name (hd, MINOR(dev), name);  /* routine in genhd.c */
}
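/* Note that partition_name() returns a pointer into a single static
   buffer, so it is not reentrant; that is fine for the sequential
   printk-style callers in this file, but concurrent callers would
   overwrite each other's result. */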


static void set_ra (void)
{
  int i, j, minra=INT_MAX;

  for (i=0; i<MAX_MD_DEV; i++)
  {
    if (!md_dev[i].pers)
      continue;

    for (j=0; j<md_dev[i].nb_dev; j++)
      if (read_ahead[MAJOR(md_dev[i].devices[j].dev)]<minra)
        minra=read_ahead[MAJOR(md_dev[i].devices[j].dev)];
  }

  read_ahead[MD_MAJOR]=minra;
}

static int legacy_raid_sb (int minor, int pnum)
{
        int i, factor;

        factor = 1 << FACTOR_SHIFT(FACTOR((md_dev+minor)));

        /*****
         * do size and offset calculations.
         */
        for (i=0; i<md_dev[minor].nb_dev; i++) {
                md_dev[minor].devices[i].size &= ~(factor - 1);
                md_size[minor] += md_dev[minor].devices[i].size;
                md_dev[minor].devices[i].offset=i ? (md_dev[minor].devices[i-1].offset +
                                                        md_dev[minor].devices[i-1].size) : 0;
        }
        return 0;
}
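/* Worked example of the rounding above: with factor == 32, a member
   of 100010 blocks is trimmed to 100000 blocks (100010 & ~31), and
   each device's offset is the running sum of the trimmed sizes before
   it -- i.e. the members are laid out back to back. */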

static void free_sb (struct md_dev *mddev)
{
        int i;
        struct real_dev *realdev;

        if (mddev->sb) {
                free_page((unsigned long) mddev->sb);
                mddev->sb = NULL;
        }
        for (i = 0; i < mddev->nb_dev; i++) {
                realdev = mddev->devices + i;
                if (realdev->sb) {
                        free_page((unsigned long) realdev->sb);
                        realdev->sb = NULL;
                }
        }
}
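/*
 * analyze_sb() reads the raid superblock off every member device,
 * adopts the most recently updated copy as the array-wide superblock,
 * and derives the exported array size from it (RAID-1: sb->size;
 * RAID-4/5: sb->size * (raid_disks - 1)). RAID-0 and linear arrays
 * carry no superblock and are handled by legacy_raid_sb() above.
 */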

static int analyze_sb (int minor, int pnum)
{
        int i;
        struct md_dev *mddev = md_dev + minor;
        struct buffer_head *bh;
        kdev_t dev;
        struct real_dev *realdev;
        u32 sb_offset, device_size;
        md_superblock_t *sb = NULL;

        /*
         * raid-0 and linear don't use a raid superblock
         */
        if (pnum == RAID0 >> PERSONALITY_SHIFT || pnum == LINEAR >> PERSONALITY_SHIFT)
                return legacy_raid_sb(minor, pnum);

        /*
         * Verify the raid superblock on each real device
         */
        for (i = 0; i < mddev->nb_dev; i++) {
                realdev = mddev->devices + i;
                dev = realdev->dev;
                device_size = blk_size[MAJOR(dev)][MINOR(dev)];
                realdev->sb_offset = sb_offset = MD_NEW_SIZE_BLOCKS(device_size);
                set_blocksize(dev, MD_SB_BYTES);
                bh = bread(dev, sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
                if (bh) {
                        sb = (md_superblock_t *) bh->b_data;
                        if (sb->md_magic != MD_SB_MAGIC) {
                                printk("md: %s: invalid raid superblock magic (%x) on block %u\n", kdevname(dev), sb->md_magic, sb_offset);
                                goto abort;
                        }
                        if (!mddev->sb) {
                                mddev->sb = (md_superblock_t *) __get_free_page(GFP_KERNEL);
                                if (!mddev->sb)
                                        goto abort;
                                memcpy(mddev->sb, sb, MD_SB_BYTES);
                        }
                        realdev->sb = (md_superblock_t *) __get_free_page(GFP_KERNEL);
                        if (!realdev->sb)
                                goto abort;
                        memcpy(realdev->sb, bh->b_data, MD_SB_BYTES);

                        if (memcmp(mddev->sb, sb, MD_SB_GENERIC_CONSTANT_WORDS * 4)) {
                                printk(KERN_ERR "md: superblock inconsistency -- run ckraid\n");
                                goto abort;
                        }
                        /*
                         * Find the newest superblock version
                         */
                        if (sb->utime != mddev->sb->utime) {
                                printk(KERN_ERR "md: superblock update time inconsistency -- using the most recent one\n");
                                if (sb->utime > mddev->sb->utime)
                                        memcpy(mddev->sb, sb, MD_SB_BYTES);
                        }
                        realdev->size = sb->size;
                } else
                        printk(KERN_ERR "md: disabled device %s\n", kdevname(dev));
        }
        if (!mddev->sb) {
                printk(KERN_ERR "md: couldn't access raid array %s\n", kdevname(MKDEV(MD_MAJOR, minor)));
                goto abort;
        }
        sb = mddev->sb;

        /*
         * Check if we can support this raid array
         */
        if (sb->major_version != MD_MAJOR_VERSION || sb->minor_version > MD_MINOR_VERSION) {
                printk("md: %s: unsupported raid array version %d.%d.%d\n", kdevname(MKDEV(MD_MAJOR, minor)),
                sb->major_version, sb->minor_version, sb->patch_version);
                goto abort;
        }
        if (sb->state != (1 << MD_SB_CLEAN)) {
                printk(KERN_ERR "md: %s: raid array is not clean -- run ckraid\n", kdevname(MKDEV(MD_MAJOR, minor)));
                goto abort;
        }
        switch (sb->level) {
                case 1:
                        md_size[minor] = sb->size;
                        break;
                case 4:
                case 5:
                        md_size[minor] = sb->size * (sb->raid_disks - 1);
                        break;
                default:
                        printk(KERN_ERR "md: %s: unsupported raid level %d\n", kdevname(MKDEV(MD_MAJOR, minor)), sb->level);
                        goto abort;
        }
        return 0;
abort:
        free_sb(mddev);
        return 1;
}
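/*
 * md_update_sb() pushes the in-core superblock back out to every
 * member device at its recorded sb_offset, patching the per-device
 * descriptor into each copy, then waits for the write to complete
 * and flushes the device.
 */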

int md_update_sb(int minor)
{
        struct md_dev *mddev = md_dev + minor;
        struct buffer_head *bh;
        md_superblock_t *sb = mddev->sb;
        struct real_dev *realdev;
        kdev_t dev;
        int i;
        u32 sb_offset;

        sb->utime = CURRENT_TIME;
        for (i = 0; i < mddev->nb_dev; i++) {
                realdev = mddev->devices + i;
                if (!realdev->sb)
                        continue;
                dev = realdev->dev;
                sb_offset = realdev->sb_offset;
                set_blocksize(dev, MD_SB_BYTES);
                printk("md: updating raid superblock on device %s, sb_offset == %u\n", kdevname(dev), sb_offset);
                bh = getblk(dev, sb_offset / MD_SB_BLOCKS, MD_SB_BYTES);
                if (bh) {
                        sb = (md_superblock_t *) bh->b_data;
                        memcpy(sb, mddev->sb, MD_SB_BYTES);
                        memcpy(&sb->descriptor, sb->disks + realdev->sb->descriptor.number, MD_SB_DESCRIPTOR_WORDS * 4);
                        mark_buffer_uptodate(bh, 1);
                        mark_buffer_dirty(bh, 1);
                        ll_rw_block(WRITE, 1, &bh);
                        wait_on_buffer(bh);
                        bforget(bh);
                        fsync_dev(dev);
                        invalidate_buffers(dev);
                } else
                        printk(KERN_ERR "md: getblk failed for device %s\n", kdevname(dev));
        }
        return 0;
}
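/*
 * do_md_run() assembles and starts an array: it resolves the
 * personality from the minor's PERSONALITY bits (loading the module
 * via kerneld if need be), checks the members against the chunk
 * factor, analyzes the superblocks, hands the array to the
 * personality's run() method, and finally clears MD_SB_CLEAN on disk
 * so an unclean shutdown can be detected later.
 */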

static int do_md_run (int minor, int repart)
{
  int pnum, i, min, factor, current_ra, err;

  if (!md_dev[minor].nb_dev)
    return -EINVAL;

  if (md_dev[minor].pers)
    return -EBUSY;

  md_dev[minor].repartition=repart;

  if ((pnum=PERSONALITY(&md_dev[minor]) >> (PERSONALITY_SHIFT))
      >= MAX_PERSONALITY)
    return -EINVAL;

  /* Only RAID-1 and RAID-5 can have MD devices as underlying devices */
  if (pnum != (RAID1 >> PERSONALITY_SHIFT) && pnum != (RAID5 >> PERSONALITY_SHIFT)){
          for (i = 0; i < md_dev [minor].nb_dev; i++)
                  if (MAJOR (md_dev [minor].devices [i].dev) == MD_MAJOR)
                          return -EINVAL;
  }
  if (!pers[pnum])
  {
#ifdef CONFIG_KERNELD
    char module_name[80];
    sprintf (module_name, "md-personality-%d", pnum);
    request_module (module_name);
    if (!pers[pnum])
#endif
      return -EINVAL;
  }

  factor = min = 1 << FACTOR_SHIFT(FACTOR((md_dev+minor)));

  for (i=0; i<md_dev[minor].nb_dev; i++)
    if (md_dev[minor].devices[i].size<min)
    {
      printk ("Dev %s smaller than %dk, cannot shrink\n",
              partition_name (md_dev[minor].devices[i].dev), min);
      return -EINVAL;
    }

  for (i=0; i<md_dev[minor].nb_dev; i++) {
    fsync_dev(md_dev[minor].devices[i].dev);
    invalidate_buffers(md_dev[minor].devices[i].dev);
  }

  /* Resize devices according to the factor. It is used to align
     partition sizes on a given chunk size. */
  md_size[minor]=0;

  /*
   * Analyze the raid superblock
   */
  if (analyze_sb(minor, pnum))
    return -EINVAL;

  md_dev[minor].pers=pers[pnum];

  if ((err=md_dev[minor].pers->run (minor, md_dev+minor)))
  {
    md_dev[minor].pers=NULL;
    free_sb(md_dev + minor);
    return (err);
  }

  if (pnum != RAID0 >> PERSONALITY_SHIFT && pnum != LINEAR >> PERSONALITY_SHIFT)
  {
    md_dev[minor].sb->state &= ~(1 << MD_SB_CLEAN);
    md_update_sb(minor);
  }

  /* FIXME : We assume here we have blocks
     that are twice as large as sectors.
     THIS MAY NOT BE TRUE !!! */
  md_hd_struct[minor].start_sect=0;
  md_hd_struct[minor].nr_sects=md_size[minor]<<1;

  /* It would be better to have a per-md-dev read_ahead. Currently,
     we only use the smallest read_ahead among md-attached devices */

  current_ra=read_ahead[MD_MAJOR];

  for (i=0; i<md_dev[minor].nb_dev; i++)
    if (current_ra>read_ahead[MAJOR(md_dev[minor].devices[i].dev)])
      current_ra=read_ahead[MAJOR(md_dev[minor].devices[i].dev)];

  read_ahead[MD_MAJOR]=current_ra;

  return (0);
}


static int do_md_stop (int minor, struct inode *inode)
{
  int i;

  if (inode->i_count>1 || md_dev[minor].busy>1) /* ioctl : one open channel */
  {
    printk ("STOP_MD md%x failed : i_count=%ld, busy=%d\n", minor, inode->i_count, md_dev[minor].busy);
    return -EBUSY;
  }

  if (md_dev[minor].pers)
  {
    /*  The device won't exist anymore -> flush it now */
    fsync_dev (inode->i_rdev);
    invalidate_buffers (inode->i_rdev);
    if (md_dev[minor].sb)
    {
      md_dev[minor].sb->state |= 1 << MD_SB_CLEAN;
      md_update_sb(minor);
    }
    md_dev[minor].pers->stop (minor, md_dev+minor);
  }

  /* Remove locks. */
  if (md_dev[minor].sb)
    free_sb(md_dev + minor);
  for (i=0; i<md_dev[minor].nb_dev; i++)
    clear_inode (md_dev[minor].devices[i].inode);

  md_dev[minor].nb_dev=md_size[minor]=0;
  md_hd_struct[minor].nr_sects=0;
  md_dev[minor].pers=NULL;

  set_ra ();                    /* calculate new read_ahead */

  return (0);
}


static int do_md_add (int minor, kdev_t dev)
{
  int i;

  if (md_dev[minor].nb_dev==MAX_REAL)
    return -EINVAL;

  if (!fs_may_mount (dev) || md_dev[minor].pers)
    return -EBUSY;

  i=md_dev[minor].nb_dev++;
  md_dev[minor].devices[i].dev=dev;

  /* Lock the device by inserting a dummy inode. This doesn't
     smell very good, but I need to be consistent with the
     mount stuff, especially with fs_may_mount. If someone has
     a better idea, please help! */

  md_dev[minor].devices[i].inode=get_empty_inode ();
  md_dev[minor].devices[i].inode->i_dev=dev; /* don't care about
                                                other fields */
  insert_inode_hash (md_dev[minor].devices[i].inode);

  /* Sizes are now rounded at run time */

/*  md_dev[minor].devices[i].size=gen_real->sizes[MINOR(dev)]; HACKHACK*/

  if (blk_size[MAJOR(dev)][MINOR(dev)] == 0) {
        printk("md_add(): zero device size, huh, bailing out.\n");
        /* actually bail out: undo the registration done above */
        clear_inode (md_dev[minor].devices[i].inode);
        md_dev[minor].nb_dev--;
        return -EINVAL;
  }

  md_dev[minor].devices[i].size=blk_size[MAJOR(dev)][MINOR(dev)];

  printk ("REGISTER_DEV %s to md%x done\n", partition_name(dev), minor);
  return (0);
}
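/*
 * md_ioctl() multiplexes three kinds of requests: the md-specific
 * REGISTER_DEV / START_MD / STOP_MD calls handled above, the generic
 * block-device ioctls (BLKGETSIZE, BLKFLSBUF, BLKRASET, BLKRAGET,
 * HDIO_GETGEO, RO_IOCTLS), and -- for minors with the 0x80 bit set --
 * a pass-through to the matching personality's own ioctl hook.
 */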


static int md_ioctl (struct inode *inode, struct file *file,
                     unsigned int cmd, unsigned long arg)
{
  int minor, err;
  struct hd_geometry *loc = (struct hd_geometry *) arg;

  if (!suser())
    return -EACCES;

  if (((minor=MINOR(inode->i_rdev)) & 0x80) &&
      (minor & 0x7f) < MAX_PERSONALITY &&
      pers[minor & 0x7f] &&
      pers[minor & 0x7f]->ioctl)
    return (pers[minor & 0x7f]->ioctl (inode, file, cmd, arg));

  if (minor >= MAX_MD_DEV)
    return -EINVAL;

  switch (cmd)
  {
    case REGISTER_DEV:
      return do_md_add (minor, to_kdev_t ((dev_t) arg));

    case START_MD:
      return do_md_run (minor, (int) arg);

    case STOP_MD:
      return do_md_stop (minor, inode);

    case BLKGETSIZE:   /* Return device size */
    if (!arg) return -EINVAL;
    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
    if (err)
      return err;
    put_user (md_hd_struct[MINOR(inode->i_rdev)].nr_sects, (long *) arg);
    break;

    case BLKFLSBUF:
    fsync_dev (inode->i_rdev);
    invalidate_buffers (inode->i_rdev);
    break;

    case BLKRASET:
    if (arg > 0xff)
      return -EINVAL;
    read_ahead[MAJOR(inode->i_rdev)] = arg;
    return 0;

    case BLKRAGET:
    if (!arg) return -EINVAL;
    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
    if (err)
      return err;
    put_user (read_ahead[MAJOR(inode->i_rdev)], (long *) arg);
    break;

    /* We have a problem here : there is no easy way to give a CHS
       virtual geometry. We currently pretend that we have 2 heads and
       4 sectors (with a BIG number of cylinders...). This drives dosfs
       just mad... ;-) */

    case HDIO_GETGEO:
    if (!loc)  return -EINVAL;
    err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
    if (err)
      return err;
    put_user (2, (char *) &loc->heads);
    put_user (4, (char *) &loc->sectors);
    put_user (md_hd_struct[minor].nr_sects/8, (short *) &loc->cylinders);
    put_user (md_hd_struct[MINOR(inode->i_rdev)].start_sect,
                (long *) &loc->start);
    break;

    RO_IOCTLS(inode->i_rdev,arg);

    default:
    return -EINVAL;
  }

  return (0);
}


static int md_open (struct inode *inode, struct file *file)
{
  int minor=MINOR(inode->i_rdev);

  md_dev[minor].busy++;
  return (0);                    /* Always succeed */
}


static void md_release (struct inode *inode, struct file *file)
{
  int minor=MINOR(inode->i_rdev);

  sync_dev (inode->i_rdev);
  md_dev[minor].busy--;
}


static int md_read (struct inode *inode, struct file *file,
                    char *buf, int count)
{
  int minor=MINOR(inode->i_rdev);

  if (!md_dev[minor].pers)      /* Check if device is being run */
    return -ENXIO;

  return block_read (inode, file, buf, count);
}

static int md_write (struct inode *inode, struct file *file,
                     const char *buf, int count)
{
  int minor=MINOR(inode->i_rdev);

  if (!md_dev[minor].pers)      /* Check if device is being run */
    return -ENXIO;

  return block_write (inode, file, buf, count);
}
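/*
 * In the 2.0 file_operations layout the NULL slots below should be,
 * in order, lseek, readdir, select and mmap (best verified against
 * <linux/fs.h> of the matching tree).
 */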

static struct file_operations md_fops=
{
  NULL,
  md_read,
  md_write,
  NULL,
  NULL,
  md_ioctl,
  NULL,
  md_open,
  md_release,
  block_fsync
};

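/*
 * md_map() is the remapping hook the block layer calls for requests
 * on an md device: the personality's map() method rewrites *rdev and
 * *rsector to point at the underlying real device and sector.
 */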
int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size)
{
  if ((unsigned int) minor >= MAX_MD_DEV)
  {
    printk ("Bad md device %d\n", minor);
    return (-1);
  }

  if (!md_dev[minor].pers)
  {
    printk ("Oops ! md%d not running, giving up !\n", minor);
    return (-1);
  }

  return (md_dev[minor].pers->map(md_dev+minor, rdev, rsector, size));
}
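/*
 * Personalities providing their own make_request() take over buffer
 * handling, so the checks below replicate what ll_rw_block would do:
 * skip buffers that are already locked, clean (for writes) or
 * up-to-date (for reads), and lock the buffer before passing it down.
 */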

int md_make_request (int minor, int rw, struct buffer_head * bh)
{
        if (md_dev [minor].pers->make_request) {
                if (buffer_locked(bh))
                        return 0;
                if (rw == WRITE || rw == WRITEA) {
                        if (!buffer_dirty(bh))
                                return 0;
                        set_bit(BH_Lock, &bh->b_state);
                }
                if (rw == READ || rw == READA) {
                        if (buffer_uptodate(bh))
                                return 0;
                        set_bit (BH_Lock, &bh->b_state);
                }
                return (md_dev[minor].pers->make_request(md_dev+minor, rw, bh));
        } else {
                make_request (MAJOR(bh->b_rdev), rw, bh);
                return 0;
        }
}

static void do_md_request (void)
{
  printk ("Got md request, not good...\n");
  return;
}

/*
 * We run MAX_MD_THREADS from md_init() and arbitrate them at run time.
 * This is not so elegant, but how can we use kernel_thread() from within
 * loadable modules?
 */
struct md_thread *md_register_thread (void (*run) (void *), void *data)
{
        int i;
        for (i = 0; i < MAX_MD_THREADS; i++) {
                if (md_threads[i].run == NULL) {
                        md_threads[i].run = run;
                        md_threads[i].data = data;
                        return md_threads + i;
                }
        }
        return NULL;
}
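/*
 * Hypothetical usage sketch for the thread pool (the names raid1d and
 * conf are illustrative, not from this tree):
 *
 *      conf->thread = md_register_thread (raid1d, conf);
 *      if (!conf->thread)
 *              return -EBUSY;  -- all MAX_MD_THREADS slots taken
 *      ...
 *      md_wakeup_thread (conf->thread);
 *
 * md_unregister_thread() below frees the slot again; the daemon
 * itself keeps running and simply finds run == NULL.
 */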


void md_unregister_thread (struct md_thread *thread)
{
        thread->run = NULL;
        thread->data = NULL;
        thread->flags = 0;
}

void md_wakeup_thread(struct md_thread *thread)
{
        set_bit(THREAD_WAKEUP, &thread->flags);
        wake_up(&thread->wqueue);
}

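/*
 * Everything a modular personality needs is exported through this
 * 2.0-style symbol table; md_geninit() registers it below.
 */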
struct buffer_head *efind_buffer(kdev_t dev, int block, int size);

static struct symbol_table md_symbol_table=
{
#include <linux/symtab_begin.h>

  X(md_size),
  X(register_md_personality),
  X(unregister_md_personality),
  X(partition_name),
  X(md_dev),
  X(md_error),
  X(md_register_thread),
  X(md_unregister_thread),
  X(md_update_sb),
  X(md_map),
  X(md_wakeup_thread),
  X(efind_buffer),

#include <linux/symtab_end.h>
};

static void md_geninit (struct gendisk *gdisk)
{
  int i;

  for(i=0;i<MAX_MD_DEV;i++)
  {
    md_blocksizes[i] = 1024;
    md_gendisk.part[i].start_sect=-1; /* avoid partition check */
    md_gendisk.part[i].nr_sects=0;
    md_dev[i].pers=NULL;
  }

  blksize_size[MAJOR_NR] = md_blocksizes;
  register_symtab (&md_symbol_table);

  proc_register(&proc_root,
                &(struct proc_dir_entry)
              {
                PROC_MD, 6, "mdstat",
                S_IFREG | S_IRUGO, 1, 0, 0,
              });
}

int md_error (kdev_t mddev, kdev_t rdev)
{
    unsigned int minor = MINOR (mddev);
    if (MAJOR(mddev) != MD_MAJOR || minor >= MAX_MD_DEV)
        panic ("md_error gets unknown device\n");
    if (!md_dev [minor].pers)
        panic ("md_error gets an error for an unknown device\n");
    if (md_dev [minor].pers->error_handler)
        return (md_dev [minor].pers->error_handler (md_dev+minor, rdev));
    return 0;
}

int get_md_status (char *page)
{
  int sz=0, i, j, size;

  sz+=sprintf( page+sz, "Personalities : ");
  for (i=0; i<MAX_PERSONALITY; i++)
    if (pers[i])
      sz+=sprintf (page+sz, "[%d %s] ", i, pers[i]->name);

  page[sz-1]='\n';

  sz+=sprintf (page+sz, "read_ahead ");
  if (read_ahead[MD_MAJOR]==INT_MAX)
    sz+=sprintf (page+sz, "not set\n");
  else
    sz+=sprintf (page+sz, "%d sectors\n", read_ahead[MD_MAJOR]);

  for (i=0; i<MAX_MD_DEV; i++)
  {
    sz+=sprintf (page+sz, "md%d : %sactive", i, md_dev[i].pers ? "" : "in");

    if (md_dev[i].pers)
      sz+=sprintf (page+sz, " %s", md_dev[i].pers->name);

    size=0;
    for (j=0; j<md_dev[i].nb_dev; j++)
    {
      sz+=sprintf (page+sz, " %s",
                   partition_name(md_dev[i].devices[j].dev));
      size+=md_dev[i].devices[j].size;
    }

    if (md_dev[i].nb_dev) {
      if (md_dev[i].pers)
        sz+=sprintf (page+sz, " %d blocks", md_size[i]);
      else
        sz+=sprintf (page+sz, " %d blocks", size);
    }

    if (!md_dev[i].pers)
    {
      sz+=sprintf (page+sz, "\n");
      continue;
    }

    if (md_dev[i].pers->max_invalid_dev)
      sz+=sprintf (page+sz, " maxfault=%ld", MAX_FAULT(md_dev+i));

    sz+=md_dev[i].pers->status (page+sz, i, md_dev+i);
    sz+=sprintf (page+sz, "\n");
  }

  return (sz);
}

int register_md_personality (int p_num, struct md_personality *p)
{
  int i=(p_num >> PERSONALITY_SHIFT);

  if (i >= MAX_PERSONALITY)
    return -EINVAL;

  if (pers[i])
    return -EBUSY;

  pers[i]=p;
  printk ("%s personality registered\n", p->name);
  return 0;
}

int unregister_md_personality (int p_num)
{
  int i=(p_num >> PERSONALITY_SHIFT);

  if (i >= MAX_PERSONALITY || !pers[i])
    return -EINVAL;

  printk ("%s personality unregistered\n", pers[i]->name);
  pers[i]=NULL;
  return 0;
}
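/*
 * Body of the daemon threads spawned from md_init(). Each one loops
 * forever: run the registered handler (if any), flush the disk task
 * queue, then sleep until md_wakeup_thread() sets THREAD_WAKEUP. The
 * cli()/test_bit()/sleep sequence closes the race where a wakeup
 * arrives between the check and interruptible_sleep_on().
 */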

int md_thread(void * arg)
{
        struct md_thread *thread = arg;

        current->session = 1;
        current->pgrp = 1;
        sprintf(current->comm, "md_thread");

#ifdef __SMP__
        lock_kernel();
        syscall_count++;
#endif
        for (;;) {
                sti();
                clear_bit(THREAD_WAKEUP, &thread->flags);
                if (thread->run) {
                        thread->run(thread->data);
                        run_task_queue(&tq_disk);
                }
                current->signal = 0;
                cli();
                if (!test_bit(THREAD_WAKEUP, &thread->flags))
                        interruptible_sleep_on(&thread->wqueue);
        }
}

void linear_init (void);
void raid0_init (void);
void raid1_init (void);
void raid5_init (void);

int md_init (void)
{
  int i;

  printk ("md driver %d.%d.%d MAX_MD_DEV=%d, MAX_REAL=%d\n",
    MD_MAJOR_VERSION, MD_MINOR_VERSION, MD_PATCHLEVEL_VERSION,
    MAX_MD_DEV, MAX_REAL);

  if (register_blkdev (MD_MAJOR, "md", &md_fops))
  {
    printk ("Unable to get major %d for md\n", MD_MAJOR);
    return (-1);
  }

  for (i = 0; i < MAX_MD_THREADS; i++) {
    md_threads[i].run = NULL;
    init_waitqueue(&md_threads[i].wqueue);
    md_threads[i].flags = 0;
    kernel_thread (md_thread, md_threads + i, 0);
  }

  blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST;
  blk_dev[MD_MAJOR].current_request=NULL;
  read_ahead[MD_MAJOR]=INT_MAX;
  memset(md_dev, 0, MAX_MD_DEV * sizeof (struct md_dev));
  md_gendisk.next=gendisk_head;

  gendisk_head=&md_gendisk;

#ifdef CONFIG_MD_LINEAR
  linear_init ();
#endif
#ifdef CONFIG_MD_STRIPED
  raid0_init ();
#endif
#ifdef CONFIG_MD_MIRRORING
  raid1_init ();
#endif
#ifdef CONFIG_MD_RAID5
  raid5_init ();
#endif

  return (0);
}
