/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a number of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *      *request_fn
 *      *current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some SCSI devices and read by the MS-DOS
 * filesystem driver; it might find other uses later.
 */
int * hardsect_size[MAX_BLKDEV];

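/*
 * A minimal sketch (not part of the original file) of how a caller
 * could resolve the effective sizes for a device, honouring the
 * defaults documented above.  Both helper names are hypothetical.
 */
#if 0
static inline int effective_blksize(kdev_t dev)
{
        int major = MAJOR(dev);

        /* 1024 bytes is assumed when no table entry exists */
        if (blksize_size[major] && blksize_size[major][MINOR(dev)])
                return blksize_size[major][MINOR(dev)];
        return 1024;
}

static inline int effective_hardsect(kdev_t dev)
{
        int major = MAJOR(dev);

        /* 512 bytes is assumed when no table entry exists */
        if (hardsect_size[major] && hardsect_size[major][MINOR(dev)])
                return hardsect_size[major][MINOR(dev)];
        return 512;
}
#endif
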
/*
 * remove the plug and let it rip..
 */
void unplug_device(void * data)
{
        struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
        unsigned long flags;

        save_flags_cli(flags);
        if (dev->current_request == &dev->plug) {
                struct request * next = dev->plug.next;
                dev->current_request = next;
                if (next) {
                        dev->plug.next = NULL;
                        (dev->request_fn)();
                }
        }
        restore_flags(flags);
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
        dev->current_request = &dev->plug;
        queue_task_irq_off(&dev->plug_tq, &tq_disk);
}

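/*
 * A minimal sketch (not part of the original file) of the plug/unplug
 * cycle seen from a caller: queueing I/O against an idle device plugs
 * it, and draining tq_disk runs unplug_device(), which calls the
 * driver's request_fn.  submit_batch() is hypothetical.
 */
#if 0
static void submit_batch(struct buffer_head *bh[], int nr)
{
        /* make_request() plugs the idle device and chains the requests */
        ll_rw_block(WRITE, nr, bh);

        /* running the "disk" task queue removes the plug and starts I/O */
        run_task_queue(&tq_disk);
}
#endif
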
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        /* scan downwards from the last hit, wrapping at the bottom of
           the table; give up after one full lap */
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                run_task_queue(&tq_disk);
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail-safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

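/*
 * A minimal sketch (not part of the original file) of the RO fail-safe
 * in use; the device numbers are arbitrary example values.  Each minor
 * gets one bit: word minor >> 5, bit minor & 31.
 */
#if 0
void example_ro_guard(void)
{
        kdev_t dev = MKDEV(SCSI_DISK_MAJOR, 3);         /* hypothetical */

        set_device_ro(dev, 1);          /* sets bit 3 of ro_bits[major][0] */
        if (is_read_only(dev))
                printk("refusing to write to a read-only device\n");
}
#endif
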
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors,
                                   short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        } else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}

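/*
 * Worked example of the disk_index mapping used by add_request() below
 * (the kstat drive tables have four slots): a SCSI disk with minor
 * 0x23 yields (0x23 & 0x70) >> 4 == 2, and an IDE1 drive with minor
 * 0x40 yields ((0x40 & 0x40) >> 6) + 2 == 3.
 */
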
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */

void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        /* fall through */
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_blk_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}

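/*
 * The elevator insertion above depends on IN_ORDER() from blk.h.  As a
 * rough sketch (an assumption here, not quoted from blk.h), it sorts
 * requests by command, then device, then start sector:
 */
#if 0
#define IN_ORDER(s1,s2) \
((s1)->cmd < (s2)->cmd || ((s1)->cmd == (s2)->cmd && \
((s1)->rq_dev < (s2)->rq_dev || ((s1)->rq_dev == (s2)->rq_dev && \
(s1)->sector < (s2)->sector))))
#endif
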
#define MAX_SECTORS 244

static inline void attempt_merge (struct request *req)
{
        struct request *next = req->next;

        if (!next)
                return;
        if (req->sector + req->nr_sectors != next->sector)
                return;
        if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev ||
            req->nr_sectors + next->nr_sectors >= MAX_SECTORS)
                return;
#if 0
        printk ("%s: merge %ld, %ld + %ld == %ld\n", kdevname(req->rq_dev),
                req->sector, req->nr_sectors, next->nr_sectors,
                req->nr_sectors + next->nr_sectors);
#endif
        req->bhtail->b_reqnext = next->bh;
        req->bhtail = next->bhtail;
        req->nr_sectors += next->nr_sectors;
        next->rq_status = RQ_INACTIVE;
        req->next = next->next;
        wake_up (&wait_for_request);
}

void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        count = bh->b_size >> 9;
        sector = bh->b_rsector;

        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh)) {
#if 0
                printk("make_request(): buffer already locked\n");
#endif
                return;
        }
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */

        lock_buffer(bh);

        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
                        bh->b_state &= (1 << BH_Lock) | (1 << BH_FreeOnIO);
                        /* This may well happen - the kernel calls bread()
                           without checking the size of the device, e.g.,
                           when mounting a device. */
                        printk(KERN_INFO
                               "attempt to access beyond end of device\n");
                        printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
                               kdevname(bh->b_rdev), rw,
                               (sector + count)>>1,
                               blk_size[major][MINOR(bh->b_rdev)]);
                        unlock_buffer(bh);
                        return;
                }

        rw_ahead = 0;    /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
#if 0
                                printk ("make_request(): buffer uptodate for READ\n");
#endif
                                unlock_buffer(bh); /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
#if 0
                                printk ("make_request(): buffer clean for WRITE\n");
#endif
                                unlock_buffer(bh); /* Hmmph! Nothing to write */
                                return;
                        }
                        /* We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk(KERN_ERR "make_request: bad block dev cmd,"
                               " must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

/* look for a free request. */
       /* Loop uses two requests, 1 for loop and 1 for the real device.
        * Cut max_req in half to avoid running out and deadlocking. */
        if (major == LOOP_MAJOR)
             max_req >>= 1;

        /*
         * Try to coalesce the new request with old requests
         */
        cli();
        req = blk_dev[major].current_request;
        if (!req) {
                /* MD and loop can't handle plugging without deadlocking */
                if (major != MD_MAJOR && major != LOOP_MAJOR)
                        plug_device(blk_dev + major);
        } else switch (major) {
             case IDE0_MAJOR:   /* same as HD_MAJOR */
             case IDE1_MAJOR:
             case FLOPPY_MAJOR:
             case IDE2_MAJOR:
             case IDE3_MAJOR:
             case XT_DISK_MAJOR:
                /*
                 * The scsi disk and cdrom drivers completely remove the request
                 * from the queue when they start processing an entry.  For this
                 * reason it is safe to continue to add links to the top entry for
                 * those devices.
                 *
                 * All other drivers need to jump over the first entry, as that
                 * entry may be busy being processed and we thus can't change it.
                 */
                req = req->next;
                if (!req)
                        break;
                /* fall through */

             case SCSI_DISK_MAJOR:
             case SCSI_CDROM_MAJOR:

                do {
                        if (req->sem)
                                continue;
                        if (req->cmd != rw)
                                continue;
                        if (req->nr_sectors >= MAX_SECTORS)
                                continue;
                        if (req->rq_dev != bh->b_rdev)
                                continue;
                        /* Can we add it to the end of this request? */
                        if (req->sector + req->nr_sectors == sector) {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                /* Can we now merge this req with the next? */
                                attempt_merge(req);
                        /* or to the beginning? */
                        } else if (req->sector - count == sector) {
                                bh->b_reqnext = req->bh;
                                req->bh = bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                req->nr_sectors += count;
                        } else
                                continue;

                        mark_buffer_clean(bh);
                        sti();
                        return;
                } while ((req = req->next) != NULL);
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_rdev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_rdev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

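/*
 * Worked example of the read/write split in make_request() above:
 * assuming NR_REQUEST is 64 (its usual value in blk.h), writes may
 * claim at most (64 * 2) / 3 == 42 request slots, leaving the last
 * third of the table free for reads, which take precedence.
 */
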
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(KERN_ERR
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(KERN_NOTICE "ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }

                /* MD remaps blocks now */
                bh[i]->b_rdev = bh[i]->b_dev;
                bh[i]->b_rsector = bh[i]->b_blocknr * (bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
                if (major==MD_MAJOR &&
                    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
                            &bh[i]->b_rsector, bh[i]->b_size >> 9)) {
                        printk (KERN_ERR
                                "Bad md_map in ll_rw_block\n");
                        goto sorry;
                }
#endif
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk(KERN_NOTICE "Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);
#ifdef CONFIG_BLK_DEV_MD
                        if (MAJOR(bh[i]->b_dev) == MD_MAJOR) {
                                md_make_request(MINOR (bh[i]->b_dev), rw, bh[i]);
                                continue;
                        }
#endif
                        make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
                }
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}

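/*
 * A minimal sketch (not part of the original file) of the usual
 * calling pattern for ll_rw_block(), as in bread()-style helpers:
 * submit the request, then sleep until end-of-I/O unlocks the buffer.
 * example_read_block() is hypothetical.
 */
#if 0
static void example_read_block(struct buffer_head *bh)
{
        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
        }
}
#endif
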
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        int max_req;
        unsigned long rsector;
        kdev_t rdev;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk(KERN_NOTICE "ll_rw_swap_file: trying to swap to"
                                   " nonexistent block-device\n");
                return;
        }
        max_req = NR_REQUEST;
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        max_req = (NR_REQUEST * 2) / 3;
                        if (is_read_only(dev)) {
                                printk(KERN_NOTICE
                                       "Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        if (major == LOOP_MAJOR)
             max_req >>= 1;
        for (j = 0, i = 0; i < nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        rdev = dev;
                        rsector = b[i] * (buffersize >> 9);
#ifdef CONFIG_BLK_DEV_MD
                        if (major==MD_MAJOR &&
                            md_map (MINOR(dev), &rdev,
                                    &rsector, buffersize >> 9)) {
                                printk (KERN_ERR
                                        "Bad md_map in ll_rw_swap_file\n");
                                return;
                        }
#endif

                        if (j == 0) {
                                req[j] = get_request_wait(max_req, rdev);
                        } else {
                                cli();
                                req[j] = get_request(max_req, rdev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = rsector;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(MAJOR(rdev)+blk_dev,req[j]);
                }
                run_task_queue(&tq_disk);
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}

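/*
 * Worked example of the sizing in ll_rw_swap_file() above: with
 * PAGE_SIZE = 4096 and nb = 4 swap entries per page, buffersize is
 * 1024 bytes, so each request covers buffersize >> 9 == 2 sectors.
 * Requests go out in batches of at most eight, and the caller sleeps
 * once per completed request via down(&sem).
 */
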
int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn      = NULL;
                dev->current_request = NULL;
                dev->plug.rq_status  = RQ_INACTIVE;
                dev->plug.cmd        = -1;
                dev->plug.next       = NULL;
                dev->plug_tq.routine = &unplug_device;
                dev->plug_tq.data    = dev;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits, 0, sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
#if !defined(CONFIG_ARCH_ARC) && !defined(CONFIG_ARCH_SA110EVAL)
        outb_p(0xc, 0x3f2);     /* no floppy driver: quiesce the FDC (motors off) */
#endif
#endif
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}