OpenCores Subversion repository or1k_old
https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

or1k_old/trunk/uclinux/uClinux-2.0.x/drivers/block/ll_rw_blk.c (rev 1782)

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      *request_fn
 *      *current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs
 * driver; there might be other uses for it later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Max number of segments per request
 */
int * max_segments[MAX_BLKDEV] = { NULL, NULL, };
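
/*
 * A block driver typically fills in these per-major tables from its init
 * routine.  A minimal sketch (hypothetical driver; the "mydev" names and
 * MYDEV_MAJOR are illustrative, not part of this file):
 *
 *      static int mydev_blocksizes[MYDEV_MINORS];      [bytes per block]
 *      static int mydev_sizes[MYDEV_MINORS];           [size in kilobytes]
 *
 *      blksize_size[MYDEV_MAJOR] = mydev_blocksizes;
 *      blk_size[MYDEV_MAJOR]     = mydev_sizes;
 *      read_ahead[MYDEV_MAJOR]   = 8;          [sectors of read-ahead]
 *      blk_dev[MYDEV_MAJOR].request_fn = mydev_request;
 *
 * Leaving an entry NULL selects the defaults documented above: 1024-byte
 * blocks, 512-byte hardware sectors, and no minor size checking.
 */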

static inline int get_max_sectors(kdev_t dev)
{
        if (!max_sectors[MAJOR(dev)])
                return MAX_SECTORS;
        return max_sectors[MAJOR(dev)][MINOR(dev)];
}

static inline int get_max_segments(kdev_t dev)
{
        if (!max_segments[MAJOR(dev)])
                return MAX_SEGMENTS;
        return max_segments[MAJOR(dev)][MINOR(dev)];
}

/*
 * remove the plug and let it rip..
 */
void unplug_device(void * data)
{
        struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
        unsigned long flags;

        save_flags(flags);
        cli();
        if (dev->current_request == &dev->plug) {
                struct request * next = dev->plug.next;
                dev->current_request = next;
                if (next) {
                        dev->plug.next = NULL;
                        (dev->request_fn)();
                }
        }
        restore_flags(flags);
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
        dev->current_request = &dev->plug;
        queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
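
/*
 * Life cycle of a plug: plug_device() parks the dummy &dev->plug request at
 * the head of the queue, so new requests are merely sorted in behind it.
 * When run_task_queue(&tq_disk) later executes the queued plug_tq entry,
 * unplug_device() removes the dummy and calls request_fn once, with the
 * whole batch already on the queue.
 */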

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
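/*
 * The scan starts at the slot found by the previous call (prev_found) and
 * walks backwards, wrapping from all_requests[0] around to entry n-1, so
 * free slots are handed out round-robin instead of always probing from the
 * front of the table.
 */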
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                run_task_queue(&tq_disk);
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
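
/*
 * ro_bits keeps one read-only bit per minor: minor >> 5 picks one of the
 * eight 32-bit words for that major, and (minor & 31) the bit within it.
 * For example, minor 37 lands in word 1, bit 5.
 */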

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors,
                                   short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        } else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */

void add_request(struct blk_dev_struct * dev, struct request * req)
{
        int major = MAJOR(req->rq_dev);
        int minor = MINOR(req->rq_dev);
        struct request * tmp;
        short            disk_index;

        switch (major) {
                case DAC960_MAJOR+0:
                        disk_index = (minor & 0x00f8) >> 3;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case SCSI_DISK_MAJOR:
                        disk_index = (minor & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (minor & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((minor & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_blk_major(major))
                (dev->request_fn)();

        if ( (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7) ||
             (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7))
          (dev->request_fn)();

        sti();
}
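
/*
 * attempt_merge() tries to coalesce a request with the one that follows it
 * on the queue.  The two must be contiguous on disk, have the same command
 * and device, have nobody sleeping on the second one (next->sem), and fit
 * within max_sectors/max_segments; on success the second request is marked
 * RQ_INACTIVE and anyone sleeping on wait_for_request is woken.
 */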

static inline void attempt_merge (struct request *req,
                                  int max_sectors,
                                  int max_segments)
{
        struct request *next = req->next;
        int total_segments;

        if (!next)
                return;
        if (req->sector + req->nr_sectors != next->sector)
                return;
        if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev ||
            req->nr_sectors + next->nr_sectors > max_sectors)
                return;
        total_segments = req->nr_segments + next->nr_segments;
        if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
                total_segments--;
        if (total_segments > max_segments)
                return;
        req->bhtail->b_reqnext = next->bh;
        req->bhtail = next->bhtail;
        req->nr_sectors += next->nr_sectors;
        req->nr_segments = total_segments;
        next->rq_status = RQ_INACTIVE;
        req->next = next->next;
        wake_up (&wait_for_request);
}

void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req, max_sectors, max_segments;

        count = bh->b_size >> 9;
        sector = bh->b_rsector;

        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh)) {
#if 0
                printk("make_request(): buffer already locked\n");
#endif
                return;
        }
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */

        lock_buffer(bh);

        if (blk_size[major])
               if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
                        bh->b_state &= (1 << BH_Lock) | (1 << BH_FreeOnIO);
                        /* This may well happen - the kernel calls bread()
                           without checking the size of the device, e.g.,
                           when mounting a device. */
                        printk(KERN_INFO
                               "attempt to access beyond end of device\n");
                        printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
                               kdevname(bh->b_rdev), rw,
                               (sector + count)>>1,
                               blk_size[major][MINOR(bh->b_rdev)]);
                        unlock_buffer(bh);
                        return;
                }
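
        /*
         * Units above: sector and count are in 512-byte sectors (a 1024-byte
         * buffer gives count == 2), while blk_size[][] holds sizes in
         * 1024-byte units, hence the (sector + count)>>1 in the comparison.
         */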

        rw_ahead = 0;    /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
#if 0
                                printk ("make_request(): buffer uptodate for READ\n");
#endif
                                unlock_buffer(bh); /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
#if 0
                                printk ("make_request(): buffer clean for WRITE\n");
#endif
                                unlock_buffer(bh); /* Hmmph! Nothing to write */
                                return;
                        }
                        /* We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk(KERN_ERR "make_request: bad block dev cmd,"
                               " must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

/* look for a free request. */
       /* Loop uses two requests, 1 for loop and 1 for the real device.
        * Cut max_req in half to avoid running out and deadlocking. */
        if (major == LOOP_MAJOR)
             max_req >>= 1;

        /*
         * Try to coalesce the new request with old requests
         */
        max_sectors = get_max_sectors(bh->b_rdev);
        max_segments = get_max_segments(bh->b_rdev);

        cli();
        req = blk_dev[major].current_request;
        if (!req) {
                /* MD and loop can't handle plugging without deadlocking */
                if (major != MD_MAJOR && major != LOOP_MAJOR)
                        plug_device(blk_dev + major);
        } else switch (major) {
             case IDE0_MAJOR:   /* same as HD_MAJOR */
             case IDE1_MAJOR:
             case FLOPPY_MAJOR:
             case IDE2_MAJOR:
             case IDE3_MAJOR:
                /*
                 * The scsi disk and cdrom drivers completely remove the request
                 * from the queue when they start processing an entry.  For this
                 * reason it is safe to continue to add links to the top entry for
                 * those devices.
                 *
                 * All other drivers need to jump over the first entry, as that
                 * entry may be busy being processed and we thus can't change it.
                 */
                req = req->next;
                if (!req)
                        break;
                /* fall through */

             case SCSI_DISK_MAJOR:
             case SCSI_CDROM_MAJOR:
             case DAC960_MAJOR+0:
             case DAC960_MAJOR+1:
             case DAC960_MAJOR+2:
             case DAC960_MAJOR+3:
             case DAC960_MAJOR+4:
             case DAC960_MAJOR+5:
             case DAC960_MAJOR+6:
             case DAC960_MAJOR+7:
             case COMPAQ_SMART2_MAJOR+0:
             case COMPAQ_SMART2_MAJOR+1:
             case COMPAQ_SMART2_MAJOR+2:
             case COMPAQ_SMART2_MAJOR+3:
             case COMPAQ_SMART2_MAJOR+4:
             case COMPAQ_SMART2_MAJOR+5:
             case COMPAQ_SMART2_MAJOR+6:
             case COMPAQ_SMART2_MAJOR+7:
                do {
                        if (req->sem)
                                continue;
                        if (req->cmd != rw)
                                continue;
                        if (req->nr_sectors + count > max_sectors)
                                continue;
                        if (req->rq_dev != bh->b_rdev)
                                continue;
                        /* Can we add it to the end of this request? */
                        if (req->sector + req->nr_sectors == sector) {
                                if (req->bhtail->b_data + req->bhtail->b_size
                                    != bh->b_data) {
                                        if (req->nr_segments < max_segments)
                                                req->nr_segments++;
                                        else continue;
                                }
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                /* Can we now merge this req with the next? */
                                attempt_merge(req, max_sectors, max_segments);
                        /* or to the beginning? */
                        } else if (req->sector - count == sector) {
                                if (bh->b_data + bh->b_size
                                    != req->bh->b_data) {
                                        if (req->nr_segments < max_segments)
                                                req->nr_segments++;
                                        else continue;
                                }
                                bh->b_reqnext = req->bh;
                                req->bh = bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                req->nr_sectors += count;
                        } else
                                continue;

                        mark_buffer_clean(bh);
                        sti();
                        return;
                } while ((req = req->next) != NULL);
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_rdev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_rdev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->nr_segments = 1;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(KERN_ERR
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(KERN_NOTICE "ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }

                /* Md remaps blocks now */
                bh[i]->b_rdev = bh[i]->b_dev;
                bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
                if (major==MD_MAJOR &&
                    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
                            &bh[i]->b_rsector, bh[i]->b_size >> 9)) {
                        printk (KERN_ERR
                                "Bad md_map in ll_rw_block\n");
                        goto sorry;
                }
#endif
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk(KERN_NOTICE "Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);
#ifdef CONFIG_BLK_DEV_MD
                        if (MAJOR(bh[i]->b_dev) == MD_MAJOR) {
                                md_make_request(MINOR (bh[i]->b_dev), rw, bh[i]);
                                continue;
                        }
#endif
                        make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
                }
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
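
/*
 * Typical caller pattern (a minimal sketch of how bread() in fs/buffer.c
 * drives ll_rw_block(); error handling and brelse() elided):
 *
 *      struct buffer_head *bh = getblk(dev, block, size);
 *      if (!buffer_uptodate(bh)) {
 *              ll_rw_block(READ, 1, &bh);
 *              wait_on_buffer(bh);
 *      }
 *      ... bh->b_data holds the block here if buffer_uptodate(bh) ...
 */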

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        int max_req;
        unsigned long rsector;
        kdev_t rdev;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk(KERN_NOTICE "ll_rw_swap_file: trying to swap to"
                                   " nonexistent block-device\n");
                return;
        }
        max_req = NR_REQUEST;
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        max_req = (NR_REQUEST * 2) / 3;
                        if (is_read_only(dev)) {
                                printk(KERN_NOTICE
                                       "Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        if (major == LOOP_MAJOR)
             max_req >>= 1;
        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        rdev = dev;
                        rsector = b[i] * (buffersize >> 9);
#ifdef CONFIG_BLK_DEV_MD
                        if (major==MD_MAJOR &&
                            md_map (MINOR(dev), &rdev,
                                    &rsector, buffersize >> 9)) {
                                printk (KERN_ERR
                                        "Bad md_map in ll_rw_swap_file\n");
                                return;
                        }
#endif

                        if (j == 0) {
                                req[j] = get_request_wait(max_req, rdev);
                        } else {
                                cli();
                                req[j] = get_request(max_req, rdev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = rsector;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->nr_segments = 1;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(MAJOR(rdev)+blk_dev,req[j]);
                }
                run_task_queue(&tq_disk);
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}
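
/*
 * Completion in ll_rw_swap_file() above: each request is submitted with
 * req->sem pointing at the on-stack semaphore (created locked), the driver
 * up()s it once per finished request (see end_request() in <linux/blk.h>),
 * and the final down() loop therefore blocks until the whole batch of at
 * most eight requests has completed before the next batch is built.
 */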

int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn      = NULL;
                dev->current_request = NULL;
                dev->plug.rq_status  = RQ_INACTIVE;
                dev->plug.cmd        = -1;
                dev->plug.next       = NULL;
                dev->plug_tq.routine = &unplug_device;
                dev->plug_tq.data    = dev;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_BLKMEM
        blkmem_init();
#endif
#ifdef CONFIG_DEV_FLASH
        flash_init(); /* this also does the char dev if need be */
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_CDI_INIT
        cdi_init();             /* this MUST precede ide_init */
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_PARIDE
        { extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
#ifndef CONFIG_COLDFIRE
        outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}
