/*
 *      IDE I/O functions
 *
 *      Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>

#include "ide_modes.h"

/**
 *      ide_end_request         -       complete an IDE I/O
 *      @drive: IDE device for the I/O
 *      @uptodate: non-zero if the I/O completed without error
 *
 *      This is our end_request wrapper function. We complete the I/O,
 *      update random number input and dequeue the request.
 */

int ide_end_request (ide_drive_t *drive, int uptodate)
{
        struct request *rq;
        unsigned long flags;
        int ret = 1;

        spin_lock_irqsave(&io_request_lock, flags);
        rq = HWGROUP(drive)->rq;

        /*
         * decide whether to reenable DMA -- 3 is a random magic for now,
         * if we DMA timeout more than 3 times, just stay in PIO
         */
        if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
                drive->state = 0;
                HWGROUP(drive)->hwif->ide_dma_on(drive);
        }

        if (!end_that_request_first(rq, uptodate, drive->name)) {
                add_blkdev_randomness(MAJOR(rq->rq_dev));
                blkdev_dequeue_request(rq);
                HWGROUP(drive)->rq = NULL;
                end_that_request_last(rq);
                ret = 0;
        }

        spin_unlock_irqrestore(&io_request_lock, flags);
        return ret;
}

EXPORT_SYMBOL(ide_end_request);
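
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * subdriver end_request hook would normally just forward to
 * ide_end_request(), which dequeues the request and feeds the block
 * randomness pool.  Real subdrivers add their own bookkeeping first.
 */
#if 0
static int example_end_request (ide_drive_t *drive, int uptodate)
{
        /* uptodate != 0 means the current chunk completed successfully */
        return ide_end_request(drive, uptodate);
}
#endif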

/**
 *      ide_end_drive_cmd       -       end an explicit drive command
 *      @drive: drive the command was issued to
 *      @stat: status bits
 *      @err: error bits
 *
 *      Clean up after success/failure of an explicit drive command.
 *      These get thrown onto the queue so they are synchronized with
 *      real I/O operations on the drive.
 *
 *      In LBA48 mode we have to read the register set twice to get
 *      all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long flags;
        struct request *rq;

        spin_lock_irqsave(&io_request_lock, flags);
        rq = HWGROUP(drive)->rq;
        spin_unlock_irqrestore(&io_request_lock, flags);

        switch(rq->cmd) {
                case IDE_DRIVE_CMD:
                {
                        u8 *args = (u8 *) rq->buffer;
                        if (rq->errors == 0)
                                rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

                        if (args) {
                                args[0] = stat;
                                args[1] = err;
                                args[2] = hwif->INB(IDE_NSECTOR_REG);
                        }
                        break;
                }
                case IDE_DRIVE_TASK:
                {
                        u8 *args = (u8 *) rq->buffer;
                        if (rq->errors == 0)
                                rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

                        if (args) {
                                args[0] = stat;
                                args[1] = err;
                                args[2] = hwif->INB(IDE_NSECTOR_REG);
                                args[3] = hwif->INB(IDE_SECTOR_REG);
                                args[4] = hwif->INB(IDE_LCYL_REG);
                                args[5] = hwif->INB(IDE_HCYL_REG);
                                args[6] = hwif->INB(IDE_SELECT_REG);
                        }
                        break;
                }
                case IDE_DRIVE_TASKFILE:
                {
                        ide_task_t *args = (ide_task_t *) rq->special;
                        if (rq->errors == 0)
                                rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

                        if (args) {
                                if (args->tf_in_flags.b.data) {
                                        u16 data                        = hwif->INW(IDE_DATA_REG);
                                        args->tfRegister[IDE_DATA_OFFSET]       = (data) & 0xFF;
                                        args->hobRegister[IDE_DATA_OFFSET_HOB]  = (data >> 8) & 0xFF;
                                }
                                args->tfRegister[IDE_ERROR_OFFSET]   = err;
                                args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
                                args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
                                args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
                                args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
                                args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
                                args->tfRegister[IDE_STATUS_OFFSET]  = stat;

                                if (drive->addressing == 1) {
                                        hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG_HOB);
                                        args->hobRegister[IDE_FEATURE_OFFSET_HOB] = hwif->INB(IDE_FEATURE_REG);
                                        args->hobRegister[IDE_NSECTOR_OFFSET_HOB] = hwif->INB(IDE_NSECTOR_REG);
                                        args->hobRegister[IDE_SECTOR_OFFSET_HOB]  = hwif->INB(IDE_SECTOR_REG);
                                        args->hobRegister[IDE_LCYL_OFFSET_HOB]    = hwif->INB(IDE_LCYL_REG);
                                        args->hobRegister[IDE_HCYL_OFFSET_HOB]    = hwif->INB(IDE_HCYL_REG);
                                }
                        }
                        break;
                }
                default:
                        break;
        }
        spin_lock_irqsave(&io_request_lock, flags);
        blkdev_dequeue_request(rq);
        HWGROUP(drive)->rq = NULL;
        end_that_request_last(rq);
        spin_unlock_irqrestore(&io_request_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);

/**
 *      try_to_flush_leftover_data      -       flush junk
 *      @drive: drive to flush
 *
 *      try_to_flush_leftover_data() is invoked in response to a drive
 *      unexpectedly having its DRQ_STAT bit set.  As an alternative to
 *      resetting the drive, this routine tries to clear the condition
 *      by reading a sector's worth of data from the drive.  Of course,
 *      this may not help if the drive is *waiting* for data from *us*.
 */

void try_to_flush_leftover_data (ide_drive_t *drive)
{
        int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

        if (drive->media != ide_disk)
                return;
        while (i > 0) {
                u32 buffer[16];
                u32 wcount = (i > 16) ? 16 : i;

                i -= wcount;
                HWIF(drive)->ata_input_data(drive, buffer, wcount);
        }
}

EXPORT_SYMBOL(try_to_flush_leftover_data);

/*
 * FIXME Add an ATAPI error
 */

/**
 *      ide_error       -       handle an error on the IDE
 *      @drive: drive the error occurred on
 *      @msg: message to report
 *      @stat: status bits
 *
 *      ide_error() takes action based on the error returned by the drive.
 *      For normal I/O that may well include retries. We deal with
 *      both new-style (taskfile) and old style command handling here.
 *      In the case of taskfile command handling there is work left to
 *      do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
        ide_hwif_t *hwif;
        struct request *rq;
        u8 err;

        err = ide_dump_status(drive, msg, stat);
        if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
                return ide_stopped;

        hwif = HWIF(drive);
        /* retry only "normal" I/O: */
        if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, stat, err);
                return ide_stopped;
        }
        if (rq->cmd == IDE_DRIVE_TASKFILE) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, stat, err);
//              ide_end_taskfile(drive, stat, err);
                return ide_stopped;
        }

        if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
                /* other bits are useless when BUSY */
                rq->errors |= ERROR_RESET;
        } else {
                if (drive->media != ide_disk)
                        goto media_out;

                if (stat & ERR_STAT) {
                        /* err has different meaning on cdrom and tape */
                        if (err == ABRT_ERR) {
                                if (drive->select.b.lba &&
                                    (hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY))
                                        /* some newer drives don't
                                         * support WIN_SPECIFY
                                         */
                                        return ide_stopped;
                        } else if ((err & BAD_CRC) == BAD_CRC) {
                                drive->crc_count++;
                                /* UDMA crc error -- just retry the operation */
                        } else if (err & (BBD_ERR | ECC_ERR)) {
                                /* retries won't help these */
                                rq->errors = ERROR_MAX;
                        } else if (err & TRK0_ERR) {
                                /* help it find track zero */
                                rq->errors |= ERROR_RECAL;
                        }
                }
media_out:
                if ((stat & DRQ_STAT) && rq->cmd != WRITE)
                        try_to_flush_leftover_data(drive);
        }
        if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) {
                /* force an abort */
                hwif->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
        }
        if (rq->errors >= ERROR_MAX) {
                DRIVER(drive)->end_request(drive, 0);
        } else {
                if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
                        ++rq->errors;
                        return ide_do_reset(drive);
                }
                if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
                        drive->special.b.recalibrate = 1;
                ++rq->errors;
        }
        return ide_stopped;
}

EXPORT_SYMBOL(ide_error);
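
/*
 * Illustrative sketch (hypothetical, not taken from this file): ide_error()
 * is normally reached through the subdriver's DRIVER(drive)->error hook.
 * A subdriver with no recovery logic of its own could simply forward to
 * ide_error(), roughly like this.
 */
#if 0
static ide_startstop_t example_error (ide_drive_t *drive, const char *msg, u8 stat)
{
        /* let the generic IDE error logic decide on retry/reset/abort */
        return ide_error(drive, msg, stat);
}
#endif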

/**
 *      ide_abort       -       abort pending IDE operations
 *      @drive: drive the error occurred on
 *      @msg: message to report
 *
 *      ide_abort kills and cleans up when we are about to do a
 *      host initiated reset on active commands. Longer term we
 *      want handlers to have sensible abort handling themselves.
 *
 *      This differs fundamentally from ide_error because in
 *      this case the command is doing just fine when we
 *      blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
        ide_hwif_t *hwif;
        struct request *rq;

        if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
                return ide_stopped;

        hwif = HWIF(drive);
        /* retry only "normal" I/O: */
        if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, BUSY_STAT, 0);
                return ide_stopped;
        }
        if (rq->cmd == IDE_DRIVE_TASKFILE) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, BUSY_STAT, 0);
//              ide_end_taskfile(drive, BUSY_STAT, 0);
                return ide_stopped;
        }

        rq->errors |= ERROR_RESET;
        DRIVER(drive)->end_request(drive, 0);
        return ide_stopped;
}

EXPORT_SYMBOL(ide_abort);

/**
 *      ide_cmd         -       issue a simple drive command
 *      @drive: drive the command is for
 *      @cmd: command byte
 *      @nsect: sector byte
 *      @handler: handler for the command completion
 *
 *      Issue a simple drive command with interrupts.
 *      The drive must be selected beforehand.
 */

void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, ide_handler_t *handler)
{
        ide_hwif_t *hwif = HWIF(drive);
        if (IDE_CONTROL_REG)
                hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */
        SELECT_MASK(drive,0);
        hwif->OUTB(nsect,IDE_NSECTOR_REG);
        ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}

EXPORT_SYMBOL(ide_cmd);

/**
 *      drive_cmd_intr          -       drive command completion interrupt
 *      @drive: drive the completion interrupt occurred on
 *
 *      drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 *      We do any necessary data reading and then wait for the drive to
 *      go non-busy. At that point we may read the error data and complete
 *      the request.
 */

ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        ide_hwif_t *hwif = HWIF(drive);
        u8 *args = (u8 *) rq->buffer;
        u8 stat = hwif->INB(IDE_STATUS_REG);
        int retries = 10;

        local_irq_enable();
        if ((stat & DRQ_STAT) && args && args[3]) {
                u8 io_32bit = drive->io_32bit;
                drive->io_32bit = 0;
                hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
                drive->io_32bit = io_32bit;
                while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
                        udelay(100);
        }

        if (!OK_STAT(stat, READY_STAT, BAD_STAT))
                return DRIVER(drive)->error(drive, "drive_cmd", stat);
                /* calls ide_end_drive_cmd */
        ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
        return ide_stopped;
}

EXPORT_SYMBOL(drive_cmd_intr);

/**
 *      do_special              -       issue some special commands
 *      @drive: drive the command is for
 *
 *      do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 *      commands to a drive.  It used to do much more, but has been scaled
 *      back.
 */

ide_startstop_t do_special (ide_drive_t *drive)
{
        special_t *s = &drive->special;

#ifdef DEBUG
        printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
        if (s->b.set_tune) {
                s->b.set_tune = 0;
                if (HWIF(drive)->tuneproc != NULL)
                        HWIF(drive)->tuneproc(drive, drive->tune_req);
                return ide_stopped;
        }
        else
                return DRIVER(drive)->special(drive);
}

EXPORT_SYMBOL(do_special);

/**
 *      execute_drive_cmd       -       issue a special drive command
 *      @drive: the drive to issue the command on
 *      @rq: the request structure holding the command
 *
 *      execute_drive_cmd() issues a special drive command, usually
 *      initiated by ioctl() from the external hdparm program. The
 *      command can be a drive command, drive task or taskfile
 *      operation. Weirdly you can call it with NULL to wait for
 *      all commands to finish. Don't do this as that is due to change.
 */

ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = HWIF(drive);
        switch(rq->cmd) {
                case IDE_DRIVE_TASKFILE:
                {
                        ide_task_t *args = rq->special;

                        if (!(args)) break;

                        if (args->tf_out_flags.all != 0)
                                return flagged_taskfile(drive, args);
                        return do_rw_taskfile(drive, args);
                }
                case IDE_DRIVE_TASK:
                {
                        u8 *args = rq->buffer;
                        u8 sel;

                        if (!(args)) break;
#ifdef DEBUG
                        printk("%s: DRIVE_TASK_CMD ", drive->name);
                        printk("cmd=0x%02x ", args[0]);
                        printk("fr=0x%02x ", args[1]);
                        printk("ns=0x%02x ", args[2]);
                        printk("sc=0x%02x ", args[3]);
                        printk("lcyl=0x%02x ", args[4]);
                        printk("hcyl=0x%02x ", args[5]);
                        printk("sel=0x%02x\n", args[6]);
#endif
                        hwif->OUTB(args[1], IDE_FEATURE_REG);
                        hwif->OUTB(args[3], IDE_SECTOR_REG);
                        hwif->OUTB(args[4], IDE_LCYL_REG);
                        hwif->OUTB(args[5], IDE_HCYL_REG);
                        sel = (args[6] & ~0x10);
                        if (drive->select.b.unit)
                                sel |= 0x10;
                        hwif->OUTB(sel, IDE_SELECT_REG);
                        ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
                        return ide_started;
                }
                case IDE_DRIVE_CMD:
                {
                        u8 *args = rq->buffer;

                        if (!(args)) break;
#ifdef DEBUG
                        printk("%s: DRIVE_CMD ", drive->name);
                        printk("cmd=0x%02x ", args[0]);
                        printk("sc=0x%02x ", args[1]);
                        printk("fr=0x%02x ", args[2]);
                        printk("xx=0x%02x\n", args[3]);
#endif
                        if (args[0] == WIN_SMART) {
                                hwif->OUTB(0x4f, IDE_LCYL_REG);
                                hwif->OUTB(0xc2, IDE_HCYL_REG);
                                hwif->OUTB(args[2],IDE_FEATURE_REG);
                                hwif->OUTB(args[1],IDE_SECTOR_REG);
                                ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
                                return ide_started;
                        }
                        hwif->OUTB(args[2],IDE_FEATURE_REG);
                        ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
                        return ide_started;
                }
                default:
                        break;
        }
        /*
         * NULL is actually a valid way of waiting for
         * all current requests to be flushed from the queue.
         */
#ifdef DEBUG
        printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
        ide_end_drive_cmd(drive,
                        hwif->INB(IDE_STATUS_REG),
                        hwif->INB(IDE_ERROR_REG));
        return ide_stopped;
}

EXPORT_SYMBOL(execute_drive_cmd);

/**
 *      ide_start_request       -       start of I/O and command issuing for IDE
 *
 *      ide_start_request() initiates handling of a new I/O request. It
 *      accepts commands and I/O (read/write) requests. It also does
 *      the final remapping for weird stuff like EZDrive. Once
 *      device mapper can work at the sector level the EZDrive stuff can
 *      go away.
 *
 *      FIXME: this function needs a rename
 */

static ide_startstop_t ide_start_request (ide_drive_t *drive, struct request *rq)
{
        ide_startstop_t startstop;
        unsigned long block, blockend;
        unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
        ide_hwif_t *hwif = HWIF(drive);

#ifdef DEBUG
        printk("%s: ide_start_request: current=0x%08lx\n",
                hwif->name, (unsigned long) rq);
#endif

        /* bail early if we've exceeded max_failures */
        if (!drive->present || (drive->max_failures && (drive->failures > drive->max_failures))) {
                goto kill_rq;
        }

        /*
         * bail early if we've sent the device to sleep; how to wake it
         * again needs to be a masked flag.  FIXME for proper operation.
         */
        if (drive->suspend_reset) {
                goto kill_rq;
        }

        if (unit >= MAX_DRIVES) {
                printk(KERN_ERR "%s: bad device number: %s\n",
                        hwif->name, kdevname(rq->rq_dev));
                goto kill_rq;
        }
#ifdef DEBUG
        if (rq->bh && !buffer_locked(rq->bh)) {
                printk(KERN_ERR "%s: block not locked\n", drive->name);
                goto kill_rq;
        }
#endif
        block    = rq->sector;
        blockend = block + rq->nr_sectors;

        if (blk_fs_request(rq) &&
            (drive->media == ide_disk || drive->media == ide_floppy)) {
                if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {
                        printk(KERN_ERR "%s%c: bad access: block=%ld, count=%ld\n", drive->name,
                         (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);
                        goto kill_rq;
                }
                block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;
        }
        /* Yecch - this will shift the entire interval,
           possibly killing some innocent following sector */
        if (block == 0 && drive->remap_0_to_1 == 1)
                block = 1;  /* redirect MBR access to EZ-Drive partn table */

        SELECT_DRIVE(drive);
        if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
                printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
                return startstop;
        }
        if (!drive->special.all) {
                switch(rq->cmd) {
                        case IDE_DRIVE_CMD:
                        case IDE_DRIVE_TASK:
                                return execute_drive_cmd(drive, rq);
                        case IDE_DRIVE_TASKFILE:
                                return execute_drive_cmd(drive, rq);
                        default:
                                break;
                }
                return (DRIVER(drive)->do_request(drive, rq, block));
        }
        return do_special(drive);
kill_rq:
        DRIVER(drive)->end_request(drive, 0);
        return ide_stopped;
}

/**
 *      ide_stall_queue         -       pause an IDE device
 *      @drive: drive to stall
 *      @timeout: time to stall for (jiffies)
 *
 *      ide_stall_queue() can be used by a drive to give excess bandwidth back
 *      to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
        if (timeout > WAIT_WORSTCASE)
                timeout = WAIT_WORSTCASE;
        drive->sleep = timeout + jiffies;
}

EXPORT_SYMBOL(ide_stall_queue);
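
/*
 * Illustrative sketch (hypothetical): a subdriver that knows its device
 * will be busy for a while (for example an ATAPI unit spinning up) can
 * yield the channel instead of polling, by stalling its own queue and
 * returning ide_stopped so the hwgroup may service the other drive.
 */
#if 0
static ide_startstop_t example_yield (ide_drive_t *drive)
{
        ide_stall_queue(drive, WAIT_MIN_SLEEP);  /* shortest useful stall */
        return ide_stopped;
}
#endif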

#define WAKEUP(drive)   ((drive)->service_start + 2 * (drive)->service_time)

/**
 *      choose_drive            -       select a drive to service
 *      @hwgroup: hardware group to select on
 *
 *      choose_drive() selects the next drive which will be serviced.
 *      This is necessary because the IDE layer can't issue commands
 *      to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
        ide_drive_t *drive, *best;

repeat:
        best = NULL;
        drive = hwgroup->drive;
        do {
                if (!blk_queue_empty(&drive->queue) && (!drive->sleep || time_after_eq(jiffies, drive->sleep))) {
                        if (!best
                         || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
                         || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
                        {
                                if (!blk_queue_plugged(&drive->queue))
                                        best = drive;
                        }
                }
        } while ((drive = drive->next) != hwgroup->drive);
        if (best && best->nice1 && !best->sleep && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
                long t = (signed long)(WAKEUP(best) - jiffies);
                if (t >= WAIT_MIN_SLEEP) {
                /*
                 * We *may* have some time to spare, but first let's see if
                 * someone can potentially benefit from our nice mood today..
                 */
                        drive = best->next;
                        do {
                                if (!drive->sleep
                                 && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
                                 && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
                                {
                                        ide_stall_queue(best, IDE_MIN(t, 10 * WAIT_MIN_SLEEP));
                                        goto repeat;
                                }
                        } while ((drive = drive->next) != best);
                }
        }
        return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&io_request_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global io_request_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The io_request_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
/* --BenH: made non-static as ide-pmac.c uses it to kick the hwgroup back
 *         into life on wakeup from machine sleep.
 */
void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
        ide_drive_t     *drive;
        ide_hwif_t      *hwif;
        struct request  *rq;
        ide_startstop_t startstop;

        /* for atari only: POSSIBLY BROKEN HERE(?) */
        ide_get_lock(ide_intr, hwgroup);

        /* necessary paranoia: ensure IRQs are masked on local CPU */
        local_irq_disable();

        while (!hwgroup->busy) {
                hwgroup->busy = 1;
                drive = choose_drive(hwgroup);
                if (drive == NULL) {
                        unsigned long sleep = 0;
                        hwgroup->rq = NULL;
                        drive = hwgroup->drive;
                        do {
                                if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
                                        sleep = drive->sleep;
                        } while ((drive = drive->next) != hwgroup->drive);
                        if (sleep) {
                /*
                 * Take a short snooze, and then wake up this hwgroup again.
                 * This gives other hwgroups on the same IRQ a chance to
                 * play fairly with us, just in case there are big differences
                 * in relative throughputs.. don't want to hog the cpu too much.
                 */
                                if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
                                        sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
                                if (timer_pending(&hwgroup->timer))
                                        printk(KERN_ERR "ide_set_handler: timer already active\n");
#endif
                                /* so that ide_timer_expiry knows what to do */
                                hwgroup->sleeping = 1;
                                mod_timer(&hwgroup->timer, sleep);
                                /* we purposely leave hwgroup->busy==1
                                 * while sleeping */
                        } else {
                                /* Ugly, but how can we sleep for the lock
                                 * otherwise? perhaps from tq_disk?
                                 */

                                /* for atari only */
                                ide_release_lock();
                                hwgroup->busy = 0;
                        }
                        /* no more work for this hwgroup (for now) */
                        return;
                }
                hwif = HWIF(drive);
                if (hwgroup->hwif->sharing_irq &&
                    hwif != hwgroup->hwif &&
                    hwif->io_ports[IDE_CONTROL_OFFSET]) {
                        /* set nIEN for previous hwif */
                        SELECT_INTERRUPT(drive);
                }
                hwgroup->hwif = hwif;
                hwgroup->drive = drive;
                drive->sleep = 0;
                drive->service_start = jiffies;

                /* paranoia */
                if (blk_queue_plugged(&drive->queue))
                        printk(KERN_ERR "%s: Huh? nuking plugged queue\n", drive->name);

                rq = blkdev_entry_next_request(&drive->queue.queue_head);
                hwgroup->rq = rq;
                /*
                 * Some systems have trouble with IDE IRQs arriving while
                 * the driver is still setting things up.  So, here we disable
                 * the IRQ used by this interface while the request is being started.
                 * This may look bad at first, but pretty much the same thing
                 * happens anyway when any interrupt comes in, IDE or otherwise
                 *  -- the kernel masks the IRQ while it is being handled.
                 */
                if (hwif->irq != masked_irq)
                        disable_irq_nosync(hwif->irq);
                spin_unlock(&io_request_lock);
                local_irq_enable();
                        /* allow other IRQs while we start this request */
                startstop = ide_start_request(drive, rq);
                spin_lock_irq(&io_request_lock);
                if (hwif->irq != masked_irq)
                        enable_irq(hwif->irq);
                if (startstop == ide_stopped)
                        hwgroup->busy = 0;
        }
}

EXPORT_SYMBOL(ide_do_request);
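
/*
 * Illustrative sketch (hypothetical): as noted above, ide_do_request() was
 * made non-static so code such as ide-pmac can kick a hwgroup back into
 * life.  The caller must hold io_request_lock, so such a kick looks
 * roughly like this.
 */
#if 0
static void example_kick_hwgroup (ide_hwgroup_t *hwgroup)
{
        unsigned long flags;

        spin_lock_irqsave(&io_request_lock, flags);
        ide_do_request(hwgroup, IDE_NO_IRQ);
        spin_unlock_irqrestore(&io_request_lock, flags);
}
#endif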

/*
 * ide_get_queue() returns the queue which corresponds to a given device.
 */
request_queue_t *ide_get_queue (kdev_t dev)
{
        ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;

        return &hwif->drives[DEVICE_NR(dev) & 1].queue;
}

EXPORT_SYMBOL(ide_get_queue);

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(request_queue_t *q)
{
        ide_do_request(q->queuedata, IDE_NO_IRQ);
}

EXPORT_SYMBOL(do_ide_request);

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq;
        ide_startstop_t ret = ide_stopped;

        /*
         * end current dma transaction
         */
        (void) hwif->ide_dma_end(drive);

        /*
         * complain a little, later we might remove some of this verbosity
         */

        if (error < 0) {
                printk(KERN_ERR "%s: error waiting for DMA\n", drive->name);
                (void)HWIF(drive)->ide_dma_end(drive);
                ret = DRIVER(drive)->error(drive, "dma timeout retry",
                                hwif->INB(IDE_STATUS_REG));
        } else {
                printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
                (void) hwif->ide_dma_timeout(drive);
        }

        /*
         * disable dma for now, but remember that we did so because of
         * a timeout -- we'll reenable after we finish this next request
         * (or rather the first chunk of it) in pio.
         */
        drive->retry_pio++;
        drive->state = DMA_PIO_RETRY;
        (void) hwif->ide_dma_off_quietly(drive);

        /*
         * un-busy drive etc (hwgroup->busy is cleared on return) and
         * make sure request is sane
         */
        rq = HWGROUP(drive)->rq;
        HWGROUP(drive)->rq = NULL;

        rq->errors = 0;
        rq->sector = rq->bh->b_rsector;
        rq->current_nr_sectors = rq->bh->b_size >> 9;
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->buffer = rq->bh->b_data;

        return ret;
}

/**
 *      ide_timer_expiry        -       handle lack of an IDE interrupt
 *      @data: timer callback magic (hwgroup)
 *
 *      An IDE command has timed out before the expected drive return
 *      occurred. At this point we attempt to clean up the current
 *      mess. If the current handler includes an expiry handler then
 *      we invoke the expiry handler, and providing it is happy the
 *      work is done. If that fails we apply generic recovery rules
 *      invoking the handler and checking the drive DMA status. We
 *      have an excessively incestuous relationship with the DMA
 *      logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
        ide_hwgroup_t   *hwgroup = (ide_hwgroup_t *) data;
        ide_handler_t   *handler;
        ide_expiry_t    *expiry;
        unsigned long   flags;
        unsigned long   wait = -1;

        spin_lock_irqsave(&io_request_lock, flags);

        if ((handler = hwgroup->handler) == NULL) {
                /*
                 * Either a marginal timeout occurred
                 * (got the interrupt just as timer expired),
                 * or we were "sleeping" to give other devices a chance.
                 * Either way, we don't really want to complain about anything.
                 */
                if (hwgroup->sleeping) {
                        hwgroup->sleeping = 0;
                        hwgroup->busy = 0;
                }
        } else {
                ide_drive_t *drive = hwgroup->drive;
                if (!drive) {
                        printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
                        hwgroup->handler = NULL;
                } else {
                        ide_hwif_t *hwif;
                        ide_startstop_t startstop = ide_stopped;
                        if (!hwgroup->busy) {
                                hwgroup->busy = 1;      /* paranoia */
                                printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
                        }
                        if ((expiry = hwgroup->expiry) != NULL) {
                                /* continue */
                                if ((wait = expiry(drive)) > 0) {
                                        /* reset timer */
                                        hwgroup->timer.expires  = jiffies + wait;
                                        add_timer(&hwgroup->timer);
                                        spin_unlock_irqrestore(&io_request_lock, flags);
                                        return;
                                }
                        }
                        hwgroup->handler = NULL;
                        /*
                         * We need to simulate a real interrupt when invoking
                         * the handler() function, which means we need to
                         * globally mask the specific IRQ:
                         */
                        spin_unlock(&io_request_lock);
                        hwif  = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
                        disable_irq_nosync(hwif->irq);
#else
                        /* disable_irq_nosync ?? */
                        disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */

                        /* local CPU only,
                         * as if we were handling an interrupt */
                        local_irq_disable();
                        if (hwgroup->poll_timeout != 0) {
                                startstop = handler(drive);
                        } else if (drive_is_ready(drive)) {
                                if (drive->waiting_for_dma)
                                        (void) hwgroup->hwif->ide_dma_lostirq(drive);
                                (void)ide_ack_intr(hwif);
                                printk(KERN_ERR "%s: lost interrupt\n", drive->name);
                                startstop = handler(drive);
                        } else {
                                if (drive->waiting_for_dma) {
                                        startstop = ide_dma_timeout_retry(drive, wait);
                                } else {
                                        startstop = DRIVER(drive)->error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
                                }
                        }
                        drive->service_time = jiffies - drive->service_start;
                        spin_lock_irq(&io_request_lock);
                        enable_irq(hwif->irq);
                        if (startstop == ide_stopped)
                                hwgroup->busy = 0;
                }
        }
        ide_do_request(hwgroup, IDE_NO_IRQ);
        spin_unlock_irqrestore(&io_request_lock, flags);
}

EXPORT_SYMBOL(ide_timer_expiry);
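
/*
 * Illustrative sketch (hypothetical): an expiry callback installed together
 * with a handler lets ide_timer_expiry() extend the timeout instead of
 * declaring the command dead.  Returning a positive number of jiffies
 * re-arms the timer; returning 0 falls through to the normal recovery
 * path.  The BUSY test below is one plausible policy, not code from this
 * file.
 */
#if 0
static int example_expiry (ide_drive_t *drive)
{
        u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

        if (stat & BUSY_STAT)
                return WAIT_CMD;        /* drive still working: wait longer */
        return 0;                       /* give up, run the error path */
}
#endif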

/**
 *      unexpected_intr         -       handle an unexpected IDE interrupt
 *      @irq: interrupt line
 *      @hwgroup: hwgroup being processed
 *
 *      There's nothing really useful we can do with an unexpected interrupt,
 *      other than reading the status register (to clear it), and logging it.
 *      There should be no way that an irq can happen before we're ready for it,
 *      so we needn't worry much about losing an "important" interrupt here.
 *
 *      On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *      the drive enters "idle", "standby", or "sleep" mode, so if the status
 *      looks "good", we just ignore the interrupt completely.
 *
 *      This routine assumes __cli() is in effect when called.
 *
 *      If an unexpected interrupt happens on irq15 while we are handling irq14
 *      and if the two interfaces are "serialized" (CMD640), then it looks like
 *      we could screw up by interfering with a new request being set up for
 *      irq15.
 *
 *      In reality, this is a non-issue.  The new command is not sent unless
 *      the drive is ready to accept one, in which case we know the drive is
 *      not trying to interrupt us.  And ide_set_handler() is always invoked
 *      before completing the issuance of any new drive command, so we will not
 *      be accidentally invoked as a result of any valid command completion
 *      interrupt.
 *
 *      Note that we must walk the entire hwgroup here. We know which hwif
 *      is doing the current command, but we don't know which hwif burped
 *      mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
        u8 stat;
        ide_hwif_t *hwif = hwgroup->hwif;

        /*
         * handle the unexpected interrupt
         */
        do {
                if (hwif->irq == irq) {
                        stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
                        if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
                                /* Try to not flood the console with msgs */
                                static unsigned long last_msgtime, count;
                                ++count;
                                if (time_after(jiffies, last_msgtime + HZ)) {
                                        last_msgtime = jiffies;
                                        printk(KERN_ERR "%s%s: unexpected interrupt, "
                                                "status=0x%02x, count=%ld\n",
                                                hwif->name,
                                                (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
                                }
                        }
                }
        } while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 *      ide_intr        -       default IDE interrupt handler
 *      @irq: interrupt number
 *      @dev_id: hwif group
 *      @regs: unused weirdness from the kernel irq layer
 *
 *      This is the default IRQ handler for the IDE layer. You should
 *      not need to override it. If you do, be aware it is subtle in
 *      places.
 *
 *      hwgroup->hwif is the interface in the group currently performing
 *      a command. hwgroup->drive is the drive and hwgroup->handler is
 *      the IRQ handler to call. As we issue a command the handlers
 *      step through multiple states, reassigning the handler to the
 *      next step in the process. Unlike a smart SCSI controller IDE
 *      expects the main processor to sequence the various transfer
 *      stages. We also manage a poll timer to catch up with most
 *      timeout situations. There are still a few where the handlers
 *      don't ever decide to give up.
 *
 *      The handler eventually returns ide_stopped to indicate the
 *      request completed. At this point we issue the next request
 *      on the hwgroup and the process begins again.
 */

void ide_intr (int irq, void *dev_id, struct pt_regs *regs)
{
        unsigned long flags;
        ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
        ide_hwif_t *hwif;
        ide_drive_t *drive;
        ide_handler_t *handler;
        ide_startstop_t startstop;

        spin_lock_irqsave(&io_request_lock, flags);
        hwif = hwgroup->hwif;

        if (!ide_ack_intr(hwif)) {
                spin_unlock_irqrestore(&io_request_lock, flags);
                return;
        }

        if ((handler = hwgroup->handler) == NULL ||
            hwgroup->poll_timeout != 0) {
                /*
                 * Not expecting an interrupt from this drive.
                 * That means this could be:
                 *      (1) an interrupt from another PCI device
                 *      sharing the same PCI INT# as us.
                 * or   (2) a drive just entered sleep or standby mode,
                 *      and is interrupting to let us know.
                 * or   (3) a spurious interrupt of unknown origin.
                 *
                 * For PCI, we cannot tell the difference,
                 * so in that case we just ignore it and hope it goes away.
                 */
#ifdef CONFIG_BLK_DEV_IDEPCI
                if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif  /* CONFIG_BLK_DEV_IDEPCI */
                {
                        /*
                         * Probably not a shared PCI interrupt,
                         * so we can safely try to do something about it:
                         */
                        unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
                } else {
                        /*
                         * Whack the status register, just in case
                         * we have a leftover pending IRQ.
                         */
                        (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
                }
                spin_unlock_irqrestore(&io_request_lock, flags);
                return;
        }
        drive = hwgroup->drive;
        if (!drive) {
                /*
                 * This should NEVER happen, and there isn't much
                 * we could do about it here.
                 */
                spin_unlock_irqrestore(&io_request_lock, flags);
                return;
        }
        if (!drive_is_ready(drive)) {
                /*
                 * This happens regularly when we share a PCI IRQ with
                 * another device.  Unfortunately, it can also happen
                 * with some buggy drives that trigger the IRQ before
                 * their status register is up to date.  Hopefully we have
                 * enough advance overhead that the latter isn't a problem.
                 */
                spin_unlock_irqrestore(&io_request_lock, flags);
                return;
        }
        if (!hwgroup->busy) {
                hwgroup->busy = 1;      /* paranoia */
                printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
        }
        hwgroup->handler = NULL;
        del_timer(&hwgroup->timer);
        spin_unlock(&io_request_lock);

        if (drive->unmask)
                local_irq_enable();

        /* service this interrupt, may set handler for next interrupt */
        startstop = handler(drive);
        spin_lock_irq(&io_request_lock);

        /*
         * Note that handler() may have set things up for another
         * interrupt to occur soon, but it cannot happen until
         * we exit from this routine, because it will be the
         * same irq as is currently being serviced here, and Linux
         * won't allow another of the same (on any CPU) until we return.
         */
        drive->service_time = jiffies - drive->service_start;
        if (startstop == ide_stopped) {
                if (hwgroup->handler == NULL) { /* paranoia */
                        hwgroup->busy = 0;
                        ide_do_request(hwgroup, hwif->irq);
                } else {
                        printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
                                "on exit\n", drive->name);
                }
        }
        spin_unlock_irqrestore(&io_request_lock, flags);
}

EXPORT_SYMBOL(ide_intr);
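
/*
 * Illustrative sketch (hypothetical): the handler chain described above.
 * A PIO read could be driven by a handler that re-arms itself with
 * ide_set_handler() until every sector has been transferred, then
 * completes the request and returns ide_stopped so ide_intr() starts the
 * next one.  Sector accounting and error handling are omitted; this is
 * not code from the original file.
 */
#if 0
static ide_startstop_t example_read_intr (ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        u8 stat = hwif->INB(IDE_STATUS_REG);
        int more = 0;   /* would be computed from the request */

        if (!OK_STAT(stat, DATA_READY, BAD_R_STAT))
                return DRIVER(drive)->error(drive, "example_read_intr", stat);

        /* ... transfer one sector with hwif->ata_input_data() ... */

        if (more) {
                /* expect another interrupt; keep the hwgroup busy */
                ide_set_handler(drive, &example_read_intr, WAIT_CMD, NULL);
                return ide_started;
        }
        ide_end_request(drive, 1);
        return ide_stopped;
}
#endif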

/*
 * ide_info_ptr() returns the (ide_drive_t *) for a given device number.
 * It returns NULL if the given device number does not match any present drives.
 */
ide_drive_t *ide_info_ptr (kdev_t i_rdev, int force)
{
        int             major = MAJOR(i_rdev);
        unsigned int    h;

        for (h = 0; h < MAX_HWIFS; ++h) {
                ide_hwif_t  *hwif = &ide_hwifs[h];
                if (hwif->present && major == hwif->major) {
                        unsigned unit = DEVICE_NR(i_rdev);
                        if (unit < MAX_DRIVES) {
                                ide_drive_t *drive = &hwif->drives[unit];
                                if (drive->present || force)
                                        return drive;
                        }
                        break;
                }
        }
        return NULL;
}

EXPORT_SYMBOL(ide_info_ptr);

/**
 *      ide_init_drive_cmd      -       initialize a drive command request
 *      @rq: request object
 *
 *      Initialize a request before we fill it in and send it down to
 *      ide_do_drive_cmd. Commands must be set up by this function. Right
 *      now it doesn't do a lot, but if that changes abusers will have a
 *      nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
        memset(rq, 0, sizeof(*rq));
        rq->cmd = IDE_DRIVE_CMD;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 *      ide_do_drive_cmd        -       issue IDE special command
 *      @drive: device to issue command
 *      @rq: request to issue
 *      @action: action for processing
 *
 *      This function issues a special IDE device request
 *      onto the request queue.
 *
 *      If action is ide_wait, then the rq is queued at the end of the
 *      request queue, and the function sleeps until it has been processed.
 *      This is for use when invoked from an ioctl handler.
 *
 *      If action is ide_preempt, then the rq is queued at the head of
 *      the request queue, displacing the currently-being-processed
 *      request and this function returns immediately without waiting
 *      for the new rq to be completed.  This is VERY DANGEROUS, and is
 *      intended for careful use by the ATAPI tape/cdrom driver code.
 *
 *      If action is ide_next, then the rq is queued immediately after
 *      the currently-being-processed request (if any), and the function
 *      returns without waiting for the new rq to be completed.  As above,
 *      this is VERY DANGEROUS, and is intended for careful use by the
 *      ATAPI tape/cdrom driver code.
 *
 *      If action is ide_end, then the rq is queued at the end of the
 *      request queue, and the function returns immediately without waiting
 *      for the new rq to be completed. This is again intended for careful
 *      use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
        unsigned long flags;
        ide_hwgroup_t *hwgroup = HWGROUP(drive);
        unsigned int major = HWIF(drive)->major;
        request_queue_t *q = &drive->queue;
        struct list_head *queue_head = &q->queue_head;
        DECLARE_COMPLETION(wait);

#ifdef CONFIG_BLK_DEV_PDC4030
        if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
                return -ENOSYS;  /* special drive cmds not supported */
#endif
        rq->errors = 0;
        rq->rq_status = RQ_ACTIVE;
        rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
        if (action == ide_wait)
                rq->waiting = &wait;
        spin_lock_irqsave(&io_request_lock, flags);
        if (blk_queue_empty(q) || action == ide_preempt) {
                if (action == ide_preempt)
                        hwgroup->rq = NULL;
        } else {
                if (action == ide_wait || action == ide_end) {
                        queue_head = queue_head->prev;
                } else
                        queue_head = queue_head->next;
        }
        list_add(&rq->queue, queue_head);
        ide_do_request(hwgroup, IDE_NO_IRQ);
        spin_unlock_irqrestore(&io_request_lock, flags);
        if (action == ide_wait) {
                /* wait for it to be serviced */
                wait_for_completion(&wait);
                /* return -EIO if errors */
                return rq->errors ? -EIO : 0;
        }
        return 0;
}

EXPORT_SYMBOL(ide_do_drive_cmd);
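
/*
 * Illustrative sketch (hypothetical): the usual ide_wait pattern from an
 * ioctl path.  A DRIVE_CMD request carries a 4-byte header of command,
 * sector count, feature and "data sector count"; on completion
 * ide_end_drive_cmd() writes status, error and nsector back into the
 * first bytes (see drive_cmd_intr() above).  The helper name and buffer
 * handling are made up for illustration.
 */
#if 0
static int example_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature)
{
        struct request rq;
        u8 args[4];

        args[0] = cmd;          /* taskfile command byte */
        args[1] = nsect;        /* sector count register value */
        args[2] = feature;      /* feature register value */
        args[3] = 0;            /* no data sectors expected back */

        ide_init_drive_cmd(&rq);
        rq.buffer = (char *) args;
        /* queue at the tail and sleep until it has been serviced */
        return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#endif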
