OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [scsi/] [scsi.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
3
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4
 *
5
 *  generic mid-level SCSI driver
6
 *      Initial versions: Drew Eckhardt
7
 *      Subsequent revisions: Eric Youngdale
8
 *
9
 *  <drew@colorado.edu>
10
 *
11
 *  Bug correction thanks go to :
12
 *      Rik Faith <faith@cs.unc.edu>
13
 *      Tommy Thorn <tthorn>
14
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
15
 *
16
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17
 *  add scatter-gather, multiple outstanding request, and other
18
 *  enhancements.
19
 *
20
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
21
 *  support added by Michael Neuffer <mike@i-connect.net>
22
 *
23
 *  Added request_module("scsi_hostadapter") for kerneld:
24
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25
 *  Bjorn Ekwall  <bj0rn@blox.se>
26
 *  (changed to kmod)
27
 *
28
 *  Major improvements to the timeout, abort, and reset processing,
29
 *  as well as performance modifications for large queue depths by
30
 *  Leonard N. Zubkoff <lnz@dandelion.com>
31
 *
32
 *  Converted cli() code to spinlocks, Ingo Molnar
33
 *
34
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
35
 *
36
 *  out_of_space hacks, D. Gilbert (dpg) 990608
37
 */
38
 
39
#define REVISION        "Revision: 1.00"
40
#define VERSION         "Id: scsi.c 1.00 2000/09/26"
41
 
42
#include <linux/config.h>
43
#include <linux/module.h>
44
 
45
#include <linux/sched.h>
46
#include <linux/timer.h>
47
#include <linux/string.h>
48
#include <linux/slab.h>
49
#include <linux/ioport.h>
50
#include <linux/kernel.h>
51
#include <linux/stat.h>
52
#include <linux/blk.h>
53
#include <linux/interrupt.h>
54
#include <linux/delay.h>
55
#include <linux/init.h>
56
#include <linux/smp_lock.h>
57
#include <linux/completion.h>
58
 
59
#define __KERNEL_SYSCALLS__
60
 
61
#include <linux/unistd.h>
62
#include <linux/spinlock.h>
63
 
64
#include <asm/system.h>
65
#include <asm/irq.h>
66
#include <asm/dma.h>
67
#include <asm/uaccess.h>
68
 
69
#include "scsi.h"
70
#include "hosts.h"
71
#include "constants.h"
72
 
73
#ifdef CONFIG_KMOD
74
#include <linux/kmod.h>
75
#endif
76
 
77
#undef USE_STATIC_SCSI_MEMORY
78
 
79
struct proc_dir_entry *proc_scsi;
80
 
81
#ifdef CONFIG_PROC_FS
82
static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
83
static void scsi_dump_status(int level);
84
#endif
85
 
86
/*
87
   static const char RCSid[] = "$Header: /home/marcus/revision_ctrl_test/oc_cvs/cvs/or1k/linux/linux-2.4/drivers/scsi/scsi.c,v 1.1.1.1 2004-04-15 02:12:00 phoenix Exp $";
88
 */
89
 
90
/*
91
 * Definitions and constants.
92
 */
93
 
94
#define MIN_RESET_DELAY (2*HZ)
95
 
96
/* Do not call reset on error if we just did a reset within 15 sec. */
97
#define MIN_RESET_PERIOD (15*HZ)
98
 
99
/*
100
 * Macro to determine the size of SCSI command. This macro takes vendor
101
 * unique commands into account. SCSI commands in groups 6 and 7 are
102
 * vendor unique and we will depend upon the command length being
103
 * supplied correctly in cmd_len.
104
 */
105
#define CDB_SIZE(SCpnt) ((((SCpnt->cmnd[0] >> 5) & 7) < 6) ? \
106
                                COMMAND_SIZE(SCpnt->cmnd[0]) : SCpnt->cmd_len)
107
 
108
/*
109
 * Data declarations.
110
 */
111
unsigned long scsi_pid;
112
Scsi_Cmnd *last_cmnd;
113
/* Command group 3 is reserved and should never be used.  */
114
const unsigned char scsi_command_size[8] =
115
{
116
        6, 10, 10, 12,
117
        16, 12, 10, 10
118
};
119
static unsigned long serial_number;
120
static Scsi_Cmnd *scsi_bh_queue_head;
121
static Scsi_Cmnd *scsi_bh_queue_tail;
122
 
123
/*
124
 * Note - the initial logging level can be set here to log events at boot time.
125
 * After the system is up, you may enable logging via the /proc interface.
126
 */
127
unsigned int scsi_logging_level;
128
 
129
const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
130
{
131
        "Direct-Access    ",
132
        "Sequential-Access",
133
        "Printer          ",
134
        "Processor        ",
135
        "WORM             ",
136
        "CD-ROM           ",
137
        "Scanner          ",
138
        "Optical Device   ",
139
        "Medium Changer   ",
140
        "Communications   ",
141
        "Unknown          ",
142
        "Unknown          ",
143
        "Unknown          ",
144
        "Enclosure        ",
145
};
146
 
147
/*
148
 * Function prototypes.
149
 */
150
extern void scsi_times_out(Scsi_Cmnd * SCpnt);
151
void scsi_build_commandblocks(Scsi_Device * SDpnt);
152
 
153
/*
154
 * These are the interface to the old error handling code.  It should go away
155
 * someday soon.
156
 */
157
extern void scsi_old_done(Scsi_Cmnd * SCpnt);
158
extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
159
extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
160
 
161
/*
162
 * Private interface into the new error handling code.
163
 */
164
extern int scsi_new_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
165
 
166
/*
167
 * Function:    scsi_initialize_queue()
168
 *
169
 * Purpose:     Selects queue handler function for a device.
170
 *
171
 * Arguments:   SDpnt   - device for which we need a handler function.
172
 *
173
 * Returns:     Nothing
174
 *
175
 * Lock status: No locking assumed or required.
176
 *
177
 * Notes:       Most devices will end up using scsi_request_fn for the
178
 *              handler function (at least as things are done now).
179
 *              The "block" feature basically ensures that only one of
180
 *              the blocked hosts is active at one time, mainly to work around
181
 *              buggy DMA chipsets where the memory gets starved.
182
 *              For this case, we have a special handler function, which
183
 *              does some checks and ultimately calls scsi_request_fn.
184
 *
185
 *              The single_lun feature is a similar special case.
186
 *
187
 *              We handle these things by stacking the handlers.  The
188
 *              special case handlers simply check a few conditions,
189
 *              and return if they are not supposed to do anything.
190
 *              In the event that things are OK, then they call the next
191
 *              handler in the list - ultimately they call scsi_request_fn
192
 *              to do the dirty deed.
193
 */
194
void  scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
195
{
196
        request_queue_t *q = &SDpnt->request_queue;
197
 
198
        blk_init_queue(q, scsi_request_fn);
199
        blk_queue_headactive(q, 0);
200
        blk_queue_throttle_sectors(q, 1);
201
        q->queuedata = (void *) SDpnt;
202
}
203
 
204
#ifdef MODULE
205
MODULE_PARM(scsi_logging_level, "i");
206
MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
207
 
208
#else
209
 
210
static int __init scsi_logging_setup(char *str)
211
{
212
        int tmp;
213
 
214
        if (get_option(&str, &tmp) == 1) {
215
                scsi_logging_level = (tmp ? ~0 : 0);
216
                return 1;
217
        } else {
218
                printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
219
                       "(n should be 0 or non-zero)\n");
220
                return 0;
221
        }
222
}
223
 
224
__setup("scsi_logging=", scsi_logging_setup);
225
 
226
#endif
227
 
228
/*
229
 *      Issue a command and wait for it to complete
230
 */
231
 
232
static void scsi_wait_done(Scsi_Cmnd * SCpnt)
233
{
234
        struct request *req;
235
 
236
        req = &SCpnt->request;
237
        req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
238
 
239
        if (req->waiting != NULL) {
240
                complete(req->waiting);
241
        }
242
}
243
 
244
/*
245
 * This lock protects the freelist for all devices on the system.
246
 * We could make this finer grained by having a single lock per
247
 * device if it is ever found that there is excessive contention
248
 * on this lock.
249
 */
250
static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
251
 
252
/*
253
 * Used to protect insertion into and removal from the queue of
254
 * commands to be processed by the bottom half handler.
255
 */
256
static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
257
 
258
/*
259
 * Function:    scsi_allocate_request
260
 *
261
 * Purpose:     Allocate a request descriptor.
262
 *
263
 * Arguments:   device    - device for which we want a request
264
 *
265
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
266
 *
267
 * Returns:     Pointer to request block.
268
 *
269
 * Notes:       With the new queueing code, it becomes important
270
 *              to track the difference between a command and a
271
 *              request.  A request is a pending item in the queue that
272
 *              has not yet reached the top of the queue.
273
 */
274
 
275
Scsi_Request *scsi_allocate_request(Scsi_Device * device)
276
{
277
        Scsi_Request *SRpnt = NULL;
278
 
279
        if (!device)
280
                panic("No device passed to scsi_allocate_request().\n");
281
 
282
        SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
283
        if( SRpnt == NULL )
284
        {
285
                return NULL;
286
        }
287
 
288
        memset(SRpnt, 0, sizeof(Scsi_Request));
289
        SRpnt->sr_device = device;
290
        SRpnt->sr_host = device->host;
291
        SRpnt->sr_magic = SCSI_REQ_MAGIC;
292
        SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
293
 
294
        return SRpnt;
295
}
296
 
297
/*
298
 * Function:    scsi_release_request
299
 *
300
 * Purpose:     Release a request descriptor.
301
 *
302
 * Arguments:   device    - device for which we want a request
303
 *
304
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
305
 *
306
 * Returns:     Pointer to request block.
307
 *
308
 * Notes:       With the new queueing code, it becomes important
309
 *              to track the difference between a command and a
310
 *              request.  A request is a pending item in the queue that
311
 *              has not yet reached the top of the queue.  We still need
312
 *              to free a request when we are done with it, of course.
313
 */
314
void scsi_release_request(Scsi_Request * req)
315
{
316
        if( req->sr_command != NULL )
317
        {
318
                scsi_release_command(req->sr_command);
319
                req->sr_command = NULL;
320
        }
321
 
322
        kfree(req);
323
}
324
 
325
/*
326
 * Function:    scsi_allocate_device
327
 *
328
 * Purpose:     Allocate a command descriptor.
329
 *
330
 * Arguments:   device    - device for which we want a command descriptor
331
 *              wait      - 1 if we should wait in the event that none
332
 *                          are available.
333
 *              interruptible - 1 if we should unblock and return NULL
334
 *                          in the event that we must wait, and a signal
335
 *                          arrives.
336
 *
337
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
338
 *
339
 * Returns:     Pointer to command descriptor.
340
 *
341
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
342
 *
343
 *              If the wait flag is true, and we are waiting for a free
344
 *              command block, this function will interrupt and return
345
 *              NULL in the event that a signal arrives that needs to
346
 *              be handled.
347
 *
348
 *              This function is deprecated, and drivers should be
349
 *              rewritten to use Scsi_Request instead of Scsi_Cmnd.
350
 */
351
 
352
Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
353
                                int interruptable)
354
{
355
        struct Scsi_Host *host;
356
        Scsi_Cmnd *SCpnt = NULL;
357
        Scsi_Device *SDpnt;
358
        unsigned long flags;
359
 
360
        if (!device)
361
                panic("No device passed to scsi_allocate_device().\n");
362
 
363
        host = device->host;
364
 
365
        spin_lock_irqsave(&device_request_lock, flags);
366
 
367
        while (1 == 1) {
368
                SCpnt = NULL;
369
                if (!device->device_blocked) {
370
                        if (device->single_lun) {
371
                                /*
372
                                 * FIXME(eric) - this is not at all optimal.  Given that
373
                                 * single lun devices are rare and usually slow
374
                                 * (i.e. CD changers), this is good enough for now, but
375
                                 * we may want to come back and optimize this later.
376
                                 *
377
                                 * Scan through all of the devices attached to this
378
                                 * host, and see if any are active or not.  If so,
379
                                 * we need to defer this command.
380
                                 *
381
                                 * We really need a busy counter per device.  This would
382
                                 * allow us to more easily figure out whether we should
383
                                 * do anything here or not.
384
                                 */
385
                                for (SDpnt = host->host_queue;
386
                                     SDpnt;
387
                                     SDpnt = SDpnt->next) {
388
                                        /*
389
                                         * Only look for other devices on the same bus
390
                                         * with the same target ID.
391
                                         */
392
                                        if (SDpnt->channel != device->channel
393
                                            || SDpnt->id != device->id
394
                                            || SDpnt == device) {
395
                                                continue;
396
                                        }
397
                                        if( atomic_read(&SDpnt->device_active) != 0)
398
                                        {
399
                                                break;
400
                                        }
401
                                }
402
                                if (SDpnt) {
403
                                        /*
404
                                         * Some other device in this cluster is busy.
405
                                         * If asked to wait, we need to wait, otherwise
406
                                         * return NULL.
407
                                         */
408
                                        SCpnt = NULL;
409
                                        goto busy;
410
                                }
411
                        }
412
                        /*
413
                         * Now we can check for a free command block for this device.
414
                         */
415
                        for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
416
                                if (SCpnt->request.rq_status == RQ_INACTIVE)
417
                                        break;
418
                        }
419
                }
420
                /*
421
                 * If we couldn't find a free command block, and we have been
422
                 * asked to wait, then do so.
423
                 */
424
                if (SCpnt) {
425
                        break;
426
                }
427
      busy:
428
                /*
429
                 * If we have been asked to wait for a free block, then
430
                 * wait here.
431
                 */
432
                if (wait) {
433
                        DECLARE_WAITQUEUE(wait, current);
434
 
435
                        /*
436
                         * We need to wait for a free commandblock.  We need to
437
                         * insert ourselves into the list before we release the
438
                         * lock.  This way if a block were released the same
439
                         * microsecond that we released the lock, the call
440
                         * to schedule() wouldn't block (well, it might switch,
441
                         * but the current task will still be schedulable.
442
                         */
443
                        add_wait_queue(&device->scpnt_wait, &wait);
444
                        if( interruptable ) {
445
                                set_current_state(TASK_INTERRUPTIBLE);
446
                        } else {
447
                                set_current_state(TASK_UNINTERRUPTIBLE);
448
                        }
449
 
450
                        spin_unlock_irqrestore(&device_request_lock, flags);
451
 
452
                        /*
453
                         * This should block until a device command block
454
                         * becomes available.
455
                         */
456
                        schedule();
457
 
458
                        spin_lock_irqsave(&device_request_lock, flags);
459
 
460
                        remove_wait_queue(&device->scpnt_wait, &wait);
461
                        /*
462
                         * FIXME - Isn't this redundant??  Someone
463
                         * else will have forced the state back to running.
464
                         */
465
                        set_current_state(TASK_RUNNING);
466
                        /*
467
                         * In the event that a signal has arrived that we need
468
                         * to consider, then simply return NULL.  Everyone
469
                         * that calls us should be prepared for this
470
                         * possibility, and pass the appropriate code back
471
                         * to the user.
472
                         */
473
                        if( interruptable ) {
474
                                if (signal_pending(current)) {
475
                                        spin_unlock_irqrestore(&device_request_lock, flags);
476
                                        return NULL;
477
                                }
478
                        }
479
                } else {
480
                        spin_unlock_irqrestore(&device_request_lock, flags);
481
                        return NULL;
482
                }
483
        }
484
 
485
        SCpnt->request.rq_status = RQ_SCSI_BUSY;
486
        SCpnt->request.waiting = NULL;  /* And no one is waiting for this
487
                                         * to complete */
488
        atomic_inc(&SCpnt->host->host_active);
489
        atomic_inc(&SCpnt->device->device_active);
490
 
491
        SCpnt->buffer  = NULL;
492
        SCpnt->bufflen = 0;
493
        SCpnt->request_buffer = NULL;
494
        SCpnt->request_bufflen = 0;
495
 
496
        SCpnt->use_sg = 0;       /* Reset the scatter-gather flag */
497
        SCpnt->old_use_sg = 0;
498
        SCpnt->transfersize = 0; /* No default transfer size */
499
        SCpnt->cmd_len = 0;
500
 
501
        SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
502
        SCpnt->sc_request = NULL;
503
        SCpnt->sc_magic = SCSI_CMND_MAGIC;
504
 
505
        SCpnt->result = 0;
506
        SCpnt->underflow = 0;    /* Do not flag underflow conditions */
507
        SCpnt->old_underflow = 0;
508
        SCpnt->resid = 0;
509
        SCpnt->state = SCSI_STATE_INITIALIZING;
510
        SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
511
 
512
        spin_unlock_irqrestore(&device_request_lock, flags);
513
 
514
        SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
515
                                   SCpnt->target,
516
                                atomic_read(&SCpnt->host->host_active)));
517
 
518
        return SCpnt;
519
}
520
 
521
inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
522
{
523
        unsigned long flags;
524
        Scsi_Device * SDpnt;
525
 
526
        spin_lock_irqsave(&device_request_lock, flags);
527
 
528
        SDpnt = SCpnt->device;
529
 
530
        SCpnt->request.rq_status = RQ_INACTIVE;
531
        SCpnt->state = SCSI_STATE_UNUSED;
532
        SCpnt->owner = SCSI_OWNER_NOBODY;
533
        atomic_dec(&SCpnt->host->host_active);
534
        atomic_dec(&SDpnt->device_active);
535
 
536
        SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
537
                                   SCpnt->target,
538
                                   atomic_read(&SCpnt->host->host_active),
539
                                   SCpnt->host->host_failed));
540
        if (SCpnt->host->host_failed != 0) {
541
                SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
542
                                                SCpnt->host->in_recovery,
543
                                                SCpnt->host->eh_active));
544
        }
545
        /*
546
         * If the host is having troubles, then look to see if this was the last
547
         * command that might have failed.  If so, wake up the error handler.
548
         */
549
        if (SCpnt->host->in_recovery
550
            && !SCpnt->host->eh_active
551
            && SCpnt->host->host_busy == SCpnt->host->host_failed) {
552
                SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
553
                             atomic_read(&SCpnt->host->eh_wait->count)));
554
                up(SCpnt->host->eh_wait);
555
        }
556
 
557
        spin_unlock_irqrestore(&device_request_lock, flags);
558
 
559
        /*
560
         * Wake up anyone waiting for this device.  Do this after we
561
         * have released the lock, as they will need it as soon as
562
         * they wake up.
563
         */
564
        wake_up(&SDpnt->scpnt_wait);
565
}
566
 
567
/*
568
 * Function:    scsi_release_command
569
 *
570
 * Purpose:     Release a command block.
571
 *
572
 * Arguments:   SCpnt - command block we are releasing.
573
 *
574
 * Notes:       The command block can no longer be used by the caller once
575
 *              this funciton is called.  This is in effect the inverse
576
 *              of scsi_allocate_device.  Note that we also must perform
577
 *              a couple of additional tasks.  We must first wake up any
578
 *              processes that might have blocked waiting for a command
579
 *              block, and secondly we must hit the queue handler function
580
 *              to make sure that the device is busy.  Note - there is an
581
 *              option to not do this - there were instances where we could
582
 *              recurse too deeply and blow the stack if this happened
583
 *              when we were indirectly called from the request function
584
 *              itself.
585
 *
586
 *              The idea is that a lot of the mid-level internals gunk
587
 *              gets hidden in this function.  Upper level drivers don't
588
 *              have any chickens to wave in the air to get things to
589
 *              work reliably.
590
 *
591
 *              This function is deprecated, and drivers should be
592
 *              rewritten to use Scsi_Request instead of Scsi_Cmnd.
593
 */
594
void scsi_release_command(Scsi_Cmnd * SCpnt)
595
{
596
        request_queue_t *q;
597
        Scsi_Device * SDpnt;
598
 
599
        SDpnt = SCpnt->device;
600
 
601
        __scsi_release_command(SCpnt);
602
 
603
        /*
604
         * Finally, hit the queue request function to make sure that
605
         * the device is actually busy if there are requests present.
606
         * This won't block - if the device cannot take any more, life
607
         * will go on.
608
         */
609
        q = &SDpnt->request_queue;
610
        scsi_queue_next_request(q, NULL);
611
}
612
 
613
/*
614
 * Function:    scsi_dispatch_command
615
 *
616
 * Purpose:     Dispatch a command to the low-level driver.
617
 *
618
 * Arguments:   SCpnt - command block we are dispatching.
619
 *
620
 * Notes:
621
 */
622
int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
623
{
624
#ifdef DEBUG_DELAY
625
        unsigned long clock;
626
#endif
627
        struct Scsi_Host *host;
628
        int rtn = 0;
629
        unsigned long flags = 0;
630
        unsigned long timeout;
631
 
632
        ASSERT_LOCK(&io_request_lock, 0);
633
 
634
#if DEBUG
635
        unsigned long *ret = 0;
636
#ifdef __mips__
637
        __asm__ __volatile__("move\t%0,$31":"=r"(ret));
638
#else
639
        ret = __builtin_return_address(0);
640
#endif
641
#endif
642
 
643
        host = SCpnt->host;
644
 
645
        /* Assign a unique nonzero serial_number. */
646
        if (++serial_number == 0)
647
                serial_number = 1;
648
        SCpnt->serial_number = serial_number;
649
        SCpnt->pid = scsi_pid++;
650
 
651
        /*
652
         * We will wait MIN_RESET_DELAY clock ticks after the last reset so
653
         * we can avoid the drive not being ready.
654
         */
655
        timeout = host->last_reset + MIN_RESET_DELAY;
656
 
657
        if (host->resetting && time_before(jiffies, timeout)) {
658
                int ticks_remaining = timeout - jiffies;
659
                /*
660
                 * NOTE: This may be executed from within an interrupt
661
                 * handler!  This is bad, but for now, it'll do.  The irq
662
                 * level of the interrupt handler has been masked out by the
663
                 * platform dependent interrupt handling code already, so the
664
                 * sti() here will not cause another call to the SCSI host's
665
                 * interrupt handler (assuming there is one irq-level per
666
                 * host).
667
                 */
668
                while (--ticks_remaining >= 0)
669
                        mdelay(1 + 999 / HZ);
670
                host->resetting = 0;
671
        }
672
        if (host->hostt->use_new_eh_code) {
673
                scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
674
        } else {
675
                scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
676
                               scsi_old_times_out);
677
        }
678
 
679
        /*
680
         * We will use a queued command if possible, otherwise we will emulate the
681
         * queuing and calling of completion function ourselves.
682
         */
683
        SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
684
               "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
685
        SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
686
                            SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
687
 
688
        SCpnt->state = SCSI_STATE_QUEUED;
689
        SCpnt->owner = SCSI_OWNER_LOWLEVEL;
690
        if (host->can_queue) {
691
                SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
692
                                           host->hostt->queuecommand));
693
                /*
694
                 * Use the old error handling code if we haven't converted the driver
695
                 * to use the new one yet.  Note - only the new queuecommand variant
696
                 * passes a meaningful return value.
697
                 */
698
                if (host->hostt->use_new_eh_code) {
699
                        /*
700
                         * Before we queue this command, check if the command
701
                         * length exceeds what the host adapter can handle.
702
                         */
703
                        if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
704
                                spin_lock_irqsave(&io_request_lock, flags);
705
                                rtn = host->hostt->queuecommand(SCpnt, scsi_done);
706
                                spin_unlock_irqrestore(&io_request_lock, flags);
707
                                if (rtn != 0) {
708
                                        scsi_delete_timer(SCpnt);
709
                                        scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
710
                                        SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
711
                                }
712
                        } else {
713
                                SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
714
                                SCpnt->result = (DID_ABORT << 16);
715
                                spin_lock_irqsave(&io_request_lock, flags);
716
                                scsi_done(SCpnt);
717
                                spin_unlock_irqrestore(&io_request_lock, flags);
718
                                rtn = 1;
719
                        }
720
                } else {
721
                        /*
722
                         * Before we queue this command, check if the command
723
                         * length exceeds what the host adapter can handle.
724
                         */
725
                        if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
726
                                spin_lock_irqsave(&io_request_lock, flags);
727
                                host->hostt->queuecommand(SCpnt, scsi_old_done);
728
                                spin_unlock_irqrestore(&io_request_lock, flags);
729
                        } else {
730
                                SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
731
                                SCpnt->result = (DID_ABORT << 16);
732
                                spin_lock_irqsave(&io_request_lock, flags);
733
                                scsi_old_done(SCpnt);
734
                                spin_unlock_irqrestore(&io_request_lock, flags);
735
                                rtn = 1;
736
                        }
737
                }
738
        } else {
739
                int temp;
740
 
741
                SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
742
                spin_lock_irqsave(&io_request_lock, flags);
743
                temp = host->hostt->command(SCpnt);
744
                SCpnt->result = temp;
745
#ifdef DEBUG_DELAY
746
                spin_unlock_irqrestore(&io_request_lock, flags);
747
                clock = jiffies + 4 * HZ;
748
                while (time_before(jiffies, clock)) {
749
                        barrier();
750
                        cpu_relax();
751
                }
752
                printk("done(host = %d, result = %04x) : routine at %p\n",
753
                       host->host_no, temp, host->hostt->command);
754
                spin_lock_irqsave(&io_request_lock, flags);
755
#endif
756
                if (host->hostt->use_new_eh_code) {
757
                        scsi_done(SCpnt);
758
                } else {
759
                        scsi_old_done(SCpnt);
760
                }
761
                spin_unlock_irqrestore(&io_request_lock, flags);
762
        }
763
        SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
764
        return rtn;
765
}
766
 
767
devfs_handle_t scsi_devfs_handle;
768
 
769
/*
770
 * scsi_do_cmd sends all the commands out to the low-level driver.  It
771
 * handles the specifics required for each low level driver - ie queued
772
 * or non queued.  It also prevents conflicts when different high level
773
 * drivers go for the same host at the same time.
774
 */
775
 
776
void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
777
                  void *buffer, unsigned bufflen,
778
                  int timeout, int retries)
779
{
780
        DECLARE_COMPLETION(wait);
781
        request_queue_t *q = &SRpnt->sr_device->request_queue;
782
 
783
        SRpnt->sr_request.waiting = &wait;
784
        SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
785
        scsi_do_req (SRpnt, (void *) cmnd,
786
                buffer, bufflen, scsi_wait_done, timeout, retries);
787
        generic_unplug_device(q);
788
        wait_for_completion(&wait);
789
        SRpnt->sr_request.waiting = NULL;
790
        if( SRpnt->sr_command != NULL )
791
        {
792
                scsi_release_command(SRpnt->sr_command);
793
                SRpnt->sr_command = NULL;
794
        }
795
 
796
}
797
 
798
/*
799
 * Function:    scsi_do_req
800
 *
801
 * Purpose:     Queue a SCSI request
802
 *
803
 * Arguments:   SRpnt     - command descriptor.
804
 *              cmnd      - actual SCSI command to be performed.
805
 *              buffer    - data buffer.
806
 *              bufflen   - size of data buffer.
807
 *              done      - completion function to be run.
808
 *              timeout   - how long to let it run before timeout.
809
 *              retries   - number of retries we allow.
810
 *
811
 * Lock status: With the new queueing code, this is SMP-safe, and no locks
812
 *              need be held upon entry.   The old queueing code the lock was
813
 *              assumed to be held upon entry.
814
 *
815
 * Returns:     Nothing.
816
 *
817
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
818
 *              Also, this function is now only used for queueing requests
819
 *              for things like ioctls and character device requests - this
820
 *              is because we essentially just inject a request into the
821
 *              queue for the device. Normal block device handling manipulates
822
 *              the queue directly.
823
 */
824
void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
825
              void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
826
                 int timeout, int retries)
827
{
828
        Scsi_Device * SDpnt = SRpnt->sr_device;
829
        struct Scsi_Host *host = SDpnt->host;
830
 
831
        ASSERT_LOCK(&io_request_lock, 0);
832
 
833
        SCSI_LOG_MLQUEUE(4,
834
                         {
835
                         int i;
836
                         int target = SDpnt->id;
837
                         int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
838
                         printk("scsi_do_req (host = %d, channel = %d target = %d, "
839
                    "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
840
                                "retries = %d)\n"
841
                                "command : ", host->host_no, SDpnt->channel, target, buffer,
842
                                bufflen, done, timeout, retries);
843
                         for (i  = 0; i < size; ++i)
844
                                printk("%02x  ", ((unsigned char *) cmnd)[i]);
845
                                printk("\n");
846
                         });
847
 
848
        if (!host) {
849
                panic("Invalid or not present host.\n");
850
        }
851
 
852
        /*
853
         * If the upper level driver is reusing these things, then
854
         * we should release the low-level block now.  Another one will
855
         * be allocated later when this request is getting queued.
856
         */
857
        if( SRpnt->sr_command != NULL )
858
        {
859
                scsi_release_command(SRpnt->sr_command);
860
                SRpnt->sr_command = NULL;
861
        }
862
 
863
        /*
864
         * We must prevent reentrancy to the lowlevel host driver.  This prevents
865
         * it - we enter a loop until the host we want to talk to is not busy.
866
         * Race conditions are prevented, as interrupts are disabled in between the
867
         * time we check for the host being not busy, and the time we mark it busy
868
         * ourselves.
869
         */
870
 
871
 
872
        /*
873
         * Our own function scsi_done (which marks the host as not busy, disables
874
         * the timeout counter, etc) will be called by us or by the
875
         * scsi_hosts[host].queuecommand() function needs to also call
876
         * the completion function for the high level driver.
877
         */
878
 
879
        memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
880
               sizeof(SRpnt->sr_cmnd));
881
        SRpnt->sr_bufflen = bufflen;
882
        SRpnt->sr_buffer = buffer;
883
        SRpnt->sr_allowed = retries;
884
        SRpnt->sr_done = done;
885
        SRpnt->sr_timeout_per_command = timeout;
886
 
887
        if (SRpnt->sr_cmd_len == 0)
888
                SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
889
 
890
        /*
891
         * At this point, we merely set up the command, stick it in the normal
892
         * request queue, and return.  Eventually that request will come to the
893
         * top of the list, and will be dispatched.
894
         */
895
        scsi_insert_special_req(SRpnt, 0);
896
 
897
        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
898
}
899
 
900
/*
901
 * Function:    scsi_init_cmd_from_req
902
 *
903
 * Purpose:     Queue a SCSI command
904
 * Purpose:     Initialize a Scsi_Cmnd from a Scsi_Request
905
 *
906
 * Arguments:   SCpnt     - command descriptor.
907
 *              SRpnt     - Request from the queue.
908
 *
909
 * Lock status: None needed.
910
 *
911
 * Returns:     Nothing.
912
 *
913
 * Notes:       Mainly transfer data from the request structure to the
914
 *              command structure.  The request structure is allocated
915
 *              using the normal memory allocator, and requests can pile
916
 *              up to more or less any depth.  The command structure represents
917
 *              a consumable resource, as these are allocated into a pool
918
 *              when the SCSI subsystem initializes.  The preallocation is
919
 *              required so that in low-memory situations a disk I/O request
920
 *              won't cause the memory manager to try and write out a page.
921
 *              The request structure is generally used by ioctls and character
922
 *              devices.
923
 */
924
void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
925
{
926
        struct Scsi_Host *host = SCpnt->host;
927
 
928
        ASSERT_LOCK(&io_request_lock, 0);
929
 
930
        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
931
        SRpnt->sr_command = SCpnt;
932
 
933
        if (!host) {
934
                panic("Invalid or not present host.\n");
935
        }
936
 
937
        SCpnt->cmd_len = SRpnt->sr_cmd_len;
938
        SCpnt->use_sg = SRpnt->sr_use_sg;
939
 
940
        memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
941
               sizeof(SRpnt->sr_request));
942
        memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
943
               sizeof(SCpnt->data_cmnd));
944
        SCpnt->reset_chain = NULL;
945
        SCpnt->serial_number = 0;
946
        SCpnt->serial_number_at_timeout = 0;
947
        SCpnt->bufflen = SRpnt->sr_bufflen;
948
        SCpnt->buffer = SRpnt->sr_buffer;
949
        SCpnt->flags = 0;
950
        SCpnt->retries = 0;
951
        SCpnt->allowed = SRpnt->sr_allowed;
952
        SCpnt->done = SRpnt->sr_done;
953
        SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
954
 
955
        SCpnt->sc_data_direction = SRpnt->sr_data_direction;
956
 
957
        SCpnt->sglist_len = SRpnt->sr_sglist_len;
958
        SCpnt->underflow = SRpnt->sr_underflow;
959
 
960
        SCpnt->sc_request = SRpnt;
961
 
962
        memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
963
               sizeof(SCpnt->cmnd));
964
        /* Zero the sense buffer.  Some host adapters automatically request
965
         * sense on error.  0 is not a valid sense code.
966
         */
967
        memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
968
        SCpnt->request_buffer = SRpnt->sr_buffer;
969
        SCpnt->request_bufflen = SRpnt->sr_bufflen;
970
        SCpnt->old_use_sg = SCpnt->use_sg;
971
        if (SCpnt->cmd_len == 0)
972
                SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
973
        SCpnt->old_cmd_len = SCpnt->cmd_len;
974
        SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
975
        SCpnt->old_underflow = SCpnt->underflow;
976
 
977
        /* Start the timer ticking.  */
978
 
979
        SCpnt->internal_timeout = NORMAL_TIMEOUT;
980
        SCpnt->abort_reason = 0;
981
        SCpnt->result = 0;
982
 
983
        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
984
}
985
 
986
/*
987
 * Function:    scsi_do_cmd
988
 *
989
 * Purpose:     Queue a SCSI command
990
 *
991
 * Arguments:   SCpnt     - command descriptor.
992
 *              cmnd      - actual SCSI command to be performed.
993
 *              buffer    - data buffer.
994
 *              bufflen   - size of data buffer.
995
 *              done      - completion function to be run.
996
 *              timeout   - how long to let it run before timeout.
997
 *              retries   - number of retries we allow.
998
 *
999
 * Lock status: With the new queueing code, this is SMP-safe, and no locks
1000
 *              need be held upon entry.   The old queueing code the lock was
1001
 *              assumed to be held upon entry.
1002
 *
1003
 * Returns:     Nothing.
1004
 *
1005
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
1006
 *              Also, this function is now only used for queueing requests
1007
 *              for things like ioctls and character device requests - this
1008
 *              is because we essentially just inject a request into the
1009
 *              queue for the device. Normal block device handling manipulates
1010
 *              the queue directly.
1011
 */
1012
void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
1013
              void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
1014
                 int timeout, int retries)
1015
{
1016
        struct Scsi_Host *host = SCpnt->host;
1017
 
1018
        ASSERT_LOCK(&io_request_lock, 0);
1019
 
1020
        SCpnt->pid = scsi_pid++;
1021
        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
1022
 
1023
        SCSI_LOG_MLQUEUE(4,
1024
                         {
1025
                         int i;
1026
                         int target = SCpnt->target;
1027
                         int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
1028
                         printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
1029
                    "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1030
                                "retries = %d)\n"
1031
                                "command : ", host->host_no, SCpnt->channel, target, buffer,
1032
                                bufflen, done, timeout, retries);
1033
                         for (i = 0; i < size; ++i)
1034
                                printk("%02x  ", ((unsigned char *) cmnd)[i]);
1035
                                printk("\n");
1036
                         });
1037
 
1038
        if (!host) {
1039
                panic("Invalid or not present host.\n");
1040
        }
1041
        /*
1042
         * We must prevent reentrancy to the lowlevel host driver.  This prevents
1043
         * it - we enter a loop until the host we want to talk to is not busy.
1044
         * Race conditions are prevented, as interrupts are disabled in between the
1045
         * time we check for the host being not busy, and the time we mark it busy
1046
         * ourselves.
1047
         */
1048
 
1049
 
1050
        /*
1051
         * Our own function scsi_done (which marks the host as not busy, disables
1052
         * the timeout counter, etc) will be called by us or by the
1053
         * scsi_hosts[host].queuecommand() function needs to also call
1054
         * the completion function for the high level driver.
1055
         */
1056
 
1057
        memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
1058
               sizeof(SCpnt->data_cmnd));
1059
        SCpnt->reset_chain = NULL;
1060
        SCpnt->serial_number = 0;
1061
        SCpnt->serial_number_at_timeout = 0;
1062
        SCpnt->bufflen = bufflen;
1063
        SCpnt->buffer = buffer;
1064
        SCpnt->flags = 0;
1065
        SCpnt->retries = 0;
1066
        SCpnt->allowed = retries;
1067
        SCpnt->done = done;
1068
        SCpnt->timeout_per_command = timeout;
1069
 
1070
        memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1071
               sizeof(SCpnt->cmnd));
1072
        /* Zero the sense buffer.  Some host adapters automatically request
1073
         * sense on error.  0 is not a valid sense code.
1074
         */
1075
        memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1076
        SCpnt->request_buffer = buffer;
1077
        SCpnt->request_bufflen = bufflen;
1078
        SCpnt->old_use_sg = SCpnt->use_sg;
1079
        if (SCpnt->cmd_len == 0)
1080
                SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1081
        SCpnt->old_cmd_len = SCpnt->cmd_len;
1082
        SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1083
        SCpnt->old_underflow = SCpnt->underflow;
1084
 
1085
        /* Start the timer ticking.  */
1086
 
1087
        SCpnt->internal_timeout = NORMAL_TIMEOUT;
1088
        SCpnt->abort_reason = 0;
1089
        SCpnt->result = 0;
1090
 
1091
        /*
1092
         * At this point, we merely set up the command, stick it in the normal
1093
         * request queue, and return.  Eventually that request will come to the
1094
         * top of the list, and will be dispatched.
1095
         */
1096
        scsi_insert_special_cmd(SCpnt, 0);
1097
 
1098
        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1099
}
1100
 
1101
/*
1102
 * This function is the mid-level interrupt routine, which decides how
1103
 *  to handle error conditions.  Each invocation of this function must
1104
 *  do one and *only* one of the following:
1105
 *
1106
 *      1) Insert command in BH queue.
1107
 *      2) Activate error handler for host.
1108
 *
1109
 * FIXME(eric) - I am concerned about stack overflow (still).  An
1110
 * interrupt could come while we are processing the bottom queue,
1111
 * which would cause another command to be stuffed onto the bottom
1112
 * queue, and it would in turn be processed as that interrupt handler
1113
 * is returning.  Given a sufficiently steady rate of returning
1114
 * commands, this could cause the stack to overflow.  I am not sure
1115
 * what is the most appropriate solution here - we should probably
1116
 * keep a depth count, and not process any commands while we still
1117
 * have a bottom handler active higher in the stack.
1118
 *
1119
 * There is currently code in the bottom half handler to monitor
1120
 * recursion in the bottom handler and report if it ever happens.  If
1121
 * this becomes a problem, it won't be hard to engineer something to
1122
 * deal with it so that only the outer layer ever does any real
1123
 * processing.
1124
 */
1125
void scsi_done(Scsi_Cmnd * SCpnt)
1126
{
1127
        unsigned long flags;
1128
        int tstatus;
1129
 
1130
        /*
1131
         * We don't have to worry about this one timing out any more.
1132
         */
1133
        tstatus = scsi_delete_timer(SCpnt);
1134
 
1135
        /*
1136
         * If we are unable to remove the timer, it means that the command
1137
         * has already timed out.  In this case, we have no choice but to
1138
         * let the timeout function run, as we have no idea where in fact
1139
         * that function could really be.  It might be on another processor,
1140
         * etc, etc.
1141
         */
1142
        if (!tstatus) {
1143
                SCpnt->done_late = 1;
1144
                return;
1145
        }
1146
        /* Set the serial numbers back to zero */
1147
        SCpnt->serial_number = 0;
1148
 
1149
        /*
1150
         * First, see whether this command already timed out.  If so, we ignore
1151
         * the response.  We treat it as if the command never finished.
1152
         *
1153
         * Since serial_number is now 0, the error handler cound detect this
1154
         * situation and avoid to call the low level driver abort routine.
1155
         * (DB)
1156
         *
1157
         * FIXME(eric) - I believe that this test is now redundant, due to
1158
         * the test of the return status of del_timer().
1159
         */
1160
        if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1161
                SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1162
                return;
1163
        }
1164
        spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1165
 
1166
        SCpnt->serial_number_at_timeout = 0;
1167
        SCpnt->state = SCSI_STATE_BHQUEUE;
1168
        SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1169
        SCpnt->bh_next = NULL;
1170
 
1171
        /*
1172
         * Next, put this command in the BH queue.
1173
         *
1174
         * We need a spinlock here, or compare and exchange if we can reorder incoming
1175
         * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1176
         * before bh is serviced. -jj
1177
         *
1178
         * We already have the io_request_lock here, since we are called from the
1179
         * interrupt handler or the error handler. (DB)
1180
         *
1181
         * This may be true at the moment, but I would like to wean all of the low
1182
         * level drivers away from using io_request_lock.   Technically they should
1183
         * all use their own locking.  I am adding a small spinlock to protect
1184
         * this datastructure to make it safe for that day.  (ERY)
1185
         */
1186
        if (!scsi_bh_queue_head) {
1187
                scsi_bh_queue_head = SCpnt;
1188
                scsi_bh_queue_tail = SCpnt;
1189
        } else {
1190
                scsi_bh_queue_tail->bh_next = SCpnt;
1191
                scsi_bh_queue_tail = SCpnt;
1192
        }
1193
 
1194
        spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1195
        /*
1196
         * Mark the bottom half handler to be run.
1197
         */
1198
        mark_bh(SCSI_BH);
1199
}
1200
 
1201
/*
1202
 * Procedure:   scsi_bottom_half_handler
1203
 *
1204
 * Purpose:     Called after we have finished processing interrupts, it
1205
 *              performs post-interrupt handling for commands that may
1206
 *              have completed.
1207
 *
1208
 * Notes:       This is called with all interrupts enabled.  This should reduce
1209
 *              interrupt latency, stack depth, and reentrancy of the low-level
1210
 *              drivers.
1211
 *
1212
 * The io_request_lock is required in all the routine. There was a subtle
1213
 * race condition when scsi_done is called after a command has already
1214
 * timed out but before the time out is processed by the error handler.
1215
 * (DB)
1216
 *
1217
 * I believe I have corrected this.  We simply monitor the return status of
1218
 * del_timer() - if this comes back as 0, it means that the timer has fired
1219
 * and that a timeout is in progress.   I have modified scsi_done() such
1220
 * that in this instance the command is never inserted in the bottom
1221
 * half queue.  Thus the only time we hold the lock here is when
1222
 * we wish to atomically remove the contents of the queue.
1223
 */
1224
void scsi_bottom_half_handler(void)
1225
{
1226
        Scsi_Cmnd *SCpnt;
1227
        Scsi_Cmnd *SCnext;
1228
        unsigned long flags;
1229
 
1230
 
1231
        while (1 == 1) {
1232
                spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1233
                SCpnt = scsi_bh_queue_head;
1234
                scsi_bh_queue_head = NULL;
1235
                spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1236
 
1237
                if (SCpnt == NULL) {
1238
                        return;
1239
                }
1240
                SCnext = SCpnt->bh_next;
1241
 
1242
                for (; SCpnt; SCpnt = SCnext) {
1243
                        SCnext = SCpnt->bh_next;
1244
 
1245
                        switch (scsi_decide_disposition(SCpnt)) {
1246
                        case SUCCESS:
1247
                                /*
1248
                                 * Add to BH queue.
1249
                                 */
1250
                                SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1251
                                                SCpnt->host->host_failed,
1252
                                                         SCpnt->result));
1253
 
1254
                                scsi_finish_command(SCpnt);
1255
                                break;
1256
                        case NEEDS_RETRY:
1257
                                /*
1258
                                 * We only come in here if we want to retry a command.  The
1259
                                 * test to see whether the command should be retried should be
1260
                                 * keeping track of the number of tries, so we don't end up looping,
1261
                                 * of course.
1262
                                 */
1263
                                SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1264
                                SCpnt->host->host_failed, SCpnt->result));
1265
 
1266
                                scsi_retry_command(SCpnt);
1267
                                break;
1268
                        case ADD_TO_MLQUEUE:
1269
                                /*
1270
                                 * This typically happens for a QUEUE_FULL message -
1271
                                 * typically only when the queue depth is only
1272
                                 * approximate for a given device.  Adding a command
1273
                                 * to the queue for the device will prevent further commands
1274
                                 * from being sent to the device, so we shouldn't end up
1275
                                 * with tons of things being sent down that shouldn't be.
1276
                                 */
1277
                                SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
1278
                                                              SCpnt));
1279
                                scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1280
                                break;
1281
                        default:
1282
                                /*
1283
                                 * Here we have a fatal error of some sort.  Turn it over to
1284
                                 * the error handler.
1285
                                 */
1286
                                SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1287
                                                    SCpnt, SCpnt->result,
1288
                                  atomic_read(&SCpnt->host->host_active),
1289
                                                  SCpnt->host->host_busy,
1290
                                              SCpnt->host->host_failed));
1291
 
1292
                                /*
1293
                                 * Dump the sense information too.
1294
                                 */
1295
                                if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1296
                                        SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1297
                                }
1298
                                if (SCpnt->host->eh_wait != NULL) {
1299
                                        SCpnt->host->host_failed++;
1300
                                        SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1301
                                        SCpnt->state = SCSI_STATE_FAILED;
1302
                                        SCpnt->host->in_recovery = 1;
1303
                                        /*
1304
                                         * If the host is having troubles, then look to see if this was the last
1305
                                         * command that might have failed.  If so, wake up the error handler.
1306
                                         */
1307
                                        if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1308
                                                SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1309
                                                                                  atomic_read(&SCpnt->host->eh_wait->count)));
1310
                                                up(SCpnt->host->eh_wait);
1311
                                        }
1312
                                } else {
1313
                                        /*
1314
                                         * We only get here if the error recovery thread has died.
1315
                                         */
1316
                                        scsi_finish_command(SCpnt);
1317
                                }
1318
                        }
1319
                }               /* for(; SCpnt...) */
1320
 
1321
        }                       /* while(1==1) */
1322
 
1323
}
1324
 
1325
/*
1326
 * Function:    scsi_retry_command
1327
 *
1328
 * Purpose:     Send a command back to the low level to be retried.
1329
 *
1330
 * Notes:       This command is always executed in the context of the
1331
 *              bottom half handler, or the error handler thread. Low
1332
 *              level drivers should not become re-entrant as a result of
1333
 *              this.
1334
 */
1335
int scsi_retry_command(Scsi_Cmnd * SCpnt)
1336
{
1337
        memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1338
               sizeof(SCpnt->data_cmnd));
1339
        SCpnt->request_buffer = SCpnt->buffer;
1340
        SCpnt->request_bufflen = SCpnt->bufflen;
1341
        SCpnt->use_sg = SCpnt->old_use_sg;
1342
        SCpnt->cmd_len = SCpnt->old_cmd_len;
1343
        SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1344
        SCpnt->underflow = SCpnt->old_underflow;
1345
 
1346
        /*
1347
         * Zero the sense information from the last time we tried
1348
         * this command.
1349
         */
1350
        memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1351
 
1352
        return scsi_dispatch_cmd(SCpnt);
1353
}
1354
 
1355
/*
1356
 * Function:    scsi_finish_command
1357
 *
1358
 * Purpose:     Pass command off to upper layer for finishing of I/O
1359
 *              request, waking processes that are waiting on results,
1360
 *              etc.
1361
 */
1362
void scsi_finish_command(Scsi_Cmnd * SCpnt)
1363
{
1364
        struct Scsi_Host *host;
1365
        Scsi_Device *device;
1366
        Scsi_Request * SRpnt;
1367
        unsigned long flags;
1368
 
1369
        ASSERT_LOCK(&io_request_lock, 0);
1370
 
1371
        host = SCpnt->host;
1372
        device = SCpnt->device;
1373
 
1374
        /*
1375
         * We need to protect the decrement, as otherwise a race condition
1376
         * would exist.  Fiddling with SCpnt isn't a problem as the
1377
         * design only allows a single SCpnt to be active in only
1378
         * one execution context, but the device and host structures are
1379
         * shared.
1380
         */
1381
        spin_lock_irqsave(&io_request_lock, flags);
1382
        host->host_busy--;      /* Indicate that we are free */
1383
        device->device_busy--;  /* Decrement device usage counter. */
1384
        spin_unlock_irqrestore(&io_request_lock, flags);
1385
 
1386
        /*
1387
         * Clear the flags which say that the device/host is no longer
1388
         * capable of accepting new commands.  These are set in scsi_queue.c
1389
         * for both the queue full condition on a device, and for a
1390
         * host full condition on the host.
1391
         */
1392
        host->host_blocked = FALSE;
1393
        device->device_blocked = FALSE;
1394
 
1395
        /*
1396
         * If we have valid sense information, then some kind of recovery
1397
         * must have taken place.  Make a note of this.
1398
         */
1399
        if (scsi_sense_valid(SCpnt)) {
1400
                SCpnt->result |= (DRIVER_SENSE << 24);
1401
        }
1402
        SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1403
                                      SCpnt->device->id, SCpnt->result));
1404
 
1405
        SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1406
        SCpnt->state = SCSI_STATE_FINISHED;
1407
 
1408
        /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1409
        SCpnt->use_sg = SCpnt->old_use_sg;
1410
 
1411
       /*
1412
        * If there is an associated request structure, copy the data over before we call the
1413
        * completion function.
1414
        */
1415
        SRpnt = SCpnt->sc_request;
1416
        if( SRpnt != NULL ) {
1417
               SRpnt->sr_result = SRpnt->sr_command->result;
1418
               if( SRpnt->sr_result != 0 ) {
1419
                       memcpy(SRpnt->sr_sense_buffer,
1420
                              SRpnt->sr_command->sense_buffer,
1421
                              sizeof(SRpnt->sr_sense_buffer));
1422
               }
1423
        }
1424
 
1425
        SCpnt->done(SCpnt);
1426
}
1427
 
1428
static int scsi_register_host(Scsi_Host_Template *);
1429
static int scsi_unregister_host(Scsi_Host_Template *);
1430
 
1431
/*
1432
 * Function:    scsi_release_commandblocks()
1433
 *
1434
 * Purpose:     Release command blocks associated with a device.
1435
 *
1436
 * Arguments:   SDpnt   - device
1437
 *
1438
 * Returns:     Nothing
1439
 *
1440
 * Lock status: No locking assumed or required.
1441
 *
1442
 * Notes:
1443
 */
1444
void scsi_release_commandblocks(Scsi_Device * SDpnt)
1445
{
1446
        Scsi_Cmnd *SCpnt, *SCnext;
1447
        unsigned long flags;
1448
 
1449
        spin_lock_irqsave(&device_request_lock, flags);
1450
        for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1451
                SDpnt->device_queue = SCnext = SCpnt->next;
1452
                kfree((char *) SCpnt);
1453
        }
1454
        SDpnt->has_cmdblocks = 0;
1455
        SDpnt->queue_depth = 0;
1456
        spin_unlock_irqrestore(&device_request_lock, flags);
1457
}
1458
 
1459
/*
1460
 * Function:    scsi_build_commandblocks()
1461
 *
1462
 * Purpose:     Allocate command blocks associated with a device.
1463
 *
1464
 * Arguments:   SDpnt   - device
1465
 *
1466
 * Returns:     Nothing
1467
 *
1468
 * Lock status: No locking assumed or required.
1469
 *
1470
 * Notes:
1471
 */
1472
void scsi_build_commandblocks(Scsi_Device * SDpnt)
1473
{
1474
        unsigned long flags;
1475
        struct Scsi_Host *host = SDpnt->host;
1476
        int j;
1477
        Scsi_Cmnd *SCpnt;
1478
 
1479
        spin_lock_irqsave(&device_request_lock, flags);
1480
 
1481
        if (SDpnt->queue_depth == 0)
1482
        {
1483
                SDpnt->queue_depth = host->cmd_per_lun;
1484
                if (SDpnt->queue_depth == 0)
1485
                        SDpnt->queue_depth = 1; /* live to fight another day */
1486
        }
1487
        SDpnt->device_queue = NULL;
1488
 
1489
        for (j = 0; j < SDpnt->queue_depth; j++) {
1490
                SCpnt = (Scsi_Cmnd *)
1491
                    kmalloc(sizeof(Scsi_Cmnd),
1492
                                     GFP_ATOMIC |
1493
                                (host->unchecked_isa_dma ? GFP_DMA : 0));
1494
                if (NULL == SCpnt)
1495
                        break;  /* If not, the next line will oops ... */
1496
                memset(SCpnt, 0, sizeof(Scsi_Cmnd));
1497
                SCpnt->host = host;
1498
                SCpnt->device = SDpnt;
1499
                SCpnt->target = SDpnt->id;
1500
                SCpnt->lun = SDpnt->lun;
1501
                SCpnt->channel = SDpnt->channel;
1502
                SCpnt->request.rq_status = RQ_INACTIVE;
1503
                SCpnt->use_sg = 0;
1504
                SCpnt->old_use_sg = 0;
1505
                SCpnt->old_cmd_len = 0;
1506
                SCpnt->underflow = 0;
1507
                SCpnt->old_underflow = 0;
1508
                SCpnt->transfersize = 0;
1509
                SCpnt->resid = 0;
1510
                SCpnt->serial_number = 0;
1511
                SCpnt->serial_number_at_timeout = 0;
1512
                SCpnt->host_scribble = NULL;
1513
                SCpnt->next = SDpnt->device_queue;
1514
                SDpnt->device_queue = SCpnt;
1515
                SCpnt->state = SCSI_STATE_UNUSED;
1516
                SCpnt->owner = SCSI_OWNER_NOBODY;
1517
        }
1518
        if (j < SDpnt->queue_depth) {   /* low on space (D.Gilbert 990424) */
1519
                printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1520
                       SDpnt->queue_depth, j);
1521
                SDpnt->queue_depth = j;
1522
                SDpnt->has_cmdblocks = (0 != j);
1523
        } else {
1524
                SDpnt->has_cmdblocks = 1;
1525
        }
1526
        spin_unlock_irqrestore(&device_request_lock, flags);
1527
}
1528
 
1529
void __init scsi_host_no_insert(char *str, int n)
1530
{
1531
    Scsi_Host_Name *shn, *shn2;
1532
    int len;
1533
 
1534
    len = strlen(str);
1535
    if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1536
        if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
1537
            strncpy(shn->name, str, len);
1538
            shn->name[len] = 0;
1539
            shn->host_no = n;
1540
            shn->host_registered = 0;
1541
            shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1542
            shn->next = NULL;
1543
            if (scsi_host_no_list) {
1544
                for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1545
                    ;
1546
                shn2->next = shn;
1547
            }
1548
            else
1549
                scsi_host_no_list = shn;
1550
            max_scsi_hosts = n+1;
1551
        }
1552
        else
1553
            kfree((char *) shn);
1554
    }
1555
}
1556
 
1557
#ifdef CONFIG_PROC_FS
1558
static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1559
{
1560
        Scsi_Device *scd;
1561
        struct Scsi_Host *HBA_ptr;
1562
        int size, len = 0;
1563
        off_t begin = 0;
1564
        off_t pos = 0;
1565
 
1566
        /*
1567
         * First, see if there are any attached devices or not.
1568
         */
1569
        for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1570
                if (HBA_ptr->host_queue != NULL) {
1571
                        break;
1572
                }
1573
        }
1574
        size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1575
        len += size;
1576
        pos = begin + len;
1577
        for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1578
#if 0
1579
                size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1580
                                HBA_ptr->hostt->procname);
1581
                len += size;
1582
                pos = begin + len;
1583
#endif
1584
                for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1585
                        proc_print_scsidevice(scd, buffer, &size, len);
1586
                        len += size;
1587
                        pos = begin + len;
1588
 
1589
                        if (pos < offset) {
1590
                                len = 0;
1591
                                begin = pos;
1592
                        }
1593
                        if (pos > offset + length)
1594
                                goto stop_output;
1595
                }
1596
        }
1597
 
1598
stop_output:
1599
        *start = buffer + (offset - begin);     /* Start of wanted data */
1600
        len -= (offset - begin);        /* Start slop */
1601
        if (len > length)
1602
                len = length;   /* Ending slop */
1603
        return (len);
1604
}
1605
 
1606
static int proc_scsi_gen_write(struct file * file, const char * buf,
1607
                              unsigned long length, void *data)
1608
{
1609
        struct Scsi_Device_Template *SDTpnt;
1610
        Scsi_Device *scd;
1611
        struct Scsi_Host *HBA_ptr;
1612
        char *p;
1613
        int host, channel, id, lun;
1614
        char * buffer;
1615
        int err;
1616
 
1617
        if (!buf || length>PAGE_SIZE)
1618
                return -EINVAL;
1619
 
1620
        if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1621
                return -ENOMEM;
1622
        if(copy_from_user(buffer, buf, length))
1623
        {
1624
                err =-EFAULT;
1625
                goto out;
1626
        }
1627
 
1628
        err = -EINVAL;
1629
 
1630
        if (length < PAGE_SIZE)
1631
                buffer[length] = '\0';
1632
        else if (buffer[PAGE_SIZE-1])
1633
                goto out;
1634
 
1635
        if (length < 11 || strncmp("scsi", buffer, 4))
1636
                goto out;
1637
 
1638
        /*
1639
         * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1640
         * to dump status of all scsi commands.  The number is used to specify the level
1641
         * of detail in the dump.
1642
         */
1643
        if (!strncmp("dump", buffer + 5, 4)) {
1644
                unsigned int level;
1645
 
1646
                p = buffer + 10;
1647
 
1648
                if (*p == '\0')
1649
                        goto out;
1650
 
1651
                level = simple_strtoul(p, NULL, 0);
1652
                scsi_dump_status(level);
1653
        }
1654
        /*
1655
         * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1656
         * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1657
         * llcomplete,hlqueue,hlcomplete]
1658
         */
1659
#ifdef CONFIG_SCSI_LOGGING              /* { */
1660
 
1661
        if (!strncmp("log", buffer + 5, 3)) {
1662
                char *token;
1663
                unsigned int level;
1664
 
1665
                p = buffer + 9;
1666
                token = p;
1667
                while (*p != ' ' && *p != '\t' && *p != '\0') {
1668
                        p++;
1669
                }
1670
 
1671
                if (*p == '\0') {
1672
                        if (strncmp(token, "all", 3) == 0) {
1673
                                /*
1674
                                 * Turn on absolutely everything.
1675
                                 */
1676
                                scsi_logging_level = ~0;
1677
                        } else if (strncmp(token, "none", 4) == 0) {
1678
                                /*
1679
                                 * Turn off absolutely everything.
1680
                                 */
1681
                                scsi_logging_level = 0;
1682
                        } else {
1683
                                goto out;
1684
                        }
1685
                } else {
1686
                        *p++ = '\0';
1687
 
1688
                        level = simple_strtoul(p, NULL, 0);
1689
 
1690
                        /*
1691
                         * Now figure out what to do with it.
1692
                         */
1693
                        if (strcmp(token, "error") == 0) {
1694
                                SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1695
                        } else if (strcmp(token, "timeout") == 0) {
1696
                                SCSI_SET_TIMEOUT_LOGGING(level);
1697
                        } else if (strcmp(token, "scan") == 0) {
1698
                                SCSI_SET_SCAN_BUS_LOGGING(level);
1699
                        } else if (strcmp(token, "mlqueue") == 0) {
1700
                                SCSI_SET_MLQUEUE_LOGGING(level);
1701
                        } else if (strcmp(token, "mlcomplete") == 0) {
1702
                                SCSI_SET_MLCOMPLETE_LOGGING(level);
1703
                        } else if (strcmp(token, "llqueue") == 0) {
1704
                                SCSI_SET_LLQUEUE_LOGGING(level);
1705
                        } else if (strcmp(token, "llcomplete") == 0) {
1706
                                SCSI_SET_LLCOMPLETE_LOGGING(level);
1707
                        } else if (strcmp(token, "hlqueue") == 0) {
1708
                                SCSI_SET_HLQUEUE_LOGGING(level);
1709
                        } else if (strcmp(token, "hlcomplete") == 0) {
1710
                                SCSI_SET_HLCOMPLETE_LOGGING(level);
1711
                        } else if (strcmp(token, "ioctl") == 0) {
1712
                                SCSI_SET_IOCTL_LOGGING(level);
1713
                        } else {
1714
                                goto out;
1715
                        }
1716
                }
1717
 
1718
                printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1719
        }
1720
#endif  /* CONFIG_SCSI_LOGGING */ /* } */
1721
 
1722
        /*
1723
         * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1724
         * with  "0 1 2 3" replaced by your "Host Channel Id Lun".
1725
         * Consider this feature BETA.
1726
         *     CAUTION: This is not for hotplugging your peripherals. As
1727
         *     SCSI was not designed for this you could damage your
1728
         *     hardware !
1729
         * However perhaps it is legal to switch on an
1730
         * already connected device. It is perhaps not
1731
         * guaranteed this device doesn't corrupt an ongoing data transfer.
1732
         */
1733
        if (!strncmp("add-single-device", buffer + 5, 17)) {
1734
                p = buffer + 23;
1735
 
1736
                host = simple_strtoul(p, &p, 0);
1737
                channel = simple_strtoul(p + 1, &p, 0);
1738
                id = simple_strtoul(p + 1, &p, 0);
1739
                lun = simple_strtoul(p + 1, &p, 0);
1740
 
1741
                printk(KERN_INFO "scsi singledevice %d %d %d %d\n", host, channel,
1742
                       id, lun);
1743
 
1744
                for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1745
                        if (HBA_ptr->host_no == host) {
1746
                                break;
1747
                        }
1748
                }
1749
                err = -ENXIO;
1750
                if (!HBA_ptr)
1751
                        goto out;
1752
 
1753
                for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1754
                        if ((scd->channel == channel
1755
                             && scd->id == id
1756
                             && scd->lun == lun)) {
1757
                                break;
1758
                        }
1759
                }
1760
 
1761
                err = -ENOSYS;
1762
                if (scd)
1763
                        goto out;       /* We do not yet support unplugging */
1764
 
1765
                scan_scsis(HBA_ptr, 1, channel, id, lun);
1766
 
1767
                /* FIXME (DB) This assumes that the queue_depth routines can be used
1768
                   in this context as well, while they were all designed to be
1769
                   called only once after the detect routine. (DB) */
1770
                /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1771
                   it is called before build_commandblocks() */
1772
 
1773
                err = length;
1774
                goto out;
1775
        }
1776
        /*
1777
         * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1778
         * with  "0 1 2 3" replaced by your "Host Channel Id Lun".
1779
         *
1780
         * Consider this feature pre-BETA.
1781
         *
1782
         *     CAUTION: This is not for hotplugging your peripherals. As
1783
         *     SCSI was not designed for this you could damage your
1784
         *     hardware and thoroughly confuse the SCSI subsystem.
1785
         *
1786
         */
1787
        else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1788
                p = buffer + 26;
1789
 
1790
                host = simple_strtoul(p, &p, 0);
1791
                channel = simple_strtoul(p + 1, &p, 0);
1792
                id = simple_strtoul(p + 1, &p, 0);
1793
                lun = simple_strtoul(p + 1, &p, 0);
1794
 
1795
 
1796
                for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1797
                        if (HBA_ptr->host_no == host) {
1798
                                break;
1799
                        }
1800
                }
1801
                err = -ENODEV;
1802
                if (!HBA_ptr)
1803
                        goto out;
1804
 
1805
                for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1806
                        if ((scd->channel == channel
1807
                             && scd->id == id
1808
                             && scd->lun == lun)) {
1809
                                break;
1810
                        }
1811
                }
1812
 
1813
                if (scd == NULL)
1814
                        goto out;       /* there is no such device attached */
1815
 
1816
                err = -EBUSY;
1817
                if (scd->access_count)
1818
                        goto out;
1819
 
1820
                SDTpnt = scsi_devicelist;
1821
                while (SDTpnt != NULL) {
1822
                        if (SDTpnt->detach)
1823
                                (*SDTpnt->detach) (scd);
1824
                        SDTpnt = SDTpnt->next;
1825
                }
1826
 
1827
                if (scd->attached == 0) {
1828
                        /*
1829
                         * Nobody is using this device any more.
1830
                         * Free all of the command structures.
1831
                         */
1832
                        if (HBA_ptr->hostt->revoke)
1833
                                HBA_ptr->hostt->revoke(scd);
1834
                        devfs_unregister (scd->de);
1835
                        scsi_release_commandblocks(scd);
1836
 
1837
                        /* Now we can remove the device structure */
1838
                        if (scd->next != NULL)
1839
                                scd->next->prev = scd->prev;
1840
 
1841
                        if (scd->prev != NULL)
1842
                                scd->prev->next = scd->next;
1843
 
1844
                        if (HBA_ptr->host_queue == scd) {
1845
                                HBA_ptr->host_queue = scd->next;
1846
                        }
1847
                        blk_cleanup_queue(&scd->request_queue);
1848
                        kfree((char *) scd);
1849
                } else {
1850
                        goto out;
1851
                }
1852
                err = 0;
1853
        }
1854
out:
1855
 
1856
        free_page((unsigned long) buffer);
1857
        return err;
1858
}
1859
#endif
1860
 
1861
/*
1862
 * This entry point should be called by a driver if it is trying
1863
 * to add a low level scsi driver to the system.
1864
 */
1865
static int scsi_register_host(Scsi_Host_Template * tpnt)
1866
{
1867
        int pcount;
1868
        struct Scsi_Host *shpnt;
1869
        Scsi_Device *SDpnt;
1870
        struct Scsi_Device_Template *sdtpnt;
1871
        const char *name;
1872
        unsigned long flags;
1873
        int out_of_space = 0;
1874
 
1875
        if (tpnt->next || !tpnt->detect)
1876
                return 1;       /* Must be already loaded, or
1877
                                 * no detect routine available
1878
                                 */
1879
 
1880
        /* If max_sectors isn't set, default to max */
1881
        if (!tpnt->max_sectors)
1882
                tpnt->max_sectors = MAX_SECTORS;
1883
 
1884
        pcount = next_scsi_host;
1885
 
1886
        MOD_INC_USE_COUNT;
1887
 
1888
        /* The detect routine must carefully spinunlock/spinlock if
1889
           it enables interrupts, since all interrupt handlers do
1890
           spinlock as well.
1891
           All lame drivers are going to fail due to the following
1892
           spinlock. For the time beeing let's use it only for drivers
1893
           using the new scsi code. NOTE: the detect routine could
1894
           redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1895
 
1896
        if (tpnt->use_new_eh_code) {
1897
                spin_lock_irqsave(&io_request_lock, flags);
1898
                tpnt->present = tpnt->detect(tpnt);
1899
                spin_unlock_irqrestore(&io_request_lock, flags);
1900
        } else
1901
                tpnt->present = tpnt->detect(tpnt);
1902
 
1903
        if (tpnt->present) {
1904
                if (pcount == next_scsi_host) {
1905
                        if (tpnt->present > 1) {
1906
                                printk(KERN_ERR "scsi: Failure to register low-level scsi driver");
1907
                                scsi_unregister_host(tpnt);
1908
                                return 1;
1909
                        }
1910
                        /*
1911
                         * The low-level driver failed to register a driver.
1912
                         * We can do this now.
1913
                         */
1914
                        if(scsi_register(tpnt, 0)==NULL)
1915
                        {
1916
                                printk(KERN_ERR "scsi: register failed.\n");
1917
                                scsi_unregister_host(tpnt);
1918
                                return 1;
1919
                        }
1920
                }
1921
                tpnt->next = scsi_hosts;        /* Add to the linked list */
1922
                scsi_hosts = tpnt;
1923
 
1924
                /* Add the new driver to /proc/scsi */
1925
#ifdef CONFIG_PROC_FS
1926
                build_proc_dir_entries(tpnt);
1927
#endif
1928
 
1929
 
1930
                /*
1931
                 * Add the kernel threads for each host adapter that will
1932
                 * handle error correction.
1933
                 */
1934
                for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1935
                        if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
1936
                                DECLARE_MUTEX_LOCKED(sem);
1937
 
1938
                                shpnt->eh_notify = &sem;
1939
                                kernel_thread((int (*)(void *)) scsi_error_handler,
1940
                                              (void *) shpnt, 0);
1941
 
1942
                                /*
1943
                                 * Now wait for the kernel error thread to initialize itself
1944
                                 * as it might be needed when we scan the bus.
1945
                                 */
1946
                                down(&sem);
1947
                                shpnt->eh_notify = NULL;
1948
                        }
1949
                }
1950
 
1951
                for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1952
                        if (shpnt->hostt == tpnt) {
1953
                                if (tpnt->info) {
1954
                                        name = tpnt->info(shpnt);
1955
                                } else {
1956
                                        name = tpnt->name;
1957
                                }
1958
                                printk(KERN_INFO "scsi%d : %s\n",               /* And print a little message */
1959
                                       shpnt->host_no, name);
1960
                        }
1961
                }
1962
 
1963
                /* The next step is to call scan_scsis here.  This generates the
1964
                 * Scsi_Devices entries
1965
                 */
1966
                for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1967
                        if (shpnt->hostt == tpnt) {
1968
                                scan_scsis(shpnt, 0, 0, 0, 0);
1969
                                if (shpnt->select_queue_depths != NULL) {
1970
                                        (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
1971
                                }
1972
                        }
1973
                }
1974
 
1975
                for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1976
                        if (sdtpnt->init && sdtpnt->dev_noticed)
1977
                                (*sdtpnt->init) ();
1978
                }
1979
 
1980
                /*
1981
                 * Next we create the Scsi_Cmnd structures for this host
1982
                 */
1983
                for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1984
                        for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
1985
                                if (SDpnt->host->hostt == tpnt) {
1986
                                        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1987
                                                if (sdtpnt->attach)
1988
                                                        (*sdtpnt->attach) (SDpnt);
1989
                                        if (SDpnt->attached) {
1990
                                                scsi_build_commandblocks(SDpnt);
1991
                                                if (0 == SDpnt->has_cmdblocks)
1992
                                                        out_of_space = 1;
1993
                                        }
1994
                                }
1995
                }
1996
 
1997
                /*
1998
                 * Now that we have all of the devices, resize the DMA pool,
1999
                 * as required.  */
2000
                if (!out_of_space)
2001
                        scsi_resize_dma_pool();
2002
 
2003
 
2004
                /* This does any final handling that is required. */
2005
                for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2006
                        if (sdtpnt->finish && sdtpnt->nr_dev) {
2007
                                (*sdtpnt->finish) ();
2008
                        }
2009
                }
2010
        }
2011
#if defined(USE_STATIC_SCSI_MEMORY)
2012
        printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2013
               (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2014
               (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2015
               (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2016
#endif
2017
 
2018
        if (out_of_space) {
2019
                scsi_unregister_host(tpnt);     /* easiest way to clean up?? */
2020
                return 1;
2021
        } else
2022
                return 0;
2023
}
2024
 
2025
/*
2026
 * Similarly, this entry point should be called by a loadable module if it
2027
 * is trying to remove a low level scsi driver from the system.
2028
 */
2029
static int scsi_unregister_host(Scsi_Host_Template * tpnt)
2030
{
2031
        int online_status;
2032
        int pcount0, pcount;
2033
        Scsi_Cmnd *SCpnt;
2034
        Scsi_Device *SDpnt;
2035
        Scsi_Device *SDpnt1;
2036
        struct Scsi_Device_Template *sdtpnt;
2037
        struct Scsi_Host *sh1;
2038
        struct Scsi_Host *shpnt;
2039
        char name[10];  /* host_no>=10^9? I don't think so. */
2040
 
2041
        /* get the big kernel lock, so we don't race with open() */
2042
        lock_kernel();
2043
 
2044
        /*
2045
         * First verify that this host adapter is completely free with no pending
2046
         * commands
2047
         */
2048
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2049
                for (SDpnt = shpnt->host_queue; SDpnt;
2050
                     SDpnt = SDpnt->next) {
2051
                        if (SDpnt->host->hostt == tpnt
2052
                            && SDpnt->host->hostt->module
2053
                            && GET_USE_COUNT(SDpnt->host->hostt->module))
2054
                                goto err_out;
2055
                        /*
2056
                         * FIXME(eric) - We need to find a way to notify the
2057
                         * low level driver that we are shutting down - via the
2058
                         * special device entry that still needs to get added.
2059
                         *
2060
                         * Is detach interface below good enough for this?
2061
                         */
2062
                }
2063
        }
2064
 
2065
        /*
2066
         * FIXME(eric) put a spinlock on this.  We force all of the devices offline
2067
         * to help prevent race conditions where other hosts/processors could try and
2068
         * get in and queue a command.
2069
         */
2070
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2071
                for (SDpnt = shpnt->host_queue; SDpnt;
2072
                     SDpnt = SDpnt->next) {
2073
                        if (SDpnt->host->hostt == tpnt)
2074
                                SDpnt->online = FALSE;
2075
 
2076
                }
2077
        }
2078
 
2079
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2080
                if (shpnt->hostt != tpnt) {
2081
                        continue;
2082
                }
2083
                for (SDpnt = shpnt->host_queue; SDpnt;
2084
                     SDpnt = SDpnt->next) {
2085
                        /*
2086
                         * Loop over all of the commands associated with the device.  If any of
2087
                         * them are busy, then set the state back to inactive and bail.
2088
                         */
2089
                        for (SCpnt = SDpnt->device_queue; SCpnt;
2090
                             SCpnt = SCpnt->next) {
2091
                                online_status = SDpnt->online;
2092
                                SDpnt->online = FALSE;
2093
                                if (SCpnt->request.rq_status != RQ_INACTIVE) {
2094
                                        printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2095
                                               SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2096
                                             SCpnt->state, SCpnt->owner);
2097
                                        for (SDpnt1 = shpnt->host_queue; SDpnt1;
2098
                                             SDpnt1 = SDpnt1->next) {
2099
                                                for (SCpnt = SDpnt1->device_queue; SCpnt;
2100
                                                     SCpnt = SCpnt->next)
2101
                                                        if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2102
                                                                SCpnt->request.rq_status = RQ_INACTIVE;
2103
                                        }
2104
                                        SDpnt->online = online_status;
2105
                                        printk(KERN_ERR "Device busy???\n");
2106
                                        goto err_out;
2107
                                }
2108
                                /*
2109
                                 * No, this device is really free.  Mark it as such, and
2110
                                 * continue on.
2111
                                 */
2112
                                SCpnt->state = SCSI_STATE_DISCONNECTING;
2113
                                SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING;       /* Mark as busy */
2114
                        }
2115
                }
2116
        }
2117
        /* Next we detach the high level drivers from the Scsi_Device structures */
2118
 
2119
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2120
                if (shpnt->hostt != tpnt) {
2121
                        continue;
2122
                }
2123
                for (SDpnt = shpnt->host_queue; SDpnt;
2124
                     SDpnt = SDpnt->next) {
2125
                        for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2126
                                if (sdtpnt->detach)
2127
                                        (*sdtpnt->detach) (SDpnt);
2128
 
2129
                        /* If something still attached, punt */
2130
                        if (SDpnt->attached) {
2131
                                printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
2132
                                goto err_out;
2133
                        }
2134
                        devfs_unregister (SDpnt->de);
2135
                }
2136
        }
2137
 
2138
        /*
2139
         * Next, kill the kernel error recovery thread for this host.
2140
         */
2141
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2142
                if (shpnt->hostt == tpnt
2143
                    && shpnt->hostt->use_new_eh_code
2144
                    && shpnt->ehandler != NULL) {
2145
                        DECLARE_MUTEX_LOCKED(sem);
2146
 
2147
                        shpnt->eh_notify = &sem;
2148
                        send_sig(SIGHUP, shpnt->ehandler, 1);
2149
                        down(&sem);
2150
                        shpnt->eh_notify = NULL;
2151
                }
2152
        }
2153
 
2154
        /* Next we free up the Scsi_Cmnd structures for this host */
2155
 
2156
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2157
                if (shpnt->hostt != tpnt) {
2158
                        continue;
2159
                }
2160
                for (SDpnt = shpnt->host_queue; SDpnt;
2161
                     SDpnt = shpnt->host_queue) {
2162
                        scsi_release_commandblocks(SDpnt);
2163
 
2164
                        blk_cleanup_queue(&SDpnt->request_queue);
2165
                        /* Next free up the Scsi_Device structures for this host */
2166
                        shpnt->host_queue = SDpnt->next;
2167
                        kfree((char *) SDpnt);
2168
 
2169
                }
2170
        }
2171
 
2172
        /* Next we go through and remove the instances of the individual hosts
2173
         * that were detected */
2174
 
2175
        pcount0 = next_scsi_host;
2176
        for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2177
                sh1 = shpnt->next;
2178
                if (shpnt->hostt != tpnt)
2179
                        continue;
2180
                pcount = next_scsi_host;
2181
                /* Remove the /proc/scsi directory entry */
2182
                sprintf(name,"%d",shpnt->host_no);
2183
                remove_proc_entry(name, tpnt->proc_dir);
2184
                if (tpnt->release)
2185
                        (*tpnt->release) (shpnt);
2186
                else {
2187
                        /* This is the default case for the release function.
2188
                         * It should do the right thing for most correctly
2189
                         * written host adapters.
2190
                         */
2191
                        if (shpnt->irq)
2192
                                free_irq(shpnt->irq, NULL);
2193
                        if (shpnt->dma_channel != 0xff)
2194
                                free_dma(shpnt->dma_channel);
2195
                        if (shpnt->io_port && shpnt->n_io_port)
2196
                                release_region(shpnt->io_port, shpnt->n_io_port);
2197
                }
2198
                if (pcount == next_scsi_host)
2199
                        scsi_unregister(shpnt);
2200
                tpnt->present--;
2201
        }
2202
 
2203
        /*
2204
         * If there are absolutely no more hosts left, it is safe
2205
         * to completely nuke the DMA pool.  The resize operation will
2206
         * do the right thing and free everything.
2207
         */
2208
        if (!scsi_hosts)
2209
                scsi_resize_dma_pool();
2210
 
2211
        if (pcount0 != next_scsi_host)
2212
                printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
2213
                       (next_scsi_host == 1) ? "" : "s");
2214
 
2215
#if defined(USE_STATIC_SCSI_MEMORY)
2216
        printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2217
               (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2218
               (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2219
               (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2220
#endif
2221
 
2222
        /*
2223
         * Remove it from the linked list and /proc if all
2224
         * hosts were successfully removed (ie preset == 0)
2225
         */
2226
        if (!tpnt->present) {
2227
                Scsi_Host_Template **SHTp = &scsi_hosts;
2228
                Scsi_Host_Template *SHT;
2229
 
2230
                while ((SHT = *SHTp) != NULL) {
2231
                        if (SHT == tpnt) {
2232
                                *SHTp = SHT->next;
2233
                                remove_proc_entry(tpnt->proc_name, proc_scsi);
2234
                                break;
2235
                        }
2236
                        SHTp = &SHT->next;
2237
                }
2238
        }
2239
        MOD_DEC_USE_COUNT;
2240
 
2241
        unlock_kernel();
2242
        return 0;
2243
 
2244
err_out:
2245
        unlock_kernel();
2246
        return -1;
2247
}
2248
 
2249
static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2250
 
2251
/*
2252
 * This entry point should be called by a loadable module if it is trying
2253
 * add a high level scsi driver to the system.
2254
 */
2255
static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2256
{
2257
        Scsi_Device *SDpnt;
2258
        struct Scsi_Host *shpnt;
2259
        int out_of_space = 0;
2260
 
2261
        if (tpnt->next)
2262
                return 1;
2263
 
2264
        scsi_register_device(tpnt);
2265
        /*
2266
         * First scan the devices that we know about, and see if we notice them.
2267
         */
2268
 
2269
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2270
                for (SDpnt = shpnt->host_queue; SDpnt;
2271
                     SDpnt = SDpnt->next) {
2272
                        if (tpnt->detect)
2273
                                SDpnt->detected = (*tpnt->detect) (SDpnt);
2274
                }
2275
        }
2276
 
2277
        /*
2278
         * If any of the devices would match this driver, then perform the
2279
         * init function.
2280
         */
2281
        if (tpnt->init && tpnt->dev_noticed) {
2282
                if ((*tpnt->init) ()) {
2283
                        for (shpnt = scsi_hostlist; shpnt;
2284
                             shpnt = shpnt->next) {
2285
                                for (SDpnt = shpnt->host_queue; SDpnt;
2286
                                     SDpnt = SDpnt->next) {
2287
                                        SDpnt->detected = 0;
2288
                                }
2289
                        }
2290
                        scsi_deregister_device(tpnt);
2291
                        return 1;
2292
                }
2293
        }
2294
 
2295
        /*
2296
         * Now actually connect the devices to the new driver.
2297
         */
2298
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2299
                for (SDpnt = shpnt->host_queue; SDpnt;
2300
                     SDpnt = SDpnt->next) {
2301
                        SDpnt->attached += SDpnt->detected;
2302
                        SDpnt->detected = 0;
2303
                        if (tpnt->attach)
2304
                                (*tpnt->attach) (SDpnt);
2305
                        /*
2306
                         * If this driver attached to the device, and don't have any
2307
                         * command blocks for this device, allocate some.
2308
                         */
2309
                        if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2310
                                SDpnt->online = TRUE;
2311
                                scsi_build_commandblocks(SDpnt);
2312
                                if (0 == SDpnt->has_cmdblocks)
2313
                                        out_of_space = 1;
2314
                        }
2315
                }
2316
        }
2317
 
2318
        /*
2319
         * This does any final handling that is required.
2320
         */
2321
        if (tpnt->finish && tpnt->nr_dev)
2322
                (*tpnt->finish) ();
2323
        if (!out_of_space)
2324
                scsi_resize_dma_pool();
2325
        MOD_INC_USE_COUNT;
2326
 
2327
        if (out_of_space) {
2328
                scsi_unregister_device(tpnt);   /* easiest way to clean up?? */
2329
                return 1;
2330
        } else
2331
                return 0;
2332
}
2333
 
2334
static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2335
{
2336
        Scsi_Device *SDpnt;
2337
        struct Scsi_Host *shpnt;
2338
 
2339
        lock_kernel();
2340
        /*
2341
         * If we are busy, this is not going to fly.
2342
         */
2343
        if (GET_USE_COUNT(tpnt->module) != 0)
2344
                goto error_out;
2345
 
2346
        /*
2347
         * Next, detach the devices from the driver.
2348
         */
2349
 
2350
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2351
                for (SDpnt = shpnt->host_queue; SDpnt;
2352
                     SDpnt = SDpnt->next) {
2353
                        if (tpnt->detach)
2354
                                (*tpnt->detach) (SDpnt);
2355
                        if (SDpnt->attached == 0) {
2356
                                SDpnt->online = FALSE;
2357
 
2358
                                /*
2359
                                 * Nobody is using this device any more.  Free all of the
2360
                                 * command structures.
2361
                                 */
2362
                                scsi_release_commandblocks(SDpnt);
2363
                        }
2364
                }
2365
        }
2366
        /*
2367
         * Extract the template from the linked list.
2368
         */
2369
        scsi_deregister_device(tpnt);
2370
 
2371
        MOD_DEC_USE_COUNT;
2372
        unlock_kernel();
2373
        /*
2374
         * Final cleanup for the driver is done in the driver sources in the
2375
         * cleanup function.
2376
         */
2377
        return 0;
2378
error_out:
2379
        unlock_kernel();
2380
        return -1;
2381
}
2382
 
2383
 
2384
/* This function should be called by drivers which needs to register
2385
 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2386
 * main device/hosts register function  /mathiasen
2387
 */
2388
int scsi_register_module(int module_type, void *ptr)
2389
{
2390
        switch (module_type) {
2391
        case MODULE_SCSI_HA:
2392
                return scsi_register_host((Scsi_Host_Template *) ptr);
2393
 
2394
                /* Load upper level device handler of some kind */
2395
        case MODULE_SCSI_DEV:
2396
#ifdef CONFIG_KMOD
2397
                if (scsi_hosts == NULL)
2398
                        request_module("scsi_hostadapter");
2399
#endif
2400
                return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2401
                /* The rest of these are not yet implemented */
2402
 
2403
                /* Load constants.o */
2404
        case MODULE_SCSI_CONST:
2405
 
2406
                /* Load specialized ioctl handler for some device.  Intended for
2407
                 * cdroms that have non-SCSI2 audio command sets. */
2408
        case MODULE_SCSI_IOCTL:
2409
 
2410
        default:
2411
                return 1;
2412
        }
2413
}
2414
 
2415
/* Reverse the actions taken above
2416
 */
2417
int scsi_unregister_module(int module_type, void *ptr)
2418
{
2419
        int retval = 0;
2420
 
2421
        switch (module_type) {
2422
        case MODULE_SCSI_HA:
2423
                retval = scsi_unregister_host((Scsi_Host_Template *) ptr);
2424
                break;
2425
        case MODULE_SCSI_DEV:
2426
                retval = scsi_unregister_device((struct Scsi_Device_Template *)ptr);
2427
                break;
2428
                /* The rest of these are not yet implemented. */
2429
        case MODULE_SCSI_CONST:
2430
        case MODULE_SCSI_IOCTL:
2431
                break;
2432
        default:;
2433
        }
2434
        return retval;
2435
}
2436
 
2437
#ifdef CONFIG_PROC_FS
2438
/*
2439
 * Function:    scsi_dump_status
2440
 *
2441
 * Purpose:     Brain dump of scsi system, used for problem solving.
2442
 *
2443
 * Arguments:   level - used to indicate level of detail.
2444
 *
2445
 * Notes:       The level isn't used at all yet, but we need to find some way
2446
 *              of sensibly logging varying degrees of information.  A quick one-line
2447
 *              display of each command, plus the status would be most useful.
2448
 *
2449
 *              This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2450
 *              it all off if the user wants a lean and mean kernel.  It would probably
2451
 *              also be useful to allow the user to specify one single host to be dumped.
2452
 *              A second argument to the function would be useful for that purpose.
2453
 *
2454
 *              FIXME - some formatting of the output into tables would be very handy.
2455
 */
2456
static void scsi_dump_status(int level)
2457
{
2458
#ifdef CONFIG_SCSI_LOGGING              /* { */
2459
        int i;
2460
        struct Scsi_Host *shpnt;
2461
        Scsi_Cmnd *SCpnt;
2462
        Scsi_Device *SDpnt;
2463
        printk(KERN_INFO "Dump of scsi host parameters:\n");
2464
        i = 0;
2465
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2466
                printk(KERN_INFO " %d %d %d : %d %d\n",
2467
                       shpnt->host_failed,
2468
                       shpnt->host_busy,
2469
                       atomic_read(&shpnt->host_active),
2470
                       shpnt->host_blocked,
2471
                       shpnt->host_self_blocked);
2472
        }
2473
 
2474
        printk(KERN_INFO "\n\n");
2475
        printk(KERN_INFO "Dump of scsi command parameters:\n");
2476
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2477
                printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
2478
                for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2479
                        for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
2480
                                /*  (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x      */
2481
                                printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
2482
                                       i++,
2483
 
2484
                                       SCpnt->host->host_no,
2485
                                       SCpnt->channel,
2486
                                       SCpnt->target,
2487
                                       SCpnt->lun,
2488
 
2489
                                       kdevname(SCpnt->request.rq_dev),
2490
                                       SCpnt->request.sector,
2491
                                       SCpnt->request.nr_sectors,
2492
                                       SCpnt->request.current_nr_sectors,
2493
                                       SCpnt->request.rq_status,
2494
                                       SCpnt->use_sg,
2495
 
2496
                                       SCpnt->retries,
2497
                                       SCpnt->allowed,
2498
                                       SCpnt->flags,
2499
 
2500
                                       SCpnt->timeout_per_command,
2501
                                       SCpnt->timeout,
2502
                                       SCpnt->internal_timeout,
2503
 
2504
                                       SCpnt->cmnd[0],
2505
                                       SCpnt->sense_buffer[2],
2506
                                       SCpnt->result);
2507
                        }
2508
                }
2509
        }
2510
 
2511
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2512
                for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2513
                        /* Now dump the request lists for each block device */
2514
                        printk(KERN_INFO "Dump of pending block device requests\n");
2515
                        for (i = 0; i < MAX_BLKDEV; i++) {
2516
                                struct list_head * queue_head;
2517
 
2518
                                queue_head = &blk_dev[i].request_queue.queue_head;
2519
                                if (!list_empty(queue_head)) {
2520
                                        struct request *req;
2521
                                        struct list_head * entry;
2522
 
2523
                                        printk(KERN_INFO "%d: ", i);
2524
                                        entry = queue_head->next;
2525
                                        do {
2526
                                                req = blkdev_entry_to_request(entry);
2527
                                                printk("(%s %d %ld %ld %ld) ",
2528
                                                   kdevname(req->rq_dev),
2529
                                                       req->cmd,
2530
                                                       req->sector,
2531
                                                       req->nr_sectors,
2532
                                                req->current_nr_sectors);
2533
                                        } while ((entry = entry->next) != queue_head);
2534
                                        printk("\n");
2535
                                }
2536
                        }
2537
                }
2538
        }
2539
#endif  /* CONFIG_SCSI_LOGGING */ /* } */
2540
}
2541
#endif                          /* CONFIG_PROC_FS */
2542
 
2543
static int __init scsi_host_no_init (char *str)
2544
{
2545
    static int next_no = 0;
2546
    char *temp;
2547
 
2548
    while (str) {
2549
        temp = str;
2550
        while (*temp && (*temp != ':') && (*temp != ','))
2551
            temp++;
2552
        if (!*temp)
2553
            temp = NULL;
2554
        else
2555
            *temp++ = 0;
2556
        scsi_host_no_insert(str, next_no);
2557
        str = temp;
2558
        next_no++;
2559
    }
2560
    return 1;
2561
}
2562
 
2563
static char *scsihosts;
2564
 
2565
MODULE_PARM(scsihosts, "s");
2566
MODULE_DESCRIPTION("SCSI core");
2567
MODULE_LICENSE("GPL");
2568
 
2569
#ifndef MODULE
2570
int __init scsi_setup(char *str)
2571
{
2572
        scsihosts = str;
2573
        return 1;
2574
}
2575
 
2576
__setup("scsihosts=", scsi_setup);
2577
#endif
2578
 
2579
static int __init init_scsi(void)
2580
{
2581
        struct proc_dir_entry *generic;
2582
 
2583
        printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
2584
 
2585
        if( scsi_init_minimal_dma_pool() != 0 )
2586
        {
2587
                return 1;
2588
        }
2589
 
2590
        /*
2591
         * This makes /proc/scsi and /proc/scsi/scsi visible.
2592
         */
2593
#ifdef CONFIG_PROC_FS
2594
        proc_scsi = proc_mkdir("scsi", 0);
2595
        if (!proc_scsi) {
2596
                printk (KERN_ERR "cannot init /proc/scsi\n");
2597
                return -ENOMEM;
2598
        }
2599
        generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2600
        if (!generic) {
2601
                printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
2602
                remove_proc_entry("scsi", 0);
2603
                return -ENOMEM;
2604
        }
2605
        generic->write_proc = proc_scsi_gen_write;
2606
#endif
2607
 
2608
        scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
2609
        if (scsihosts)
2610
                printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
2611
        scsi_host_no_init (scsihosts);
2612
        /*
2613
         * This is where the processing takes place for most everything
2614
         * when commands are completed.
2615
         */
2616
        init_bh(SCSI_BH, scsi_bottom_half_handler);
2617
 
2618
        return 0;
2619
}
2620
 
2621
static void __exit exit_scsi(void)
2622
{
2623
        Scsi_Host_Name *shn, *shn2 = NULL;
2624
 
2625
        remove_bh(SCSI_BH);
2626
 
2627
        devfs_unregister (scsi_devfs_handle);
2628
        for (shn = scsi_host_no_list;shn;shn = shn->next) {
2629
                if (shn->name)
2630
                        kfree(shn->name);
2631
                if (shn2)
2632
                        kfree (shn2);
2633
                shn2 = shn;
2634
        }
2635
        if (shn2)
2636
                kfree (shn2);
2637
 
2638
#ifdef CONFIG_PROC_FS
2639
        /* No, we're not here anymore. Don't show the /proc/scsi files. */
2640
        remove_proc_entry ("scsi/scsi", 0);
2641
        remove_proc_entry ("scsi", 0);
2642
#endif
2643
 
2644
        /*
2645
         * Free up the DMA pool.
2646
         */
2647
        scsi_resize_dma_pool();
2648
 
2649
}
2650
 
2651
module_init(init_scsi);
2652
module_exit(exit_scsi);
2653
 
2654
/*
2655
 * Function:    scsi_get_host_dev()
2656
 *
2657
 * Purpose:     Create a Scsi_Device that points to the host adapter itself.
2658
 *
2659
 * Arguments:   SHpnt   - Host that needs a Scsi_Device
2660
 *
2661
 * Lock status: None assumed.
2662
 *
2663
 * Returns:     The Scsi_Device or NULL
2664
 *
2665
 * Notes:
2666
 */
2667
Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2668
{
2669
        Scsi_Device * SDpnt;
2670
 
2671
        /*
2672
         * Attach a single Scsi_Device to the Scsi_Host - this should
2673
         * be made to look like a "pseudo-device" that points to the
2674
         * HA itself.  For the moment, we include it at the head of
2675
         * the host_queue itself - I don't think we want to show this
2676
         * to the HA in select_queue_depths(), as this would probably confuse
2677
         * matters.
2678
         * Note - this device is not accessible from any high-level
2679
         * drivers (including generics), which is probably not
2680
         * optimal.  We can add hooks later to attach
2681
         */
2682
        SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2683
                                        GFP_ATOMIC);
2684
        if(SDpnt == NULL)
2685
                return NULL;
2686
 
2687
        memset(SDpnt, 0, sizeof(Scsi_Device));
2688
 
2689
        SDpnt->host = SHpnt;
2690
        SDpnt->id = SHpnt->this_id;
2691
        SDpnt->type = -1;
2692
        SDpnt->queue_depth = 1;
2693
 
2694
        scsi_build_commandblocks(SDpnt);
2695
 
2696
        scsi_initialize_queue(SDpnt, SHpnt);
2697
 
2698
        SDpnt->online = TRUE;
2699
 
2700
        /*
2701
         * Initialize the object that we will use to wait for command blocks.
2702
         */
2703
        init_waitqueue_head(&SDpnt->scpnt_wait);
2704
        return SDpnt;
2705
}
2706
 
2707
/*
2708
 * Function:    scsi_free_host_dev()
2709
 *
2710
 * Purpose:     Create a Scsi_Device that points to the host adapter itself.
2711
 *
2712
 * Arguments:   SHpnt   - Host that needs a Scsi_Device
2713
 *
2714
 * Lock status: None assumed.
2715
 *
2716
 * Returns:     Nothing
2717
 *
2718
 * Notes:
2719
 */
2720
void scsi_free_host_dev(Scsi_Device * SDpnt)
2721
{
2722
        if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2723
        {
2724
                panic("Attempt to delete wrong device\n");
2725
        }
2726
 
2727
        blk_cleanup_queue(&SDpnt->request_queue);
2728
 
2729
        /*
2730
         * We only have a single SCpnt attached to this device.  Free
2731
         * it now.
2732
         */
2733
        scsi_release_commandblocks(SDpnt);
2734
        kfree(SDpnt);
2735
}
2736
 
2737
/*
2738
 * Function:    scsi_reset_provider_done_command
2739
 *
2740
 * Purpose:     Dummy done routine.
2741
 *
2742
 * Notes:       Some low level drivers will call scsi_done and end up here,
2743
 *              others won't bother.
2744
 *              We don't want the bogus command used for the bus/device
2745
 *              reset to find its way into the mid-layer so we intercept
2746
 *              it here.
2747
 */
2748
static void
2749
scsi_reset_provider_done_command(Scsi_Cmnd *SCpnt)
2750
{
2751
}
2752
 
2753
/*
2754
 * Function:    scsi_reset_provider
2755
 *
2756
 * Purpose:     Send requested reset to a bus or device at any phase.
2757
 *
2758
 * Arguments:   device  - device to send reset to
2759
 *              flag - reset type (see scsi.h)
2760
 *
2761
 * Returns:     SUCCESS/FAILURE.
2762
 *
2763
 * Notes:       This is used by the SCSI Generic driver to provide
2764
 *              Bus/Device reset capability.
2765
 */
2766
int
2767
scsi_reset_provider(Scsi_Device *dev, int flag)
2768
{
2769
        Scsi_Cmnd SC, *SCpnt = &SC;
2770
        int rtn;
2771
 
2772
        memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
2773
        SCpnt->host                     = dev->host;
2774
        SCpnt->device                   = dev;
2775
        SCpnt->target                   = dev->id;
2776
        SCpnt->lun                      = dev->lun;
2777
        SCpnt->channel                  = dev->channel;
2778
        SCpnt->request.rq_status        = RQ_SCSI_BUSY;
2779
        SCpnt->request.waiting          = NULL;
2780
        SCpnt->use_sg                   = 0;
2781
        SCpnt->old_use_sg               = 0;
2782
        SCpnt->old_cmd_len              = 0;
2783
        SCpnt->underflow                = 0;
2784
        SCpnt->transfersize             = 0;
2785
        SCpnt->resid                    = 0;
2786
        SCpnt->serial_number            = 0;
2787
        SCpnt->serial_number_at_timeout = 0;
2788
        SCpnt->host_scribble            = NULL;
2789
        SCpnt->next                     = NULL;
2790
        SCpnt->state                    = SCSI_STATE_INITIALIZING;
2791
        SCpnt->owner                    = SCSI_OWNER_MIDLEVEL;
2792
 
2793
        memset(&SCpnt->cmnd, '\0', sizeof(SCpnt->cmnd));
2794
 
2795
        SCpnt->scsi_done                = scsi_reset_provider_done_command;
2796
        SCpnt->done                     = NULL;
2797
        SCpnt->reset_chain              = NULL;
2798
 
2799
        SCpnt->buffer                   = NULL;
2800
        SCpnt->bufflen                  = 0;
2801
        SCpnt->request_buffer           = NULL;
2802
        SCpnt->request_bufflen          = 0;
2803
 
2804
        SCpnt->internal_timeout         = NORMAL_TIMEOUT;
2805
        SCpnt->abort_reason             = DID_ABORT;
2806
 
2807
        SCpnt->cmd_len                  = 0;
2808
 
2809
        SCpnt->sc_data_direction        = SCSI_DATA_UNKNOWN;
2810
        SCpnt->sc_request               = NULL;
2811
        SCpnt->sc_magic                 = SCSI_CMND_MAGIC;
2812
 
2813
        /*
2814
         * Sometimes the command can get back into the timer chain,
2815
         * so use the pid as an identifier.
2816
         */
2817
        SCpnt->pid                      = 0;
2818
 
2819
        if (dev->host->hostt->use_new_eh_code) {
2820
                rtn = scsi_new_reset(SCpnt, flag);
2821
        } else {
2822
                unsigned long flags;
2823
 
2824
                spin_lock_irqsave(&io_request_lock, flags);
2825
                rtn = scsi_old_reset(SCpnt, flag);
2826
                spin_unlock_irqrestore(&io_request_lock, flags);
2827
        }
2828
 
2829
        scsi_delete_timer(SCpnt);
2830
        return rtn;
2831
}
2832
 
2833
/*
2834
 * Overrides for Emacs so that we follow Linus's tabbing style.
2835
 * Emacs will notice this stuff at the end of the file and automatically
2836
 * adjust the settings for this buffer only.  This must remain at the end
2837
 * of the file.
2838
 * ---------------------------------------------------------------------------
2839
 * Local variables:
2840
 * c-indent-level: 4
2841
 * c-brace-imaginary-offset: 0
2842
 * c-brace-offset: -4
2843
 * c-argdecl-indent: 4
2844
 * c-label-offset: -4
2845
 * c-continued-statement-offset: 4
2846
 * c-continued-brace-offset: 0
2847
 * indent-tabs-mode: nil
2848
 * tab-width: 8
2849
 * End:
2850
 */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.