#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct request_queue;
struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
struct blk_queue_tags;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL 0xff

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

#define DISABLE_SG_CHAINING 0
#define ENABLE_SG_CHAINING 1

enum scsi_eh_timer_return {
        EH_NOT_HANDLED,
        EH_HANDLED,
        EH_RESET_TIMER,
};


struct scsi_host_template {
        struct module *module;
        const char *name;

        /*
         * Used to initialize old-style drivers.  For new-style drivers
         * just perform all work in your module initialization function.
         *
         * Status:  OBSOLETE
         */
        int (* detect)(struct scsi_host_template *);

        /*
         * Used as unload callback for hosts with old-style drivers.
         *
         * Status: OBSOLETE
         */
        int (* release)(struct Scsi_Host *);

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(* info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
        /*
         * Compat handler.  Handles the 32-bit ABI.
         * When an unknown ioctl is passed, return -ENOIOCTLCMD.
         *
         * Status: OPTIONAL
         */
        int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command, the done callback is invoked.
         *
         * If queuecommand returns 0, then the HBA has accepted the
         * command.  The done() function must be called on the command
         * when the driver has finished with it.  (You may call done on
         * the command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand.)
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (* queuecommand)(struct scsi_cmnd *,
                             void (*done)(struct scsi_cmnd *));
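
        /*
         * Illustrative sketch (not part of this header): a minimal
         * queuecommand for a hypothetical "foo" HBA, showing the
         * accept/reject convention described above.  The foo_* helpers
         * and struct foo_hba are made up; many drivers stash the done
         * callback in cmd->scsi_done and invoke it from their completion
         * interrupt.
         *
         *      static int foo_queuecommand(struct scsi_cmnd *cmd,
         *                                  void (*done)(struct scsi_cmnd *))
         *      {
         *              struct foo_hba *hba = shost_priv(cmd->device->host);
         *
         *              if (foo_hw_queue_full(hba))
         *                      return SCSI_MLQUEUE_HOST_BUSY;  // midlayer retries later
         *
         *              cmd->scsi_done = done;          // completion callback
         *              foo_hw_submit(hba, cmd);        // IRQ handler calls cmd->scsi_done(cmd)
         *              return 0;                       // command accepted
         *      }
         */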

        /*
         * The transfer functions are used to queue a scsi command to
         * the LLD. When the driver is finished processing the command
         * the done callback is invoked.
         *
         * This is called to inform the LLD to transfer
         * cmd->request_bufflen bytes. The cmd->use_sg specifies the
         * number of scatterlist entries in the command and
         * cmd->request_buffer contains the scatterlist.
         *
         * return values: see queuecommand
         *
         * If the LLD accepts the cmd, it should set the result to an
         * appropriate value when completed before calling the done function.
         *
         * STATUS: REQUIRED FOR TARGET DRIVERS
         */
        /* TODO: rename */
        int (* transfer_response)(struct scsi_cmnd *,
                                  void (*done)(struct scsi_cmnd *));

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
         * routine that is present that should work in most cases.  For those
         * driver authors that have the inclination and ability to write their
         * own strategy routine, this is where it is specified.  Note - the
         * strategy routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine. When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED     (at least one of them)
         */
        int (* eh_abort_handler)(struct scsi_cmnd *);
        int (* eh_device_reset_handler)(struct scsi_cmnd *);
        int (* eh_bus_reset_handler)(struct scsi_cmnd *);
        int (* eh_host_reset_handler)(struct scsi_cmnd *);
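
        /*
         * Illustrative sketch (assumption, not part of this header): a
         * skeleton eh_abort_handler for a hypothetical "foo" HBA.  It runs
         * in the eh thread and normally returns SUCCESS or FAILED (defined
         * in <scsi/scsi.h>); the foo_* helpers are made up.
         *
         *      static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
         *      {
         *              struct foo_hba *hba = shost_priv(cmd->device->host);
         *
         *              if (foo_hw_abort(hba, cmd))     // ask the hardware to give up on cmd
         *                      return FAILED;          // escalate to device/bus/host reset
         *              return SUCCESS;                 // midlayer will retry or complete cmd
         *      }
         */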

        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (* slave_alloc)(struct scsi_device *);

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_adjust_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, e.g.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a device
         *     specific setup basis...
         * 6.  Return 0 on success, non-0 on error.  The device will be marked
         *     as offline on error so that no access will occur.  If you return
         *     non-0, your slave_destroy routine will never get called for this
         *     device, so don't leave any loose memory hanging around, clean
         *     up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (* slave_configure)(struct scsi_device *);
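
        /*
         * Illustrative sketch (assumption, not part of this header): a
         * typical slave_configure for a hypothetical "foo" HBA that only
         * adjusts the queue depth, using scsi_adjust_queue_depth() from
         * <scsi/scsi_device.h> and MSG_SIMPLE_TAG (from <scsi/scsi_tcq.h>
         * in kernels of this vintage).  FOO_MAX_QUEUE_DEPTH is made up.
         *
         *      static int foo_slave_configure(struct scsi_device *sdev)
         *      {
         *              if (sdev->tagged_supported)
         *                      scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
         *                                              FOO_MAX_QUEUE_DEPTH);
         *              else
         *                      scsi_adjust_queue_depth(sdev, 0, 1);
         *              return 0;       // non-zero would mark the device offline
         *      }
         */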

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (* slave_destroy)(struct scsi_device *);

        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (* target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (* target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  This function will be called periodically
         * until it returns 1 with the scsi_host and the elapsed time of
         * the scan in jiffies.
         *
         * Status: OPTIONAL
         */
        int (* scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         */
        void (* scan_start)(struct Scsi_Host *);
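
        /*
         * Illustrative sketch (assumption, not part of this header): how
         * the asynchronous scan hooks pair up for a hypothetical "foo"
         * fabric HBA.  scan_start kicks off discovery in hardware and
         * scan_finished polls until discovery is done or a timeout
         * expires; the foo_* helpers are made up.
         *
         *      static void foo_scan_start(struct Scsi_Host *shost)
         *      {
         *              foo_hw_start_discovery(shost_priv(shost));
         *      }
         *
         *      static int foo_scan_finished(struct Scsi_Host *shost,
         *                                   unsigned long elapsed)
         *      {
         *              // done when discovery completed, or after ~10s of scanning
         *              return foo_hw_discovery_done(shost_priv(shost)) ||
         *                     elapsed >= 10 * HZ;
         *      }
         */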

        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         */
        int (* change_queue_depth)(struct scsi_device *, int);
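
        /*
         * Illustrative sketch (assumption, not part of this header): a
         * change_queue_depth implementation for a hypothetical "foo" HBA
         * that clamps the request to what the hardware can take and then
         * applies it with scsi_adjust_queue_depth().  FOO_MAX_QUEUE_DEPTH
         * is made up, and scsi_get_tag_type() is assumed to come from
         * <scsi/scsi_tcq.h> in kernels of this vintage.
         *
         *      static int foo_change_queue_depth(struct scsi_device *sdev,
         *                                        int depth)
         *      {
         *              if (depth > FOO_MAX_QUEUE_DEPTH)
         *                      depth = FOO_MAX_QUEUE_DEPTH;    // closest legal depth
         *              scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
         *              return sdev->queue_depth;               // depth now in effect
         *      }
         */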

        /*
         * Fill in this function to allow the changing of tag types
         * (this also allows the enabling/disabling of tag command
         * queueing).  An error should only be returned if something
         * went wrong in the driver while trying to set the tag type.
         * If the driver doesn't support the requested tag type, then
         * it should set the closest type it does support without
         * returning an error.  Returns the actual tag type set.
         */
        int (* change_queue_type)(struct scsi_device *, int);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (* bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);
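
        /*
         * Illustrative sketch (assumption, not part of this header): the
         * classic made-up geometry many drivers report from bios_param --
         * 64 heads and 32 sectors per track, with the cylinder count
         * derived from the capacity, i.e. cylinders = capacity / (64 * 32).
         * The kernel's sector_div() helper divides in place and returns
         * the remainder.
         *
         *      static int foo_bios_param(struct scsi_device *sdev,
         *                                struct block_device *bdev,
         *                                sector_t capacity, int geom[])
         *      {
         *              sector_t cylinders = capacity;
         *
         *              geom[0] = 64;                   // heads
         *              geom[1] = 32;                   // sectors per track
         *              sector_div(cylinders, 64 * 32); // cylinders = capacity / 2048
         *              geom[2] = cylinders;
         *              return 0;
         *      }
         */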

        /*
         * Can be used to export driver statistics and other information to
         * the world outside the kernel (i.e. userspace), and it also
         * provides an interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires.  The return value tells the
         * timer routine how to finish the io timeout handling:
         * EH_HANDLED:          I fixed the error, please complete the command
         * EH_RESET_TIMER:      I need more time, reset the timer and
         *                      begin counting again
         * EH_NOT_HANDLED:      Begin normal error recovery
         *
         * Status: OPTIONAL
         */
        enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
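
        /*
         * Illustrative sketch (assumption, not part of this header): a
         * transport-level eh_timed_out hook for a hypothetical "foo"
         * fabric that asks for more time while the remote port is still
         * reconnecting, and otherwise falls back to normal error
         * recovery.  foo_rport() and foo_rport_reconnecting() are made up.
         *
         *      static enum scsi_eh_timer_return foo_eh_timed_out(struct scsi_cmnd *cmd)
         *      {
         *              struct foo_rport *rport = foo_rport(cmd->device);
         *
         *              if (foo_rport_reconnecting(rport))
         *                      return EH_RESET_TIMER;  // give the command more time
         *              return EH_NOT_HANDLED;          // start normal error recovery
         *      }
         */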

        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * proc_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a given host adapter will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;

        /*
         * If the host adapter has limitations besides the segment count.
         */
        unsigned short max_sectors;

        /*
         * DMA scatter-gather segment boundary limit.  A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may be over the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True if this host adapter uses unchecked DMA onto an ISA bus.
         */
        unsigned unchecked_isa_dma:1;

        /*
         * True if this host adapter can make good use of clustering.
         * I originally thought that if the tablesize was large that it
         * was a waste of CPU cycles to prepare a cluster list, but
         * it works out that the Buslogic is faster if you use a smaller
         * number of segments (i.e. use clustering).  I guess it is
         * inefficient.
         */
        unsigned use_clustering:1;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /*
         * Ordered write support.
         */
        unsigned ordered_tag:1;

        /*
         * True if the low-level driver can support sg chaining.  This
         * will be removed eventually when all the drivers are
         * converted to support sg chaining.
         *
         * Status: OBSOLETE
         */
        unsigned use_sg_chaining:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until it restarts
         * host operations as zero is reached.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the sysfs class properties for this host, NULL terminated.
         */
        struct class_device_attribute **shost_attrs;

        /*
         * Pointer to the SCSI device properties for this host, NULL terminated.
         */
        struct device_attribute **sdev_attrs;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these, access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head legacy_hosts;
};
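
/*
 * Illustrative sketch (assumption, not part of this header): how a
 * low-level driver for a hypothetical "foo" HBA might fill in a
 * scsi_host_template using the fields above.  All foo_* symbols and the
 * chosen limits are made up for the example.
 *
 *      static struct scsi_host_template foo_template = {
 *              .module                 = THIS_MODULE,
 *              .name                   = "foo HBA",
 *              .proc_name              = "foo",
 *              .queuecommand           = foo_queuecommand,
 *              .eh_abort_handler       = foo_eh_abort_handler,
 *              .eh_host_reset_handler  = foo_eh_host_reset_handler,
 *              .slave_configure        = foo_slave_configure,
 *              .change_queue_depth     = foo_change_queue_depth,
 *              .bios_param             = foo_bios_param,
 *              .can_queue              = 64,   // simultaneous commands per host
 *              .this_id                = -1,   // host has no SCSI bus ID
 *              .sg_tablesize           = SG_ALL,
 *              .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
 *              .cmd_per_lun            = 4,
 *              .use_clustering         = ENABLE_CLUSTERING,
 *      };
 */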

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it and don't care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held.  NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;

        struct scsi_host_cmd_pool *cmd_pool;
        spinlock_t              free_list_lock;
        struct list_head        free_list; /* backup store of cmd structs */
        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex;     /* serialize scanning activity */

        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */
        struct completion     * eh_action; /* Wait for specific actions on the
                                              host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /*
         * Area to keep a shared tag map (if needed, will be
         * NULL if not).
         */
        struct blk_queue_tag    *bqt;

        /*
         * The following two fields are protected with host_lock;
         * however, eh routines can safely access during eh processing
         * without acquiring the lock.
         */
        unsigned int host_busy;            /* commands actually active on low-level */
        unsigned int host_failed;          /* commands that failed. */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned short host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
        int resetting; /* if set, it means that last_reset is a valid value */
        unsigned long last_reset;

        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple busses.
         * The first two should be set to 1 more than the actual max id
         * or lun (i.e. 8 for normal systems).
         */
        unsigned int max_id;
        unsigned int max_lun;
        unsigned int max_channel;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others.
         * For drivers that don't set this field, a value of 12 is
         * assumed.  I am leaving this as a number rather than a bit
         * because you never know what subsequent SCSI standards might do
         * (i.e. could there be a 20 byte or a 24-byte command a few years
         * down the road?).
         */
        unsigned char max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int max_sectors;
        unsigned long dma_boundary;

        /*
         * Used to assign serial numbers to the cmds.
         * Protected by the host lock.
         */
        unsigned long cmd_serial_number;

        unsigned active_mode:2;
        unsigned unchecked_isa_dma:1;
        unsigned use_clustering:1;
        unsigned use_blk_tcq:1;
        unsigned use_sg_chaining:1;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering not PC ordering. The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;)
         */
        unsigned reverse_ordering:1;

        /*
         * Ordered write support.
         */
        unsigned ordered_tag:1;

        /* Task management function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[KOBJ_NAME_LEN];
        struct workqueue_struct *work_q;

        /*
         * Host has rejected a command because it was busy.
         */
        unsigned int host_blocked;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /*
         * Queue used for scsi_tgt msgs, async events or any other requests
         * that need to be processed in userspace.
         */
        struct request_queue *uspace_req_q;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;


        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev;
        struct class_device     shost_classdev;

        /*
         * List of hosts per template.
         *
         * This is only for use by scsi_module.c for legacy templates.
         * For these, access to it is synchronized implicitly by
         * module_init/module_exit.
         */
        struct list_head sht_legacy_list;

        /*
         * Points to the transport data (if any) which is allocated
         * separately.
         */
        void *shost_data;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[0]  /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define         class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_classdev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
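
/*
 * Illustrative usage (assumption, not part of this header): shost_printk()
 * prefixes a message with the host's generic device name, e.g.
 *
 *      shost_printk(KERN_WARNING, shost, "request queue full (%u busy)\n",
 *                   shost->host_busy);
 */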

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}
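
/*
 * Illustrative usage (assumption, not part of this header): the hostdata[]
 * area at the end of struct Scsi_Host holds the driver's private per-host
 * state.  Its size is the second argument of scsi_host_alloc(), and
 * shost_priv() returns a pointer to it; struct foo_hba is made up.
 *
 *      struct Scsi_Host *shost = scsi_host_alloc(&foo_template,
 *                                                sizeof(struct foo_hba));
 *      struct foo_hba *hba = shost_priv(shost);
 */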

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
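
/*
 * Illustrative sketch (assumption, not part of this header): the usual
 * host registration sequence in a hypothetical PCI driver's probe routine,
 * and the matching teardown.  Error handling is trimmed, pdev is the PCI
 * device being probed, and all foo_* symbols are made up.
 *
 *      shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *      if (!shost)
 *              return -ENOMEM;
 *      shost->max_id = 16;
 *      shost->max_lun = 8;
 *      err = scsi_add_host(shost, &pdev->dev); // make it known to the midlayer
 *      if (err) {
 *              scsi_host_put(shost);           // drop the reference from alloc
 *              return err;
 *      }
 *      scsi_scan_host(shost);                  // discover attached devices
 *
 *      // ... and on remove:
 *      scsi_remove_host(shost);
 *      scsi_host_put(shost);
 */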
730
 
731
extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
732
 
733
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
734
{
735
        return shost->shost_gendev.parent;
736
}
737
 
738
/**
739
 * scsi_host_scan_allowed - Is scanning of this host allowed
740
 * @shost:      Pointer to Scsi_Host.
741
 **/
742
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
743
{
744
        return shost->shost_state == SHOST_RUNNING;
745
}
746
 
747
extern void scsi_unblock_requests(struct Scsi_Host *);
748
extern void scsi_block_requests(struct Scsi_Host *);
749
 
750
struct class_container;
751
 
752
extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
753
                                                void (*) (struct request_queue *));
754
/*
755
 * These two functions are used to allocate and free a pseudo device
756
 * which will connect to the host adapter itself rather than any
757
 * physical device.  You must deallocate when you are done with the
758
 * thing.  This physical pseudo-device isn't real and won't be available
759
 * from any high-level drivers.
760
 */
761
extern void scsi_free_host_dev(struct scsi_device *);
762
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
763
 
764
/* legacy interfaces */
765
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
766
extern void scsi_unregister(struct Scsi_Host *);
767
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
768
 
769
#endif /* _SCSI_SCSI_HOST_H */
