1 |
1626 |
jcastillo |
/*
|
2 |
|
|
* sd.c Copyright (C) 1992 Drew Eckhardt
|
3 |
|
|
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
|
4 |
|
|
*
|
5 |
|
|
* Linux scsi disk driver
|
6 |
|
|
* Initial versions: Drew Eckhardt
|
7 |
|
|
* Subsequent revisions: Eric Youngdale
|
8 |
|
|
*
|
9 |
|
|
* <drew@colorado.edu>
|
10 |
|
|
*
|
11 |
|
|
* Modified by Eric Youngdale ericy@cais.com to
|
12 |
|
|
* add scatter-gather, multiple outstanding request, and other
|
13 |
|
|
* enhancements.
|
14 |
|
|
*
|
15 |
|
|
* Modified by Eric Youngdale eric@aib.com to support loadable
|
16 |
|
|
* low-level scsi drivers.
|
17 |
|
|
*/
|
18 |
|
|
|
19 |
|
|
#include <linux/module.h>
|
20 |
|
|
#ifdef MODULE
|
21 |
|
|
/*
|
22 |
|
|
* This is a variable in scsi.c that is set when we are processing something
|
23 |
|
|
* after boot time. By definition, this is true when we are a loadable module
|
24 |
|
|
* ourselves.
|
25 |
|
|
*/
|
26 |
|
|
#define MODULE_FLAG 1
|
27 |
|
|
#else
|
28 |
|
|
#define MODULE_FLAG scsi_loadable_module_flag
|
29 |
|
|
#endif /* MODULE */
|
30 |
|
|
|
31 |
|
|
#include <linux/fs.h>
|
32 |
|
|
#include <linux/kernel.h>
|
33 |
|
|
#include <linux/sched.h>
|
34 |
|
|
#include <linux/mm.h>
|
35 |
|
|
#include <linux/string.h>
|
36 |
|
|
#include <linux/errno.h>
|
37 |
|
|
#include <linux/interrupt.h>
|
38 |
|
|
|
39 |
|
|
#include <asm/system.h>
|
40 |
|
|
|
41 |
|
|
#define MAJOR_NR SCSI_DISK_MAJOR
|
42 |
|
|
#include <linux/blk.h>
|
43 |
|
|
#include "scsi.h"
|
44 |
|
|
#include "hosts.h"
|
45 |
|
|
#include "sd.h"
|
46 |
|
|
#include <scsi/scsi_ioctl.h>
|
47 |
|
|
#include "constants.h"
|
48 |
|
|
|
49 |
|
|
#include <linux/genhd.h>
|
50 |
|
|
|
51 |
|
|
/*
|
52 |
|
|
* static const char RCSid[] = "$Header:";
|
53 |
|
|
*/
|
54 |
|
|
|
55 |
|
|
#define MAX_RETRIES 5
|
56 |
|
|
|
57 |
|
|
/*
|
58 |
|
|
* Time out in seconds for disks and Magneto-opticals (which are slower).
|
59 |
|
|
*/
|
60 |
|
|
|
61 |
|
|
#define SD_TIMEOUT (20 * HZ)
|
62 |
|
|
#define SD_MOD_TIMEOUT (25 * HZ)
|
63 |
|
|
|
64 |
|
|
/*
 * A request may merge adjacent buffers into one scatter-gather segment
 * only when the host adapter supports clustering and the device is not
 * a magneto-optical (TYPE_MOD) unit.
 */
#define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
                                SC->device->type != TYPE_MOD)

struct hd_struct * sd;             /* per-minor partition table (start_sect / nr_sects) */

Scsi_Disk * rscsi_disks = NULL;    /* per-unit disk state, indexed by DEVICE_NR() */
static int * sd_sizes;             /* per-minor size; zero means non-existent partition (see sd_open) */
static int * sd_blocksizes;        /* per-minor software block size */
static int * sd_hardsizes;         /* Hardware sector size */

/* Implemented in sd_ioctl.c; also used internally for door lock/unlock. */
extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
|
75 |
|
|
|
76 |
|
|
/*
 * Forward declarations for the file_operations hooks and the mid-layer
 * (Scsi_Device_Template) callbacks referenced by the structures below.
 */
static int check_scsidisk_media_change(kdev_t);
static int fop_revalidate_scsidisk(kdev_t);

static int sd_init_onedisk(int);

static void requeue_sd_request (Scsi_Cmnd * SCpnt);

static int sd_init(void);
static void sd_finish(void);
static int sd_attach(Scsi_Device *);
static int sd_detect(Scsi_Device *);
static void sd_detach(Scsi_Device *);
|
88 |
|
|
|
89 |
|
|
/*
 * Registration record handed to the SCSI mid-layer: ties the "sd"
 * driver (TYPE_DISK devices, major SCSI_DISK_MAJOR) to its
 * detect/init/finish/attach/detach callbacks.
 * NOTE(review): the positional values (NULL, 0, 0, 0, 1) map onto
 * mid-layer bookkeeping fields -- verify field order against hosts.h.
 */
struct Scsi_Device_Template sd_template =
{ NULL, "disk", "sd", NULL, TYPE_DISK,
  SCSI_DISK_MAJOR, 0, 0, 0, 1,
  sd_detect, sd_init,
  sd_finish, sd_attach, sd_detach
};
|
95 |
|
|
|
96 |
|
|
static int sd_open(struct inode * inode, struct file * filp)
|
97 |
|
|
{
|
98 |
|
|
int target;
|
99 |
|
|
target = DEVICE_NR(inode->i_rdev);
|
100 |
|
|
|
101 |
|
|
if(target >= sd_template.dev_max || !rscsi_disks[target].device)
|
102 |
|
|
return -ENXIO; /* No such device */
|
103 |
|
|
|
104 |
|
|
/*
|
105 |
|
|
* Make sure that only one process can do a check_change_disk at one time.
|
106 |
|
|
* This is also used to lock out further access when the partition table
|
107 |
|
|
* is being re-read.
|
108 |
|
|
*/
|
109 |
|
|
|
110 |
|
|
while (rscsi_disks[target].device->busy)
|
111 |
|
|
barrier();
|
112 |
|
|
if(rscsi_disks[target].device->removable) {
|
113 |
|
|
check_disk_change(inode->i_rdev);
|
114 |
|
|
|
115 |
|
|
/*
|
116 |
|
|
* If the drive is empty, just let the open fail.
|
117 |
|
|
*/
|
118 |
|
|
if ( !rscsi_disks[target].ready )
|
119 |
|
|
return -ENXIO;
|
120 |
|
|
|
121 |
|
|
/*
|
122 |
|
|
* Similarly, if the device has the write protect tab set,
|
123 |
|
|
* have the open fail if the user expects to be able to write
|
124 |
|
|
* to the thing.
|
125 |
|
|
*/
|
126 |
|
|
if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) )
|
127 |
|
|
return -EROFS;
|
128 |
|
|
}
|
129 |
|
|
|
130 |
|
|
/*
|
131 |
|
|
* See if we are requesting a non-existent partition. Do this
|
132 |
|
|
* after checking for disk change.
|
133 |
|
|
*/
|
134 |
|
|
if(sd_sizes[MINOR(inode->i_rdev)] == 0)
|
135 |
|
|
return -ENXIO;
|
136 |
|
|
|
137 |
|
|
if(rscsi_disks[target].device->removable)
|
138 |
|
|
if(!rscsi_disks[target].device->access_count)
|
139 |
|
|
sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
|
140 |
|
|
|
141 |
|
|
rscsi_disks[target].device->access_count++;
|
142 |
|
|
if (rscsi_disks[target].device->host->hostt->usage_count)
|
143 |
|
|
(*rscsi_disks[target].device->host->hostt->usage_count)++;
|
144 |
|
|
if(sd_template.usage_count) (*sd_template.usage_count)++;
|
145 |
|
|
return 0;
|
146 |
|
|
}
|
147 |
|
|
|
148 |
|
|
static void sd_release(struct inode * inode, struct file * file)
|
149 |
|
|
{
|
150 |
|
|
int target;
|
151 |
|
|
fsync_dev(inode->i_rdev);
|
152 |
|
|
|
153 |
|
|
target = DEVICE_NR(inode->i_rdev);
|
154 |
|
|
|
155 |
|
|
rscsi_disks[target].device->access_count--;
|
156 |
|
|
if (rscsi_disks[target].device->host->hostt->usage_count)
|
157 |
|
|
(*rscsi_disks[target].device->host->hostt->usage_count)--;
|
158 |
|
|
if(sd_template.usage_count) (*sd_template.usage_count)--;
|
159 |
|
|
|
160 |
|
|
if(rscsi_disks[target].device->removable) {
|
161 |
|
|
if(!rscsi_disks[target].device->access_count)
|
162 |
|
|
sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
|
163 |
|
|
}
|
164 |
|
|
}
|
165 |
|
|
|
166 |
|
|
/* Forward declaration: referenced by the sd_gendisk initializer below. */
static void sd_geninit(struct gendisk *);

/*
 * Block-device entry points for /dev/sd*.  Reads and writes go through
 * the generic block-device helpers; open/release/ioctl and the
 * removable-media hooks are implemented in this file.
 */
static struct file_operations sd_fops = {
    NULL,                        /* lseek - default */
    block_read,                  /* read - general block-dev read */
    block_write,                 /* write - general block-dev write */
    NULL,                        /* readdir - bad */
    NULL,                        /* select */
    sd_ioctl,                    /* ioctl */
    NULL,                        /* mmap */
    sd_open,                     /* open code */
    sd_release,                  /* release */
    block_fsync,                 /* fsync */
    NULL,                        /* fasync */
    check_scsidisk_media_change, /* Disk change */
    fop_revalidate_scsidisk      /* revalidate */
};
|
183 |
|
|
|
184 |
|
|
/*
 * Generic disk descriptor for the sd driver: 16 minors per physical
 * disk (4 partition bits).  Per-disk capacities are filled in by
 * sd_geninit(); nr_real is maintained as units attach and detach.
 */
static struct gendisk sd_gendisk = {
    MAJOR_NR,           /* Major number */
    "sd",               /* Major name */
    4,                  /* Bits to shift to get real from partition */
    1 << 4,             /* Number of partitions per real */
    0,                  /* maximum number of real */
    sd_geninit,         /* init function */
    NULL,               /* hd struct */
    NULL,               /* block sizes */
    0,                  /* number */
    NULL,               /* internal */
    NULL                /* next */
};
|
197 |
|
|
|
198 |
|
|
static void sd_geninit (struct gendisk *ignored)
|
199 |
|
|
{
|
200 |
|
|
int i;
|
201 |
|
|
|
202 |
|
|
for (i = 0; i < sd_template.dev_max; ++i)
|
203 |
|
|
if(rscsi_disks[i].device)
|
204 |
|
|
sd[i << 4].nr_sects = rscsi_disks[i].capacity;
|
205 |
|
|
#if 0
|
206 |
|
|
/* No longer needed - we keep track of this as we attach/detach */
|
207 |
|
|
sd_gendisk.nr_real = sd_template.dev_max;
|
208 |
|
|
#endif
|
209 |
|
|
}
|
210 |
|
|
|
211 |
|
|
/*
|
212 |
|
|
* rw_intr is the interrupt routine for the device driver.
|
213 |
|
|
* It will be notified on the end of a SCSI read / write, and
|
214 |
|
|
* will take one of several actions based on success or failure.
|
215 |
|
|
*/
|
216 |
|
|
|
217 |
|
|
/*
 * rw_intr() - completion routine for disk READ/WRITE commands.
 *
 * Called by the mid-layer when a SCSI command finishes.  Works out how
 * many sectors transferred successfully (including partial success on a
 * MEDIUM ERROR, using the sense-data information field), copies back
 * and frees any DMA bounce buffers or scatter-gather lists, completes
 * the successful part of the request via end_scsi_request(), and either
 * requeues the remainder or fails it based on the sense key.
 *
 * Fix: the dead "#ifdef REMAP" branch was syntactically invalid if
 * REMAP were ever defined (missing parentheses around the if condition
 * and a dangling else right before #endif).  Behavior with REMAP
 * undefined is unchanged.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
    int result = SCpnt->result;
    int this_count = SCpnt->bufflen >> 9;   /* transfer length in 512-byte sectors */
    int good_sectors = (result == 0 ? this_count : 0);
    int block_sectors = 1;

#ifdef DEBUG
    printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
           SCpnt->host->host_no, result);
#endif

    /*
     * Handle MEDIUM ERRORs that indicate partial success.  Since this is a
     * relatively rare error condition, no care is taken to avoid unnecessary
     * additional work such as memcpy's that could be avoided.
     */
    if (driver_byte(result) != 0 &&         /* An error occurred */
        SCpnt->sense_buffer[0] == 0xF0 &&   /* Sense data is valid */
        SCpnt->sense_buffer[2] == MEDIUM_ERROR)
    {
        /* First failing sector, from the fixed-format sense information field. */
        long error_sector = (SCpnt->sense_buffer[3] << 24) |
                            (SCpnt->sense_buffer[4] << 16) |
                            (SCpnt->sense_buffer[5] << 8) |
                            SCpnt->sense_buffer[6];
        int sector_size =
            rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;

        if (SCpnt->request.bh != NULL)
            block_sectors = SCpnt->request.bh->b_size >> 9;

        /* Scale the device sector number to 512-byte units. */
        if (sector_size == 1024)
        {
            error_sector <<= 1;
            if (block_sectors < 2) block_sectors = 2;
        }
        else if (sector_size == 256)
            error_sector >>= 1;

        /* Make it partition-relative and round down to a buffer boundary. */
        error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
        error_sector &= ~ (block_sectors - 1);
        good_sectors = error_sector - SCpnt->request.sector;
        if (good_sectors < 0 || good_sectors >= this_count)
            good_sectors = 0;
    }

    /*
     * Handle RECOVERED ERRORs that indicate success after recovery action
     * by the target device.
     */
    if (SCpnt->sense_buffer[0] == 0xF0 &&   /* Sense data is valid */
        SCpnt->sense_buffer[2] == RECOVERED_ERROR)
    {
        printk("scsidisk recovered I/O error: dev %s, sector %lu, absolute sector %lu\n",
               kdevname(SCpnt->request.rq_dev), SCpnt->request.sector,
               SCpnt->request.sector + sd[MINOR(SCpnt->request.rq_dev)].start_sect);
        good_sectors = this_count;
        result = 0;
    }

    /*
     * First case : we assume that the command succeeded.  One of two things
     * will happen here.  Either we will be finished, or there will be more
     * sectors that we were unable to read last time.
     */
    if (good_sectors > 0) {

#ifdef DEBUG
        printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
               SCpnt->request.nr_sectors);
        printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
                       sgpnt[i].length);
#endif
                /*
                 * alt_address set means this segment went through a
                 * DMA-safe bounce buffer: copy read data back to the
                 * real destination, then free the bounce buffer.
                 */
                if (sgpnt[i].alt_address) {
                    if (SCpnt->request.cmd == READ)
                        memcpy(sgpnt[i].alt_address, sgpnt[i].address,
                               sgpnt[i].length);
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                }
            }

            /* Free list of scatter-gather pointers */
            scsi_free(SCpnt->buffer, SCpnt->sglist_len);
        } else {
            /* Non-s/g case: a single bounce buffer may have been used. */
            if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
                printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                       SCpnt->bufflen);
#endif
                if (SCpnt->request.cmd == READ)
                    memcpy(SCpnt->request.buffer, SCpnt->buffer,
                           SCpnt->bufflen);
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
            }
        }
        /*
         * If multiple sectors are requested in one buffer, then
         * they will have been finished off by the first command.
         * If not, then we have a multi-buffer command.
         */
        if (SCpnt->request.nr_sectors > this_count)
        {
            SCpnt->request.errors = 0;

            if (!SCpnt->request.bh)
            {
#ifdef DEBUG
                printk("sd%c : handling page request, no buffer\n",
                       'a' + MINOR(SCpnt->request.rq_dev));
#endif
                /*
                 * The SCpnt->request.nr_sectors field is always done in
                 * 512 byte sectors, even if this really isn't the case.
                 */
                panic("sd.c: linked page request (%lx %x)",
                      SCpnt->request.sector, this_count);
            }
        }
        SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
        if (result == 0)
        {
            /* Everything transferred cleanly: start the next request. */
            requeue_sd_request(SCpnt);
            return;
        }
    }

    if (good_sectors == 0) {

        /* Free up any indirection buffers we allocated for DMA purposes. */
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                       SCpnt->bufflen);
#endif
                if (sgpnt[i].alt_address) {
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                }
            }
            scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
        } else {
#ifdef DEBUG
            printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                   SCpnt->bufflen);
#endif
            if (SCpnt->buffer != SCpnt->request.buffer)
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
        }
    }

    /*
     * Now, if we were good little boys and girls, Santa left us a request
     * sense buffer.  We can extract information from this, so we
     * can choose a block to remap, etc.
     */
    if (driver_byte(result) != 0) {
        if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
            /*
             * Not yet implemented.  A read will fail after being remapped,
             * a write will call the strategy routine again.
             */
            if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
            {
                result = 0;
            }
#endif
        }

        if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
            if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
                    /* detected disc change.  set a bit and quietly refuse
                     * further access.
                     */
                    rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
                    SCpnt = end_scsi_request(SCpnt, 0, this_count);
                    requeue_sd_request(SCpnt);
                    return;
                }
                else
                {
                    /*
                     * Must have been a power glitch, or a bus reset.
                     * Could not have been a media change, so we just retry
                     * the request and see what happens.
                     */
                    requeue_sd_request(SCpnt);
                    return;
                }
            }
        }


        /* If we had an ILLEGAL REQUEST returned, then we may have
         * performed an unsupported command.  The only thing this should be
         * would be a ten byte read where only a six byte read was supported.
         * Also, on a system where READ CAPACITY failed, we have read past
         * the end of the disk.
         */
        if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
            if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
                /* Drop back to six-byte CDBs and retry the command. */
                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
                requeue_sd_request(SCpnt);
                result = 0;
            } else {
                /* ???? */
            }
        }

        if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
            printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
                   SCpnt->host->host_no, (int) SCpnt->channel,
                   (int) SCpnt->target, (int) SCpnt->lun);
            print_command(SCpnt->cmnd);
            print_sense("sd", SCpnt);
            /* Fail just the bad buffer and retry the rest. */
            SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
            requeue_sd_request(SCpnt);
            return;
        }
    } /* driver byte != 0 */
    if (result) {
        printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);

        if (driver_byte(result) & DRIVER_SENSE)
            print_sense("sd", SCpnt);
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
        requeue_sd_request(SCpnt);
        return;
    }
}
|
466 |
|
|
|
467 |
|
|
/*
|
468 |
|
|
* requeue_sd_request() is the request handler function for the sd driver.
|
469 |
|
|
* Its function in life is to take block device requests, and translate
|
470 |
|
|
* them to SCSI commands.
|
471 |
|
|
*/
|
472 |
|
|
|
473 |
|
|
/*
 * do_sd_request() - block-layer request function for the sd driver.
 *
 * Pulls requests off the block queue (CURRENT), allocates a Scsi_Cmnd
 * for them, and hands each to requeue_sd_request(), which builds and
 * issues the actual SCSI command.  Loops until no queueable work
 * remains.  Interrupts are disabled (cli()) whenever the request list
 * is examined or modified; the restore_flags() placement is a known
 * latency compromise (see comment below).
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    Scsi_Device * SDev;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;   /* ensures allocate_device() is called at most once (see below) */

    save_flags(flags);
    while (1==1){
        cli();
        /* Queue head not yet active: nothing for us to do. */
        if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
            restore_flags(flags);
            return;
        }

        INIT_SCSI_REQUEST;
        SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;

        /*
         * I am not sure where the best place to do this is.  We need
         * to hook in a place where we are likely to come if in user
         * space.
         */
        if( SDev->was_reset )
        {
            /*
             * We need to relock the door, but we might
             * be in an interrupt handler.  Only do this
             * from user space, since we do not want to
             * sleep from an interrupt.
             */
            if( SDev->removable && !intr_count )
            {
                scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
                /* scsi_ioctl may allow CURRENT to change, so start over. */
                SDev->was_reset = 0;
                continue;
            }
            SDev->was_reset = 0;
        }

        /* We have to be careful here. allocate_device will get a free pointer,
         * but there is no guarantee that it is queueable.  In normal usage,
         * we want to call this, because other types of devices may have the
         * host all tied up, and we want to make sure that we have at least
         * one request pending for this type of device. We can also come
         * through here while servicing an interrupt, because of the need to
         * start another command. If we call allocate_device more than once,
         * then the system can wedge if the command is not queueable. The
         * request_queueable function is safe because it checks to make sure
         * that the host is able to take another command before it returns
         * a pointer.
         */

        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
                                    rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
        else SCpnt = NULL;

        /*
         * The following restore_flags leads to latency problems.  FIXME.
         * Using a "sti()" gets rid of the latency problems but causes
         * race conditions and crashes.
         */
        restore_flags(flags);

        /* This is a performance enhancement. We dig down into the request
         * list and try to find a queueable request (i.e. device not busy,
         * and host able to accept another command. If we find one, then we
         * queue it. This can make a big difference on systems with more than
         * one disk drive.  We want to have the interrupts off when monkeying
         * with the request list, because otherwise the kernel might try to
         * slip in a request in between somewhere.
         */

        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;   /* trails one node behind req for unlinking */
            req1 = NULL;
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req,
                                          rscsi_disks[DEVICE_NR(req->rq_dev)].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            }
            /* Unlink the request we claimed from the queue. */
            if (SCpnt && req->rq_status == RQ_INACTIVE) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            }
            restore_flags(flags);
        }

        if (!SCpnt) return; /* Could not find anything to do */

        /* Queue command */
        requeue_sd_request(SCpnt);
    } /* While */
}
|
576 |
|
|
|
577 |
|
|
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
|
578 |
|
|
{
|
579 |
|
|
int dev, devm, block, this_count;
|
580 |
|
|
unsigned char cmd[10];
|
581 |
|
|
int bounce_size, contiguous;
|
582 |
|
|
int max_sg;
|
583 |
|
|
struct buffer_head * bh, *bhp;
|
584 |
|
|
char * buff, *bounce_buffer;
|
585 |
|
|
|
586 |
|
|
repeat:
|
587 |
|
|
|
588 |
|
|
if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
|
589 |
|
|
do_sd_request();
|
590 |
|
|
return;
|
591 |
|
|
}
|
592 |
|
|
|
593 |
|
|
devm = MINOR(SCpnt->request.rq_dev);
|
594 |
|
|
dev = DEVICE_NR(SCpnt->request.rq_dev);
|
595 |
|
|
|
596 |
|
|
block = SCpnt->request.sector;
|
597 |
|
|
this_count = 0;
|
598 |
|
|
|
599 |
|
|
#ifdef DEBUG
|
600 |
|
|
printk("Doing sd request, dev = %d, block = %d\n", devm, block);
|
601 |
|
|
#endif
|
602 |
|
|
|
603 |
|
|
if (devm >= (sd_template.dev_max << 4) ||
|
604 |
|
|
!rscsi_disks[dev].device ||
|
605 |
|
|
block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
|
606 |
|
|
{
|
607 |
|
|
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
|
608 |
|
|
goto repeat;
|
609 |
|
|
}
|
610 |
|
|
|
611 |
|
|
block += sd[devm].start_sect;
|
612 |
|
|
|
613 |
|
|
if (rscsi_disks[dev].device->changed)
|
614 |
|
|
{
|
615 |
|
|
/*
|
616 |
|
|
* quietly refuse to do anything to a changed disc until the changed
|
617 |
|
|
* bit has been reset
|
618 |
|
|
*/
|
619 |
|
|
/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
|
620 |
|
|
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
|
621 |
|
|
goto repeat;
|
622 |
|
|
}
|
623 |
|
|
|
624 |
|
|
#ifdef DEBUG
|
625 |
|
|
printk("sd%c : real dev = /dev/sd%c, block = %d\n",
|
626 |
|
|
'a' + devm, dev, block);
|
627 |
|
|
#endif
|
628 |
|
|
|
629 |
|
|
/*
|
630 |
|
|
* If we have a 1K hardware sectorsize, prevent access to single
|
631 |
|
|
* 512 byte sectors. In theory we could handle this - in fact
|
632 |
|
|
* the scsi cdrom driver must be able to handle this because
|
633 |
|
|
* we typically use 1K blocksizes, and cdroms typically have
|
634 |
|
|
* 2K hardware sectorsizes. Of course, things are simpler
|
635 |
|
|
* with the cdrom, since it is read-only. For performance
|
636 |
|
|
* reasons, the filesystems should be able to handle this
|
637 |
|
|
* and not force the scsi disk driver to use bounce buffers
|
638 |
|
|
* for this.
|
639 |
|
|
*/
|
640 |
|
|
if (rscsi_disks[dev].sector_size == 1024)
|
641 |
|
|
if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
|
642 |
|
|
printk("sd.c:Bad block number requested");
|
643 |
|
|
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
|
644 |
|
|
goto repeat;
|
645 |
|
|
}
|
646 |
|
|
|
647 |
|
|
switch (SCpnt->request.cmd)
|
648 |
|
|
{
|
649 |
|
|
case WRITE :
|
650 |
|
|
if (!rscsi_disks[dev].device->writeable)
|
651 |
|
|
{
|
652 |
|
|
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
|
653 |
|
|
goto repeat;
|
654 |
|
|
}
|
655 |
|
|
cmd[0] = WRITE_6;
|
656 |
|
|
break;
|
657 |
|
|
case READ :
|
658 |
|
|
cmd[0] = READ_6;
|
659 |
|
|
break;
|
660 |
|
|
default :
|
661 |
|
|
panic ("Unknown sd command %d\n", SCpnt->request.cmd);
|
662 |
|
|
}
|
663 |
|
|
|
664 |
|
|
SCpnt->this_count = 0;
|
665 |
|
|
|
666 |
|
|
/* If the host adapter can deal with very large scatter-gather
|
667 |
|
|
* requests, it is a waste of time to cluster
|
668 |
|
|
*/
|
669 |
|
|
contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
|
670 |
|
|
bounce_buffer = NULL;
|
671 |
|
|
bounce_size = (SCpnt->request.nr_sectors << 9);
|
672 |
|
|
|
673 |
|
|
/* First see if we need a bounce buffer for this request. If we do, make
|
674 |
|
|
* sure that we can allocate a buffer. Do not waste space by allocating
|
675 |
|
|
* a bounce buffer if we are straddling the 16Mb line
|
676 |
|
|
*/
|
677 |
|
|
if (contiguous && SCpnt->request.bh &&
|
678 |
|
|
((long) SCpnt->request.bh->b_data)
|
679 |
|
|
+ (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
|
680 |
|
|
&& SCpnt->host->unchecked_isa_dma) {
|
681 |
|
|
if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
|
682 |
|
|
bounce_buffer = (char *) scsi_malloc(bounce_size);
|
683 |
|
|
if(!bounce_buffer) contiguous = 0;
|
684 |
|
|
}
|
685 |
|
|
|
686 |
|
|
if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
|
687 |
|
|
for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
|
688 |
|
|
bhp = bhp->b_reqnext) {
|
689 |
|
|
if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
|
690 |
|
|
if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
|
691 |
|
|
contiguous = 0;
|
692 |
|
|
break;
|
693 |
|
|
}
|
694 |
|
|
}
|
695 |
|
|
if (!SCpnt->request.bh || contiguous) {
|
696 |
|
|
|
697 |
|
|
/* case of page request (i.e. raw device), or unlinked buffer */
|
698 |
|
|
this_count = SCpnt->request.nr_sectors;
|
699 |
|
|
buff = SCpnt->request.buffer;
|
700 |
|
|
SCpnt->use_sg = 0;
|
701 |
|
|
|
702 |
|
|
} else if (SCpnt->host->sg_tablesize == 0 ||
|
703 |
|
|
(need_isa_buffer && dma_free_sectors <= 10)) {
|
704 |
|
|
|
705 |
|
|
/* Case of host adapter that cannot scatter-gather. We also
|
706 |
|
|
* come here if we are running low on DMA buffer memory. We set
|
707 |
|
|
* a threshold higher than that we would need for this request so
|
708 |
|
|
* we leave room for other requests. Even though we would not need
|
709 |
|
|
* it all, we need to be conservative, because if we run low enough
|
710 |
|
|
* we have no choice but to panic.
|
711 |
|
|
*/
|
712 |
|
|
if (SCpnt->host->sg_tablesize != 0 &&
|
713 |
|
|
need_isa_buffer &&
|
714 |
|
|
dma_free_sectors <= 10)
|
715 |
|
|
printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
|
716 |
|
|
|
717 |
|
|
this_count = SCpnt->request.current_nr_sectors;
|
718 |
|
|
buff = SCpnt->request.buffer;
|
719 |
|
|
SCpnt->use_sg = 0;
|
720 |
|
|
|
721 |
|
|
} else {
|
722 |
|
|
|
723 |
|
|
/* Scatter-gather capable host adapter */
|
724 |
|
|
struct scatterlist * sgpnt;
|
725 |
|
|
int count, this_count_max;
|
726 |
|
|
int counted;
|
727 |
|
|
|
728 |
|
|
bh = SCpnt->request.bh;
|
729 |
|
|
this_count = 0;
|
730 |
|
|
this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
|
731 |
|
|
count = 0;
|
732 |
|
|
bhp = NULL;
|
733 |
|
|
while(bh) {
|
734 |
|
|
if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
|
735 |
|
|
if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
|
736 |
|
|
!CLUSTERABLE_DEVICE(SCpnt) ||
|
737 |
|
|
(SCpnt->host->unchecked_isa_dma &&
|
738 |
|
|
((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
|
739 |
|
|
if (count < SCpnt->host->sg_tablesize) count++;
|
740 |
|
|
else break;
|
741 |
|
|
}
|
742 |
|
|
this_count += (bh->b_size >> 9);
|
743 |
|
|
bhp = bh;
|
744 |
|
|
bh = bh->b_reqnext;
|
745 |
|
|
}
|
746 |
|
|
#if 0
|
747 |
|
|
if(SCpnt->host->unchecked_isa_dma &&
|
748 |
|
|
((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
|
749 |
|
|
#endif
|
750 |
|
|
SCpnt->use_sg = count; /* Number of chains */
|
751 |
|
|
/* scsi_malloc can only allocate in chunks of 512 bytes */
|
752 |
|
|
count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
|
753 |
|
|
|
754 |
|
|
SCpnt->sglist_len = count;
|
755 |
|
|
max_sg = count / sizeof(struct scatterlist);
|
756 |
|
|
if(SCpnt->host->sg_tablesize < max_sg)
|
757 |
|
|
max_sg = SCpnt->host->sg_tablesize;
|
758 |
|
|
sgpnt = (struct scatterlist * ) scsi_malloc(count);
|
759 |
|
|
if (!sgpnt) {
|
760 |
|
|
printk("Warning - running *really* short on DMA buffers\n");
|
761 |
|
|
SCpnt->use_sg = 0; /* No memory left - bail out */
|
762 |
|
|
this_count = SCpnt->request.current_nr_sectors;
|
763 |
|
|
buff = SCpnt->request.buffer;
|
764 |
|
|
} else {
|
765 |
|
|
memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
|
766 |
|
|
* if memory is available
|
767 |
|
|
*/
|
768 |
|
|
buff = (char *) sgpnt;
|
769 |
|
|
counted = 0;
|
770 |
|
|
for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
|
771 |
|
|
count < SCpnt->use_sg && bh;
|
772 |
|
|
count++, bh = bhp) {
|
773 |
|
|
|
774 |
|
|
bhp = bh->b_reqnext;
|
775 |
|
|
|
776 |
|
|
if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
|
777 |
|
|
sgpnt[count].length += bh->b_size;
|
778 |
|
|
counted += bh->b_size >> 9;
|
779 |
|
|
|
780 |
|
|
if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
|
781 |
|
|
ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
|
782 |
|
|
!sgpnt[count].alt_address) {
|
783 |
|
|
sgpnt[count].alt_address = sgpnt[count].address;
|
784 |
|
|
/* We try to avoid exhausting the DMA pool, since it is
|
785 |
|
|
* easier to control usage here. In other places we might
|
786 |
|
|
* have a more pressing need, and we would be screwed if
|
787 |
|
|
* we ran out */
|
788 |
|
|
if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
|
789 |
|
|
sgpnt[count].address = NULL;
|
790 |
|
|
} else {
|
791 |
|
|
sgpnt[count].address =
|
792 |
|
|
(char *) scsi_malloc(sgpnt[count].length);
|
793 |
|
|
}
|
794 |
|
|
/* If we start running low on DMA buffers, we abort the
|
795 |
|
|
* scatter-gather operation, and free all of the memory
|
796 |
|
|
* we have allocated. We want to ensure that all scsi
|
797 |
|
|
* operations are able to do at least a non-scatter/gather
|
798 |
|
|
* operation */
|
799 |
|
|
if(sgpnt[count].address == NULL){ /* Out of dma memory */
|
800 |
|
|
#if 0
|
801 |
|
|
printk("Warning: Running low on SCSI DMA buffers");
|
802 |
|
|
/* Try switching back to a non s-g operation. */
|
803 |
|
|
while(--count >= 0){
|
804 |
|
|
if(sgpnt[count].alt_address)
|
805 |
|
|
scsi_free(sgpnt[count].address,
|
806 |
|
|
sgpnt[count].length);
|
807 |
|
|
}
|
808 |
|
|
this_count = SCpnt->request.current_nr_sectors;
|
809 |
|
|
buff = SCpnt->request.buffer;
|
810 |
|
|
SCpnt->use_sg = 0;
|
811 |
|
|
scsi_free(sgpnt, SCpnt->sglist_len);
|
812 |
|
|
#endif
|
813 |
|
|
SCpnt->use_sg = count;
|
814 |
|
|
this_count = counted -= bh->b_size >> 9;
|
815 |
|
|
break;
|
816 |
|
|
}
|
817 |
|
|
}
|
818 |
|
|
|
819 |
|
|
/* Only cluster buffers if we know that we can supply DMA
|
820 |
|
|
* buffers large enough to satisfy the request. Do not cluster
|
821 |
|
|
* a new request if this would mean that we suddenly need to
|
822 |
|
|
* start using DMA bounce buffers */
|
823 |
|
|
if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
|
824 |
|
|
&& CLUSTERABLE_DEVICE(SCpnt)) {
|
825 |
|
|
char * tmp;
|
826 |
|
|
|
827 |
|
|
if (((long) sgpnt[count].address) + sgpnt[count].length +
|
828 |
|
|
bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
|
829 |
|
|
(SCpnt->host->unchecked_isa_dma) &&
|
830 |
|
|
!sgpnt[count].alt_address) continue;
|
831 |
|
|
|
832 |
|
|
if(!sgpnt[count].alt_address) {count--; continue; }
|
833 |
|
|
if(dma_free_sectors > 10)
|
834 |
|
|
tmp = (char *) scsi_malloc(sgpnt[count].length
|
835 |
|
|
+ bhp->b_size);
|
836 |
|
|
else {
|
837 |
|
|
tmp = NULL;
|
838 |
|
|
max_sg = SCpnt->use_sg;
|
839 |
|
|
}
|
840 |
|
|
if(tmp){
|
841 |
|
|
scsi_free(sgpnt[count].address, sgpnt[count].length);
|
842 |
|
|
sgpnt[count].address = tmp;
|
843 |
|
|
count--;
|
844 |
|
|
continue;
|
845 |
|
|
}
|
846 |
|
|
|
847 |
|
|
/* If we are allowed another sg chain, then increment
|
848 |
|
|
* counter so we can insert it. Otherwise we will end
|
849 |
|
|
up truncating */
|
850 |
|
|
|
851 |
|
|
if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
|
852 |
|
|
} /* contiguous buffers */
|
853 |
|
|
} /* for loop */
|
854 |
|
|
|
855 |
|
|
/* This is actually how many we are going to transfer */
|
856 |
|
|
this_count = counted;
|
857 |
|
|
|
858 |
|
|
if(count < SCpnt->use_sg || SCpnt->use_sg
|
859 |
|
|
> SCpnt->host->sg_tablesize){
|
860 |
|
|
bh = SCpnt->request.bh;
|
861 |
|
|
printk("Use sg, count %d %x %d\n",
|
862 |
|
|
SCpnt->use_sg, count, dma_free_sectors);
|
863 |
|
|
printk("maxsg = %x, counted = %d this_count = %d\n",
|
864 |
|
|
max_sg, counted, this_count);
|
865 |
|
|
while(bh){
|
866 |
|
|
printk("[%p %lx] ", bh->b_data, bh->b_size);
|
867 |
|
|
bh = bh->b_reqnext;
|
868 |
|
|
}
|
869 |
|
|
if(SCpnt->use_sg < 16)
|
870 |
|
|
for(count=0; count<SCpnt->use_sg; count++)
|
871 |
|
|
printk("{%d:%p %p %d} ", count,
|
872 |
|
|
sgpnt[count].address,
|
873 |
|
|
sgpnt[count].alt_address,
|
874 |
|
|
sgpnt[count].length);
|
875 |
|
|
panic("Ooops");
|
876 |
|
|
}
|
877 |
|
|
|
878 |
|
|
if (SCpnt->request.cmd == WRITE)
|
879 |
|
|
for(count=0; count<SCpnt->use_sg; count++)
|
880 |
|
|
if(sgpnt[count].alt_address)
|
881 |
|
|
memcpy(sgpnt[count].address, sgpnt[count].alt_address,
|
882 |
|
|
sgpnt[count].length);
|
883 |
|
|
} /* Able to malloc sgpnt */
|
884 |
|
|
} /* Host adapter capable of scatter-gather */
|
885 |
|
|
|
886 |
|
|
/* Now handle the possibility of DMA to addresses > 16Mb */
|
887 |
|
|
|
888 |
|
|
if(SCpnt->use_sg == 0){
|
889 |
|
|
if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
|
890 |
|
|
(SCpnt->host->unchecked_isa_dma)) {
|
891 |
|
|
if(bounce_buffer)
|
892 |
|
|
buff = bounce_buffer;
|
893 |
|
|
else
|
894 |
|
|
buff = (char *) scsi_malloc(this_count << 9);
|
895 |
|
|
if(buff == NULL) { /* Try backing off a bit if we are low on mem*/
|
896 |
|
|
this_count = SCpnt->request.current_nr_sectors;
|
897 |
|
|
buff = (char *) scsi_malloc(this_count << 9);
|
898 |
|
|
if(!buff) panic("Ran out of DMA buffers.");
|
899 |
|
|
}
|
900 |
|
|
if (SCpnt->request.cmd == WRITE)
|
901 |
|
|
memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
|
902 |
|
|
}
|
903 |
|
|
}
|
904 |
|
|
#ifdef DEBUG
|
905 |
|
|
printk("sd%c : %s %d/%d 512 byte blocks.\n",
|
906 |
|
|
'a' + devm,
|
907 |
|
|
(SCpnt->request.cmd == WRITE) ? "writing" : "reading",
|
908 |
|
|
this_count, SCpnt->request.nr_sectors);
|
909 |
|
|
#endif
|
910 |
|
|
|
911 |
|
|
cmd[1] = (SCpnt->lun << 5) & 0xe0;
|
912 |
|
|
|
913 |
|
|
if (rscsi_disks[dev].sector_size == 1024){
|
914 |
|
|
if(block & 1) panic("sd.c:Bad block number requested");
|
915 |
|
|
if(this_count & 1) panic("sd.c:Bad block number requested");
|
916 |
|
|
block = block >> 1;
|
917 |
|
|
this_count = this_count >> 1;
|
918 |
|
|
}
|
919 |
|
|
|
920 |
|
|
if (rscsi_disks[dev].sector_size == 256){
|
921 |
|
|
block = block << 1;
|
922 |
|
|
this_count = this_count << 1;
|
923 |
|
|
}
|
924 |
|
|
|
925 |
|
|
if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
|
926 |
|
|
{
|
927 |
|
|
if (this_count > 0xffff)
|
928 |
|
|
this_count = 0xffff;
|
929 |
|
|
|
930 |
|
|
cmd[0] += READ_10 - READ_6 ;
|
931 |
|
|
cmd[2] = (unsigned char) (block >> 24) & 0xff;
|
932 |
|
|
cmd[3] = (unsigned char) (block >> 16) & 0xff;
|
933 |
|
|
cmd[4] = (unsigned char) (block >> 8) & 0xff;
|
934 |
|
|
cmd[5] = (unsigned char) block & 0xff;
|
935 |
|
|
cmd[6] = cmd[9] = 0;
|
936 |
|
|
cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
|
937 |
|
|
cmd[8] = (unsigned char) this_count & 0xff;
|
938 |
|
|
}
|
939 |
|
|
else
|
940 |
|
|
{
|
941 |
|
|
if (this_count > 0xff)
|
942 |
|
|
this_count = 0xff;
|
943 |
|
|
|
944 |
|
|
cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
|
945 |
|
|
cmd[2] = (unsigned char) ((block >> 8) & 0xff);
|
946 |
|
|
cmd[3] = (unsigned char) block & 0xff;
|
947 |
|
|
cmd[4] = (unsigned char) this_count;
|
948 |
|
|
cmd[5] = 0;
|
949 |
|
|
}
|
950 |
|
|
|
951 |
|
|
/*
|
952 |
|
|
* We shouldn't disconnect in the middle of a sector, so with a dumb
|
953 |
|
|
* host adapter, it's safe to assume that we can at least transfer
|
954 |
|
|
* this many bytes between each connect / disconnect.
|
955 |
|
|
*/
|
956 |
|
|
|
957 |
|
|
SCpnt->transfersize = rscsi_disks[dev].sector_size;
|
958 |
|
|
SCpnt->underflow = this_count << 9;
|
959 |
|
|
scsi_do_cmd (SCpnt, (void *) cmd, buff,
|
960 |
|
|
this_count * rscsi_disks[dev].sector_size,
|
961 |
|
|
rw_intr,
|
962 |
|
|
(SCpnt->device->type == TYPE_DISK ?
|
963 |
|
|
SD_TIMEOUT : SD_MOD_TIMEOUT),
|
964 |
|
|
MAX_RETRIES);
|
965 |
|
|
}
|
966 |
|
|
|
967 |
|
|
/*
 * Check whether the medium in a removable SCSI disk has changed.
 *
 * full_dev: full kdev_t of the device to check.
 * Returns non-zero if the medium changed (or the unit is not ready,
 * which is treated as "changed" so higher layers flush their caches),
 * 0 for fixed disks, invalid devices, or an unchanged medium.
 */
static int check_scsidisk_media_change(kdev_t full_dev){
    int retval;
    int target;
    struct inode inode;
    int flag = 0;

    target = DEVICE_NR(full_dev);

    /* Reject minors beyond the device table or slots with no device. */
    if (target >= sd_template.dev_max ||
        !rscsi_disks[target].device) {
        printk("SCSI disk request error: invalid device.\n");
        return 0;
    }

    /* Fixed disks can never report a media change. */
    if(!rscsi_disks[target].device->removable) return 0;

    /* sd_ioctl() only looks at i_rdev, so a stack inode suffices. */
    inode.i_rdev = full_dev; /* This is all we really need here */

    /* Using Start/Stop enables differentiation between drive with
     * no cartridge loaded - NOT READY, drive with changed cartridge -
     * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
     * This also handles drives that auto spin down. eg iomega jaz 1GB
     * as this will spin up the drive.
     */
    retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_START_UNIT, 0);

    if(retval){ /* Unable to test, unit probably not ready. This usually
                 * means there is no disc in the drive. Mark as changed,
                 * and we will figure it out later once the drive is
                 * available again. */

        rscsi_disks[target].ready = 0;
        rscsi_disks[target].device->changed = 1;
        return 1; /* This will force a flush, if called from
                   * check_disk_change */
    }

    /*
     * for removable scsi disk ( FLOPTICAL ) we have to recognise the
     * presence of disk in the drive. This is kept in the Scsi_Disk
     * struct and tested at open ! Daniel Roche ( dan@lectra.fr )
     */

    rscsi_disks[target].ready = 1; /* FLOPTICAL */

    /* Report the change flag and clear it (flag is always 0 here, so
     * the changed bit is always reset after being read). */
    retval = rscsi_disks[target].device->changed;
    if(!flag) rscsi_disks[target].device->changed = 0;
    return retval;
}
|
1016 |
|
|
|
1017 |
|
|
/*
 * Completion callback for the internal commands issued by
 * sd_init_onedisk(): marks the request done and wakes the sleeper.
 */
static void sd_init_done (Scsi_Cmnd * SCpnt)
{
    struct request * rq = &SCpnt->request;

    /* Still busy from the mid-layer's point of view, but complete. */
    rq->rq_status = RQ_SCSI_DONE;

    /* Wake whoever is sleeping in down() waiting for this command. */
    if (rq->sem)
        up(rq->sem);
}
|
1028 |
|
|
|
1029 |
|
|
/*
 * Probe and initialize one SCSI disk: spin it up if necessary, read
 * its capacity and sector size, and (for removables) its write-protect
 * status. Fills in rscsi_disks[i] and the sd_hardsizes[] table.
 *
 * i: index into rscsi_disks[].
 * Returns i on success; on an unsupported sector size for a fixed disk
 * the entry is deleted and i is still returned.
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    unsigned long spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
     * considered a fatal error, and many devices report such an error
     * just after a scsi bus reset.
     */

    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    /* NOTE(review): scsi_malloc() result is not checked here — if the
     * DMA pool is exhausted, buffer is NULL; confirm pool sizing. */
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Spin up drives, as required. Only do this at boot time */
    /* Spinup needs to be done for module loads too. */
    do{
        /* Retry TEST UNIT READY up to 3 times to swallow the
         * UNIT ATTENTION that follows a bus reset. */
        retries = 0;
        while(retries < 3)
        {
            cmd[0] = TEST_UNIT_READY;
            cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
            memset ((void *) &cmd[2], 0, 8);
            SCpnt->cmd_len = 0;
            SCpnt->sense_buffer[0] = 0;
            SCpnt->sense_buffer[2] = 0;

            /* Issue the command and sleep on a private semaphore
             * until sd_init_done() ups it. */
            {
                struct semaphore sem = MUTEX_LOCKED;
                /* Mark as really busy again */
                SCpnt->request.rq_status = RQ_SCSI_BUSY;
                SCpnt->request.sem = &sem;
                scsi_do_cmd (SCpnt,
                             (void *) cmd, (void *) buffer,
                             512, sd_init_done, SD_TIMEOUT,
                             MAX_RETRIES);
                down(&sem);
            }

            the_result = SCpnt->result;
            retries++;
            if( the_result == 0
                || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
                break;
        }

        /* Look for non-removable devices that return NOT_READY.
         * Issue command to spin up drive for these cases. */
        if(the_result && !rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY) {
            unsigned long time1;
            if(!spintime){
                printk( "sd%c: Spinning up disk...", 'a' + i );
                cmd[0] = START_STOP;
                cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                cmd[1] |= 1; /* Return immediately */
                memset ((void *) &cmd[2], 0, 8);
                cmd[4] = 1; /* Start spin cycle */
                SCpnt->cmd_len = 0;
                SCpnt->sense_buffer[0] = 0;
                SCpnt->sense_buffer[2] = 0;

                {
                    struct semaphore sem = MUTEX_LOCKED;
                    /* Mark as really busy again */
                    SCpnt->request.rq_status = RQ_SCSI_BUSY;
                    SCpnt->request.sem = &sem;
                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);
                    down(&sem);
                }

                spintime = jiffies;
            }

            /* Busy-wait one second between polls.
             * NOTE(review): direct jiffies comparison — presumably safe
             * at boot, but would misbehave across a jiffies wrap. */
            time1 = jiffies + HZ;
            while(jiffies < time1); /* Wait 1 second for next try */
            printk( "." );
        }
        /* Keep polling for up to 100 seconds after the spin-up. */
    } while(the_result && spintime && spintime+100*HZ > jiffies);
    if (spintime) {
        if (the_result)
            printk( "not responding...\n" );
        else
            printk( "ready\n" );
    }

    /* READ CAPACITY, retried 3 times (see comment above about
     * UNIT ATTENTION after resets). */
    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        {
            struct semaphore sem = MUTEX_LOCKED;
            /* Mark as really busy again */
            SCpnt->request.rq_status = RQ_SCSI_BUSY;
            SCpnt->request.sem = &sem;
            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         8, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);
            down(&sem); /* sleep until it is ready */
        }

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */

    wake_up(&SCpnt->device->device_wait);

    /* Wake up a process waiting for device */

    /*
     * The SCSI standard says:
     * "READ CAPACITY is necessary for self configuring software"
     * While not mandatory, support of READ CAPACITY is strongly encouraged.
     * We used to die if we couldn't successfully do a READ CAPACITY.
     * But, now we go on about our way. The side effects of this are
     *
     * 1. We can't know block size with certainty. I have said "512 bytes
     * is it" as this is most common.
     *
     * 2. Recovery from when some one attempts to read past the end of the
     * raw device will be slower.
     */

    if (the_result)
    {
        /* READ CAPACITY failed: assume 512-byte sectors and a 1GB
         * (0x1fffff-sector) disk so the device is still usable. */
        printk ("sd%c : READ CAPACITY failed.\n"
                "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
                'a' + i, 'a' + i,
                status_byte(the_result),
                msg_byte(the_result),
                host_byte(the_result),
                driver_byte(the_result)
            );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n",
                   'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
               'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* Set dirty bit for removable devices if not ready - sometimes drives
         * will not report this properly. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /*
         * FLOPTICAL , if read_capa is ok , drive is assumed to be ready
         */
        rscsi_disks[i].ready = 1;

        /* READ CAPACITY returns the LAST block address (big-endian),
         * so capacity is that value plus one. */
        rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
                                       (buffer[1] << 16) |
                                       (buffer[2] << 8) |
                                       buffer[3]);

        /* Bytes 4-7: block length in bytes (big-endian). */
        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size == 0) {
            rscsi_disks[i].sector_size = 512;
            printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
        }


        /* Only 256-, 512- and 1024-byte sectors are supported. */
        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
                    'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                /* Removable: keep the entry, capacity 0, so a good
                 * cartridge can be inserted later. */
                rscsi_disks[i].capacity = 0;
            } else {
                /* Fixed disk: unusable, delete the entry entirely. */
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                sd_gendisk.nr_real--;
                return i;
            }
        }
        {
            /*
             * The msdos fs needs to know the hardware sector size
             * So I have created this table. See ll_rw_blk.c
             * Jacques Gelinas (Jacques@solucorp.qc.ca)
             */
            int m, mb;
            int sz_quot, sz_rem;
            int hard_sector = rscsi_disks[i].sector_size;
            /* There are 16 minors allocated for each major device */
            for (m=i<<4; m<((i+1)<<4); m++){
                sd_hardsizes[m] = hard_sector;
            }
            mb = rscsi_disks[i].capacity / 1024 * hard_sector / 1024;
            /* sz = div(m/100, 10); this seems to not be in the libr */
            m = (mb + 50) / 100;
            sz_quot = m / 10;
            sz_rem = m - (10 * sz_quot);
            printk ("SCSI device sd%c: hdwr sector= %d bytes."
                    " Sectors= %d [%d MB] [%d.%1d GB]\n",
                    i+'a', hard_sector, rscsi_disks[i].capacity,
                    mb, sz_quot, sz_rem);
        }
        /* Normalize capacity to 512-byte units for the block layer. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */
    }


    /*
     * Unless otherwise specified, this is not write protected.
     */
    rscsi_disks[i].write_prot = 0;
    if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
        /* FLOPTICAL */

        /*
         * for removable scsi disk ( FLOPTICAL ) we have to recognise
         * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
         * and tested at open !
         * Daniel Roche ( dan@lectra.fr )
         */

        memset ((void *) &cmd[0], 0, 8);
        cmd[0] = MODE_SENSE;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        cmd[2] = 1; /* page code 1 ?? */
        cmd[4] = 12;
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        /* same code as READCAPA !! */
        {
            struct semaphore sem = MUTEX_LOCKED;
            SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy again */
            SCpnt->request.sem = &sem;
            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         512, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);
            down(&sem);
        }

        the_result = SCpnt->result;
        SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
        wake_up(&SCpnt->device->device_wait);

        if ( the_result ) {
            /* MODE SENSE failed: err on the safe side. */
            printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
            rscsi_disks[i].write_prot = 1;
        } else {
            /* Bit 7 of the device-specific parameter byte in the
             * mode parameter header is the WP bit. */
            rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
            printk ("sd%c: Write Protect is %s\n",i+'a',
                    rscsi_disks[i].write_prot ? "on" : "off");
        }

    } /* check for write protect */

    rscsi_disks[i].ten = 1;   /* Assume 10-byte READ/WRITE commands work */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
|
1319 |
|
|
|
1320 |
|
|
/*
|
1321 |
|
|
* The sd_init() function looks at all SCSI drives present, determines
|
1322 |
|
|
* their size, and reads partition table entries for them.
|
1323 |
|
|
*/
|
1324 |
|
|
|
1325 |
|
|
static int sd_registered = 0;
|
1326 |
|
|
|
1327 |
|
|
/*
 * One-time driver initialization: register the block major and allocate
 * the per-device tables (rscsi_disks, sd_sizes, sd_blocksizes,
 * sd_hardsizes, sd partition table) sized from the number of devices
 * noticed during the scan. Returns 0 on success, 1 if the major number
 * could not be registered.
 */
static int sd_init()
{
    int i;

    if (sd_template.dev_noticed == 0) return 0;

    if(!sd_registered) {
        if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
            printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
            return 1;
        }
        sd_registered++;
    }

    /* We do not support attaching loadable devices yet. */
    if(rscsi_disks) return 0;

    /* Leave SD_EXTRA_DEVS spare slots for devices appearing later. */
    sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;

    rscsi_disks = (Scsi_Disk *)
        scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));

    /* Each disk owns 16 minors (1 whole-disk + 15 partitions), hence
     * the << 4 on all per-minor arrays below. */
    sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                        sizeof(int), GFP_ATOMIC);
    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));

    sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                             sizeof(int), GFP_ATOMIC);

    sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                            sizeof(int), GFP_ATOMIC);

    /* Defaults until sd_init_onedisk() learns the real geometry. */
    for(i=0;i<(sd_template.dev_max << 4);i++){
        sd_blocksizes[i] = 1024;
        sd_hardsizes[i] = 512;
    }
    blksize_size[MAJOR_NR] = sd_blocksizes;
    hardsect_size[MAJOR_NR] = sd_hardsizes;
    sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
                                               sizeof(struct hd_struct),
                                               GFP_ATOMIC);


    /* Hook the tables into the generic disk descriptor. */
    sd_gendisk.max_nr = sd_template.dev_max;
    sd_gendisk.part = sd;
    sd_gendisk.sizes = sd_sizes;
    sd_gendisk.real_devices = (void *) rscsi_disks;
    return 0;
}
|
1377 |
|
|
|
1378 |
|
|
/*
 * Finish driver setup after all devices are attached: install the
 * request function, link sd_gendisk into the gendisk chain (if not
 * already there), probe each attached-but-unsized disk, and choose
 * the read-ahead value.
 */
static void sd_finish(void)
{
    struct gendisk *gendisk;
    int i;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    /* Add sd_gendisk to the chain only once. */
    for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next)
        if (gendisk == &sd_gendisk)
            break;
    if (gendisk == NULL)
    {
        sd_gendisk.next = gendisk_head;
        gendisk_head = &sd_gendisk;
    }

    /* Size every attached disk that has no capacity yet. */
    for (i = 0; i < sd_template.dev_max; ++i)
        if (!rscsi_disks[i].capacity &&
            rscsi_disks[i].device)
        {
            if (MODULE_FLAG
                && !rscsi_disks[i].has_part_table) {
                sd_sizes[i << 4] = rscsi_disks[i].capacity;
                /* revalidate does sd_init_onedisk via MAYBE_REINIT*/
                revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
            }
            else
                /* NOTE(review): sd_init_onedisk returns its argument,
                 * so this assignment to the loop index is effectively
                 * a no-op — confirm before restructuring. */
                i=sd_init_onedisk(i);
            rscsi_disks[i].has_part_table = 1;
        }

    /* If our host adapter is capable of scatter-gather, then we increase
     * the read-ahead to 16 blocks (32 sectors). If not, we use
     * a two block (4 sector) read ahead.
     */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
        read_ahead[MAJOR_NR] = 120; /* 120 sector read-ahead */
    else
        read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */

    return;
}
|
1420 |
|
|
|
1421 |
|
|
/*
 * Template detect hook: decide whether this driver claims SDp.
 * Returns 1 (and announces the drive) for disks and magneto-opticals,
 * 0 for everything else. Increments dev_noticed for each claim.
 */
static int sd_detect(Scsi_Device * SDp){
    char drive_letter;

    /* Only direct-access disks and magneto-optical devices are ours. */
    if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
        return 0;

    /* Claim the next drive letter; dev_noticed counts claimed drives. */
    drive_letter = 'a' + (sd_template.dev_noticed++);

    printk("Detected scsi %sdisk sd%c at scsi%d, channel %d, id %d, lun %d\n",
           SDp->removable ? "removable " : "",
           drive_letter,
           SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);

    return 1;
}
|
1431 |
|
|
|
1432 |
|
|
/*
 * Template attach hook: bind SDp to the first free slot in
 * rscsi_disks[]. Returns 0 on success, 1 if the table is full
 * (in which case the caller's attach count is rolled back).
 */
static int sd_attach(Scsi_Device * SDp){
    int slot;

    /* Only disks and magneto-opticals belong to this driver. */
    if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
        return 0;

    /* Table full: undo the attached bump done by the mid-layer. */
    if (sd_template.nr_dev >= sd_template.dev_max) {
        SDp->attached--;
        return 1;
    }

    /* Locate the first unused entry. */
    for (slot = 0; slot < sd_template.dev_max; slot++)
        if (!rscsi_disks[slot].device)
            break;

    if (slot >= sd_template.dev_max)
        panic ("scsi_devices corrupt (sd)");

    SDp->scsi_request_fn = do_sd_request;
    rscsi_disks[slot].device = SDp;
    rscsi_disks[slot].has_part_table = 0;
    sd_template.nr_dev++;
    sd_gendisk.nr_real++;
    return 0;
}
|
1455 |
|
|
|
1456 |
|
|
#define DEVICE_BUSY rscsi_disks[target].device->busy
|
1457 |
|
|
#define USAGE rscsi_disks[target].device->access_count
|
1458 |
|
|
#define CAPACITY rscsi_disks[target].capacity
|
1459 |
|
|
#define MAYBE_REINIT sd_init_onedisk(target)
|
1460 |
|
|
#define GENDISK_STRUCT sd_gendisk
|
1461 |
|
|
|
1462 |
|
|
/* This routine is called to flush all partitions and partition tables
|
1463 |
|
|
* for a changed scsi disk, and then re-read the new partition table.
|
1464 |
|
|
* If we are revalidating a disk because of a media change, then we
|
1465 |
|
|
* enter with usage == 0. If we are using an ioctl, we automatically have
|
1466 |
|
|
* usage == 1 (we need an open channel to use an ioctl :-), so this
|
1467 |
|
|
* is our limit.
|
1468 |
|
|
*/
|
1469 |
|
|
/* This routine is called to flush all partitions and partition tables
 * for a changed scsi disk, and then re-read the new partition table.
 * If we are revalidating a disk because of a media change, then we
 * enter with usage == 0. If we are using an ioctl, we automatically have
 * usage == 1 (we need an open channel to use an ioctl :-), so this
 * is our limit.
 *
 * Returns 0 on success, -EBUSY if the device is busy or has more
 * users than maxusage allows.
 */
int revalidate_scsidisk(kdev_t dev, int maxusage){
    int target;
    struct gendisk * gdev;
    unsigned long flags;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(dev);
    gdev = &GENDISK_STRUCT;

    /* Atomically test-and-set the per-device busy flag under cli()
     * so two revalidations cannot race. */
    save_flags(flags);
    cli();
    if (DEVICE_BUSY || USAGE > maxusage) {
        restore_flags(flags);
        printk("Device busy for revalidation (usage=%d)\n", USAGE);
        return -EBUSY;
    }
    DEVICE_BUSY = 1;
    restore_flags(flags);

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;

    /* Flush and forget every partition minor of this disk. */
    for (i=max_p - 1; i >=0 ; i--) {
        int minor = start+i;
        kdev_t devi = MKDEV(MAJOR_NR, minor);
        sync_dev(devi);
        invalidate_inodes(devi);
        invalidate_buffers(devi);
        gdev->part[minor].start_sect = 0;
        gdev->part[minor].nr_sects = 0;
        /*
         * Reset the blocksize for everything so that we can read
         * the partition table.
         */
        blksize_size[MAJOR_NR][minor] = 1024;
    }

    /* Re-probe the drive (expands to sd_init_onedisk(target)). */
#ifdef MAYBE_REINIT
    MAYBE_REINIT;
#endif

    /* Whole-disk minor covers the new capacity; re-read partitions. */
    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);

    DEVICE_BUSY = 0;
    return 0;
}
|
1518 |
|
|
|
1519 |
|
|
/*
 * File-operations revalidate hook: wrap revalidate_scsidisk() with
 * maxusage 0 (media-change path, no open channel assumed).
 */
static int fop_revalidate_scsidisk(kdev_t dev){
    return revalidate_scsidisk(dev, 0);
}
|
1522 |
|
|
|
1523 |
|
|
|
1524 |
|
|
/*
 * Template detach hook: unbind SDp from its rscsi_disks[] slot,
 * flushing and invalidating every partition minor first.
 */
static void sd_detach(Scsi_Device * SDp)
{
    Scsi_Disk * dpnt;
    int i;
    int max_p;
    int start;

    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
        if(dpnt->device == SDp) {

            /* If we are disconnecting a disk driver, sync and invalidate
             * everything */
            max_p = sd_gendisk.max_p;
            start = i << sd_gendisk.minor_shift;

            /* NOTE(review): the inner loop reuses `i` as the partition
             * index, clobbering the disk index — safe only because we
             * return immediately after; dpnt still points at the slot. */
            for (i=max_p - 1; i >=0 ; i--) {
                int minor = start+i;
                kdev_t devi = MKDEV(MAJOR_NR, minor);
                sync_dev(devi);
                invalidate_inodes(devi);
                invalidate_buffers(devi);
                sd_gendisk.part[minor].start_sect = 0;
                sd_gendisk.part[minor].nr_sects = 0;
                sd_sizes[minor] = 0;
            }

            /* Free the slot and roll back all the attach-time counters. */
            dpnt->has_part_table = 0;
            dpnt->device = NULL;
            dpnt->capacity = 0;
            SDp->attached--;
            sd_template.dev_noticed--;
            sd_template.nr_dev--;
            sd_gendisk.nr_real--;
            return;
        }
    return;
}
|
1561 |
|
|
|
1562 |
|
|
#ifdef MODULE
|
1563 |
|
|
|
1564 |
|
|
/*
 * Module entry point: register this driver's template with the SCSI
 * mid-layer, which will call sd_detect/sd_attach/sd_init as devices
 * are found. Returns the mid-layer's status (0 on success).
 */
int init_module(void) {
    sd_template.usage_count = &mod_use_count_;
    return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
}
|
1568 |
|
|
|
1569 |
|
|
void cleanup_module( void)
|
1570 |
|
|
{
|
1571 |
|
|
struct gendisk * prev_sdgd;
|
1572 |
|
|
struct gendisk * sdgd;
|
1573 |
|
|
|
1574 |
|
|
scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
|
1575 |
|
|
unregister_blkdev(SCSI_DISK_MAJOR, "sd");
|
1576 |
|
|
sd_registered--;
|
1577 |
|
|
if( rscsi_disks != NULL )
|
1578 |
|
|
{
|
1579 |
|
|
scsi_init_free((char *) rscsi_disks,
|
1580 |
|
|
(sd_template.dev_noticed + SD_EXTRA_DEVS)
|
1581 |
|
|
* sizeof(Scsi_Disk));
|
1582 |
|
|
|
1583 |
|
|
scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int));
|
1584 |
|
|
scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int));
|
1585 |
|
|
scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int));
|
1586 |
|
|
scsi_init_free((char *) sd,
|
1587 |
|
|
(sd_template.dev_max << 4) * sizeof(struct hd_struct));
|
1588 |
|
|
/*
|
1589 |
|
|
* Now remove sd_gendisk from the linked list
|
1590 |
|
|
*/
|
1591 |
|
|
sdgd = gendisk_head;
|
1592 |
|
|
prev_sdgd = NULL;
|
1593 |
|
|
while(sdgd != &sd_gendisk)
|
1594 |
|
|
{
|
1595 |
|
|
prev_sdgd = sdgd;
|
1596 |
|
|
sdgd = sdgd->next;
|
1597 |
|
|
}
|
1598 |
|
|
|
1599 |
|
|
if(sdgd != &sd_gendisk)
|
1600 |
|
|
printk("sd_gendisk not in disk chain.\n");
|
1601 |
|
|
else {
|
1602 |
|
|
if(prev_sdgd != NULL)
|
1603 |
|
|
prev_sdgd->next = sdgd->next;
|
1604 |
|
|
else
|
1605 |
|
|
gendisk_head = sdgd->next;
|
1606 |
|
|
}
|
1607 |
|
|
}
|
1608 |
|
|
|
1609 |
|
|
blksize_size[MAJOR_NR] = NULL;
|
1610 |
|
|
blk_dev[MAJOR_NR].request_fn = NULL;
|
1611 |
|
|
blk_size[MAJOR_NR] = NULL;
|
1612 |
|
|
hardsect_size[MAJOR_NR] = NULL;
|
1613 |
|
|
read_ahead[MAJOR_NR] = 0;
|
1614 |
|
|
sd_template.dev_max = 0;
|
1615 |
|
|
}
|
1616 |
|
|
#endif /* MODULE */
|
1617 |
|
|
|
1618 |
|
|
/*
|
1619 |
|
|
* Overrides for Emacs so that we almost follow Linus's tabbing style.
|
1620 |
|
|
* Emacs will notice this stuff at the end of the file and automatically
|
1621 |
|
|
* adjust the settings for this buffer only. This must remain at the end
|
1622 |
|
|
* of the file.
|
1623 |
|
|
* ---------------------------------------------------------------------------
|
1624 |
|
|
* Local variables:
|
1625 |
|
|
* c-indent-level: 4
|
1626 |
|
|
* c-brace-imaginary-offset: 0
|
1627 |
|
|
* c-brace-offset: -4
|
1628 |
|
|
* c-argdecl-indent: 4
|
1629 |
|
|
* c-label-offset: -4
|
1630 |
|
|
* c-continued-statement-offset: 4
|
1631 |
|
|
* c-continued-brace-offset: 0
|
1632 |
|
|
* indent-tabs-mode: nil
|
1633 |
|
|
* tab-width: 8
|
1634 |
|
|
* End:
|
1635 |
|
|
*/
|