/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

/*
 * The fundamental purpose of this file is to contain a library of utility
 * routines that can be used by low-level drivers.  Ultimately the idea
 * is that there should be a sufficiently rich number of functions that it
 * would be possible for a driver author to fashion a queueing function for
 * a low-level driver if they wished.  Note however that this file also
 * contains the "default" versions of these functions, as we don't want to
 * go through and retrofit queueing functions into all 30 some-odd drivers.
 */

#define __NO_VERSION__
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>

#define __KERNEL_SYSCALLS__

#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/dma.h>

#include "scsi.h"
#include "hosts.h"
#include "constants.h"
#include <scsi/scsi_ioctl.h>

/*
 * This entire source file deals with the new queueing code.
 */

/*
 * Function:    __scsi_insert_special()
 *
 * Purpose:     worker for scsi_insert_special_*()
 *
 * Arguments:   q       - request queue where request should be inserted
 *              rq      - request to be inserted
 *              data    - private data
 *              at_head - insert request at head or tail of queue
 *
 * Lock status: Assumed that io_request_lock is not held upon entry.
 *
 * Returns:     Nothing
 */
static void __scsi_insert_special(request_queue_t *q, struct request *rq,
                                  void *data, int at_head)
{
        unsigned long flags;

        ASSERT_LOCK(&io_request_lock, 0);

        rq->cmd = SPECIAL;
        rq->special = data;
        rq->q = NULL;
        rq->nr_segments = 0;
        rq->elevator_sequence = 0;

        /*
         * We have the option of inserting the head or the tail of the queue.
         * Typically we use the tail for new ioctls and so forth.  We use the
         * head of the queue for things like a QUEUE_FULL message from a
         * device, or a host that is unable to accept a particular command.
         */
        spin_lock_irqsave(&io_request_lock, flags);

        if (at_head)
                list_add(&rq->queue, &q->queue_head);
        else
                list_add_tail(&rq->queue, &q->queue_head);

        q->request_fn(q);
        spin_unlock_irqrestore(&io_request_lock, flags);
}
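
/*
 * Note on locking: __scsi_insert_special() takes io_request_lock itself and
 * then calls q->request_fn(q) directly, which matches the 2.4 block layer
 * convention that a queue's request function runs with io_request_lock held.
 * The two wrappers below simply pick the right queue and request structure
 * (Scsi_Cmnd vs. Scsi_Request) before handing off to this worker.
 */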

/*
 * Function:    scsi_insert_special_cmd()
 *
 * Purpose:     Insert pre-formed command into request queue.
 *
 * Arguments:   SCpnt   - command that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     0 (currently the only return value).
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
{
        request_queue_t *q = &SCpnt->device->request_queue;

        __scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
        return 0;
}
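
/*
 * Illustrative sketch only (not from the original source): a caller that
 * already owns a Scsi_Cmnd would typically fill in the CDB and then push the
 * command at the head of the device queue, along the lines of:
 *
 *      SCpnt->cmnd[0] = TEST_UNIT_READY;
 *      SCpnt->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
 *      scsi_insert_special_cmd(SCpnt, 1);      at_head = 1, insert at head
 *
 * TEST_UNIT_READY and COMMAND_SIZE() are the usual definitions from the scsi
 * headers; the rest of the command setup remains the caller's responsibility.
 */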

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   SRpnt   - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     0 (currently the only return value).
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
{
        request_queue_t *q = &SRpnt->sr_device->request_queue;

        __scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
        return 0;
}

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize SCpnt fields related to error handling.
 *
 * Arguments:   SCpnt   - command that is ready to be queued.
 *
 * Returns:     1 (currently the only return value).
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt)
{
        ASSERT_LOCK(&io_request_lock, 0);

        SCpnt->owner = SCSI_OWNER_MIDLEVEL;
        SCpnt->reset_chain = NULL;
        SCpnt->serial_number = 0;
        SCpnt->serial_number_at_timeout = 0;
        SCpnt->flags = 0;
        SCpnt->retries = 0;

        SCpnt->abort_reason = 0;

        memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);

        if (SCpnt->cmd_len == 0)
                SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        SCpnt->old_use_sg = SCpnt->use_sg;
        SCpnt->old_cmd_len = SCpnt->cmd_len;
        SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
        SCpnt->old_underflow = SCpnt->underflow;
        memcpy((void *) SCpnt->data_cmnd,
               (const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
        SCpnt->buffer = SCpnt->request_buffer;
        SCpnt->bufflen = SCpnt->request_bufflen;

        SCpnt->reset_chain = NULL;

        SCpnt->internal_timeout = NORMAL_TIMEOUT;
        SCpnt->abort_reason = 0;

        return 1;
}

/*
 * Function:    scsi_queue_next_request()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - request queue to kick.
 *              SCpnt   - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              that a medium error occurred, and the sectors after
 *              the bad block need to be re-read.
 *
 *              If SCpnt is NULL, it means that the previous command
 *              was completely finished, and we should simply start
 *              a new command, if possible.
 *
 *              This is where a lot of special case code has begun to
 *              accumulate.  It doesn't really affect readability or
 *              anything, but it might be considered architecturally
 *              inelegant.  If more of these special cases start to
 *              accumulate, I am thinking along the lines of implementing
 *              an atexit() like technology that gets run when commands
 *              complete.  I am not convinced that it is worth the
 *              added overhead, however.  Right now as things stand,
 *              there are simple conditional checks, and most hosts
 *              would skip past.
 *
 *              Another possible solution would be to tailor different
 *              handler functions, sort of like what we did in scsi_merge.c.
 *              This is probably a better solution, but the number of different
 *              permutations grows as 2**N, and if too many more special cases
 *              get added, we start to get screwed.
 */
void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
{
        int all_clear;
        unsigned long flags;
        Scsi_Device *SDpnt;
        struct Scsi_Host *SHpnt;

        ASSERT_LOCK(&io_request_lock, 0);

        spin_lock_irqsave(&io_request_lock, flags);
        if (SCpnt != NULL) {

                /*
                 * For some reason, we are not done with this request.
                 * This happens for I/O errors in the middle of the request,
                 * in which case we need to request the blocks that come after
                 * the bad sector.
                 */
                SCpnt->request.special = (void *) SCpnt;
                list_add(&SCpnt->request.queue, &q->queue_head);
        }

        /*
         * Just hit the requeue function for the queue.
         */
        q->request_fn(q);

        SDpnt = (Scsi_Device *) q->queuedata;
        SHpnt = SDpnt->host;

        /*
         * If this is a single-lun device, and we are currently finished
         * with this device, then see if we need to get another device
         * started.  FIXME(eric) - if this function gets too cluttered
         * with special case code, then spin off separate versions and
         * use function pointers to pick the right one.
         */
        if (SDpnt->single_lun
            && list_empty(&q->queue_head)
            && SDpnt->device_busy == 0) {
                request_queue_t *q;

                for (SDpnt = SHpnt->host_queue;
                     SDpnt;
                     SDpnt = SDpnt->next) {
                        if (((SHpnt->can_queue > 0)
                             && (SHpnt->host_busy >= SHpnt->can_queue))
                            || (SHpnt->host_blocked)
                            || (SHpnt->host_self_blocked)
                            || (SDpnt->device_blocked)) {
                                break;
                        }
                        q = &SDpnt->request_queue;
                        q->request_fn(q);
                }
        }

        /*
         * Now see whether there are other devices on the bus which
         * might be starved.  If so, hit the request function.  If we
         * don't find any, then it is safe to reset the flag.  If we
         * find any device that is starved, it isn't safe to reset the
         * flag as the queue function releases the lock and thus some
         * other device might have become starved along the way.
         */
        all_clear = 1;
        if (SHpnt->some_device_starved) {
                for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
                        request_queue_t *q;
                        if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
                            || (SHpnt->host_blocked)
                            || (SHpnt->host_self_blocked)) {
                                break;
                        }
                        if (SDpnt->device_blocked || !SDpnt->starved) {
                                continue;
                        }
                        q = &SDpnt->request_queue;
                        q->request_fn(q);
                        all_clear = 0;
                }
                if (SDpnt == NULL && all_clear) {
                        SHpnt->some_device_starved = 0;
                }
        }
        spin_unlock_irqrestore(&io_request_lock, flags);
}
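
/*
 * Callers of scsi_queue_next_request() must not hold io_request_lock (the
 * ASSERT_LOCK at the top checks for this); the function acquires the lock
 * itself around the whole requeue-and-kick sequence.  Passing SCpnt == NULL
 * is the common "just restart the queue" case used, for example, by
 * scsi_unblock_requests() further down in this file.
 */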

/*
 * Function:    __scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands called from interrupt
 *              handler or a bottom-half handler.
 *
 * Arguments:   SCpnt    - command that is complete.
 *              uptodate - 1 if I/O indicates success, 0 for I/O error.
 *              sectors  - number of sectors we want to mark.
 *              requeue  - indicates whether we should requeue leftovers.
 *              frequeue - indicates that if we release the command block
 *                         that the queue request function should be called.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     SCpnt if leftover blocks remain to be processed, or NULL
 *              if the request was fully completed and the command block
 *              released.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
                                     int uptodate,
                                     int sectors,
                                     int requeue,
                                     int frequeue)
{
        request_queue_t *q = &SCpnt->device->request_queue;
        struct request *req;
        struct buffer_head *bh;
        unsigned long flags;
        int nsect;

        ASSERT_LOCK(&io_request_lock, 0);

        req = &SCpnt->request;
        req->errors = 0;
        if (!uptodate) {
                printk(" I/O error: dev %s, sector %lu\n",
                       kdevname(req->rq_dev), req->sector);
        }
        do {
                if ((bh = req->bh) != NULL) {
                        nsect = bh->b_size >> 9;
                        blk_finished_io(nsect);
                        blk_finished_sectors(req, nsect);
                        req->bh = bh->b_reqnext;
                        bh->b_reqnext = NULL;
                        sectors -= nsect;
                        bh->b_end_io(bh, uptodate);
                        if ((bh = req->bh) != NULL) {
                                req->hard_sector += nsect;
                                req->hard_nr_sectors -= nsect;
                                req->sector += nsect;
                                req->nr_sectors -= nsect;

                                req->current_nr_sectors = bh->b_size >> 9;
                                req->hard_cur_sectors = req->current_nr_sectors;
                                if (req->nr_sectors < req->current_nr_sectors) {
                                        req->nr_sectors = req->current_nr_sectors;
                                        printk("scsi_end_request: buffer-list destroyed\n");
                                }
                        }
                }
        } while (sectors && bh);

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (req->bh) {
                /*
                 * Recount segments whether we are immediately going to
                 * requeue the command or not, other code might requeue
                 * it later and since we changed the segment count up above,
                 * we need it updated.
                 */
                recount_segments(SCpnt);

                /*
                 * Bleah.  Leftovers again.  Stick the leftovers in
                 * the front of the queue, and goose the queue again.
                 */
                if (requeue)
                        scsi_queue_next_request(q, SCpnt);

                return SCpnt;
        }

        /*
         * This request is done.  If there is someone blocked waiting for this
         * request, wake them up.  Typically used to wake up processes trying
         * to swap a page into memory.
         */
        if (req->waiting)
                complete(req->waiting);

        spin_lock_irqsave(&io_request_lock, flags);
        req_finished_io(req);
        spin_unlock_irqrestore(&io_request_lock, flags);

        add_blkdev_randomness(MAJOR(req->rq_dev));

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        __scsi_release_command(SCpnt);

        if (frequeue)
                scsi_queue_next_request(q, NULL);

        return NULL;
}
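
/*
 * Worked example (illustrative, not from the original source): suppose a
 * 16-sector request is built from four 2KB buffer_heads (4 sectors each) and
 * the caller asks to finish 8 sectors.  The loop above completes the first
 * two buffer_heads via b_end_io(), advances req->sector / req->hard_sector by
 * 8, and shrinks req->nr_sectors accordingly.  Since req->bh is still
 * non-NULL, the command is requeued (when 'requeue' is set) and SCpnt is
 * returned to the caller; only when every buffer_head has been consumed is
 * the request finished, the command block released, and NULL returned.
 */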

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands called from interrupt
 *              handler or a bottom-half handler.
 *
 * Arguments:   SCpnt    - command that is complete.
 *              uptodate - 1 if I/O indicates success, 0 for I/O error.
 *              sectors  - number of sectors we want to mark.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     SCpnt if leftover blocks were requeued, or NULL if the
 *              request was fully completed.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
{
        return __scsi_end_request(SCpnt, uptodate, sectors, 1, 1);
}
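
/*
 * Illustrative sketch only: an upper-level driver's completion routine (the
 * rw_intr-style handlers in sd.c, sr.c, and friends) typically works out how
 * many sectors were transferred correctly and then lets the mid-layer finish
 * them, roughly:
 *
 *      good_sectors = ...;   derived from the result and the sense data
 *      scsi_io_completion(SCpnt, good_sectors, block_sectors);
 *
 * scsi_io_completion() below then drives scsi_end_request() and
 * __scsi_end_request() as appropriate, so upper-level drivers usually go
 * through it rather than calling scsi_end_request() directly.
 */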

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free the I/O resources allocated for a command that is
 *              being discarded.
 *
 * Arguments:   SCpnt   - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
{
        ASSERT_LOCK(&io_request_lock, 0);

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (SCpnt->use_sg) {
                struct scatterlist *sgpnt;
                void **bbpnt;
                int i;

                sgpnt = (struct scatterlist *) SCpnt->request_buffer;
                bbpnt = SCpnt->bounce_buffers;

                if (bbpnt) {
                        for (i = 0; i < SCpnt->use_sg; i++) {
                                if (bbpnt[i])
                                        scsi_free(sgpnt[i].address, sgpnt[i].length);
                        }
                }
                scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
        } else {
                if (SCpnt->request_buffer != SCpnt->request.buffer) {
                        scsi_free(SCpnt->request_buffer, SCpnt->request_bufflen);
                }
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        SCpnt->buffer = NULL;
        SCpnt->bufflen = 0;
        SCpnt->request_buffer = NULL;
        SCpnt->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   SCpnt         - command that is finished.
 *              good_sectors  - number of sectors that completed successfully.
 *              block_sectors - number of sectors to fail if a medium error
 *                              has to be reported.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 */
void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
                        int block_sectors)
{
        int result = SCpnt->result;
        int this_count = SCpnt->bufflen >> 9;
        request_queue_t *q = &SCpnt->device->request_queue;
        struct request *req = &SCpnt->request;

        /*
         * We must do one of several things here:
         *
         * Call scsi_end_request.  This will finish off the specified
         * number of sectors.  If we are done, the command block will
         * be released, and the queue function will be goosed.  If we
         * are not done, then scsi_end_request will directly goose
         * the queue.
         *
         * We can just use scsi_queue_next_request() here.  This
         * would be used if we just wanted to retry, for example.
         */
        ASSERT_LOCK(&io_request_lock, 0);

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (SCpnt->use_sg) {
                struct scatterlist *sgpnt;
                void **bbpnt;
                int i;

                sgpnt = (struct scatterlist *) SCpnt->buffer;
                bbpnt = SCpnt->bounce_buffers;

                if (bbpnt) {
                        for (i = 0; i < SCpnt->use_sg; i++) {
                                if (bbpnt[i]) {
                                        if (req->cmd == READ) {
                                                memcpy(bbpnt[i],
                                                       sgpnt[i].address,
                                                       sgpnt[i].length);
                                        }
                                        scsi_free(sgpnt[i].address, sgpnt[i].length);
                                }
                        }
                }
                scsi_free(SCpnt->buffer, SCpnt->sglist_len);
        } else {
                if (SCpnt->buffer != req->buffer) {
                        if (PageHighMem(req->bh->b_page))
                                BUG();
                        if (req->cmd == READ)
                                memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
                        scsi_free(SCpnt->buffer, SCpnt->bufflen);
                }
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        SCpnt->buffer = NULL;
        SCpnt->bufflen = 0;
        SCpnt->request_buffer = NULL;
        SCpnt->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_sectors > 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d sectors done.\n",
                                              SCpnt->request.nr_sectors,
                                              good_sectors));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n ", SCpnt->use_sg));

                req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_sectors != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                SCpnt = __scsi_end_request(SCpnt,
                                           1,
                                           good_sectors,
                                           result == 0,
                                           1);

                /*
                 * If the command completed without error, then either finish off the
                 * rest of the command, or start a new one.
                 */
                if (result == 0 || SCpnt == NULL) {
                        return;
                }
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (driver_byte(result) != 0) {
                if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
                        /*
                         * Not yet implemented.  A read will fail after being remapped,
                         * a write will call the strategy routine again.
                         */
                        if (SCpnt->device->remap) {
                                result = 0;
                        }
#endif
                }
                if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (SCpnt->sense_buffer[12] == 0x04 &&
                            SCpnt->sense_buffer[13] == 0x01) {
                                scsi_queue_next_request(q, SCpnt);
                                return;
                        }
                        if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                                if (SCpnt->device->removable) {
                                        /* detected disc change.  set a bit
                                         * and quietly refuse further access.
                                         */
                                        SCpnt->device->changed = 1;
                                        SCpnt = scsi_end_request(SCpnt, 0, this_count);
                                        return;
                                } else {
                                        /*
                                         * Must have been a power glitch, or a
                                         * bus reset.  Could not have been a
                                         * media change, so we just retry the
                                         * request and see what happens.
                                         */
                                        scsi_queue_next_request(q, SCpnt);
                                        return;
                                }
                        }
                }
                /* If we had an ILLEGAL REQUEST returned, then we may have
                 * performed an unsupported command.  The only thing this should be
                 * would be a ten byte read where only a six byte read was supported.
                 * Also, on a system where READ CAPACITY failed, we may have read
                 * past the end of the disk.
                 */

                switch (SCpnt->sense_buffer[2]) {
                case RECOVERED_ERROR:   /* Added, KG, 2003-01-20 */
                        return;
                case ILLEGAL_REQUEST:
                        if (SCpnt->device->ten && SCSI_RETRY_10(SCpnt->cmnd[0])) {
                                SCpnt->device->ten = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_queue_next_request(q, SCpnt);
                                result = 0;
                        } else {
                                SCpnt = scsi_end_request(SCpnt, 0, this_count);
                                return;
                        }
                        break;
                case NOT_READY:
                        printk(KERN_INFO "Device %s not ready.\n",
                               kdevname(SCpnt->request.rq_dev));
                        SCpnt = scsi_end_request(SCpnt, 0, this_count);
                        return;
                        break;
                case MEDIUM_ERROR:
                case VOLUME_OVERFLOW:
                        printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
                               SCpnt->host->host_no, (int) SCpnt->channel,
                               (int) SCpnt->target, (int) SCpnt->lun);
                        print_command(SCpnt->cmnd);
                        print_sense("sd", SCpnt);
                        SCpnt = scsi_end_request(SCpnt, 0, block_sectors);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_queue_next_request(q, SCpnt);
                return;
        }
        if (result) {
                struct Scsi_Device_Template *STpnt;

                STpnt = scsi_get_request_dev(&SCpnt->request);
                printk("SCSI %s error : host %d channel %d id %d lun %d return code = %x\n",
                       (STpnt ? STpnt->name : "device"),
                       SCpnt->device->host->host_no,
                       SCpnt->device->channel,
                       SCpnt->device->id,
                       SCpnt->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        print_sense("sd", SCpnt);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
                return;
        }
}
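
/*
 * Summary of the decision tree above: sectors reported good are finished
 * first via __scsi_end_request().  Then, from the sense data: an ASC/ASCQ
 * 04/01 ("unit becoming ready") retries the command; a UNIT ATTENTION on a
 * removable device flags a media change and fails the request, while on a
 * fixed device it is treated as a glitch and retried; NOT READY fails the
 * request; ILLEGAL REQUEST falls back from 10-byte to 6-byte commands and
 * retries when that is possible; MEDIUM ERROR / VOLUME OVERFLOW fail
 * block_sectors worth of the request; DID_RESET retries; any other failure
 * fails the current buffer and requeues the remainder.
 */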

/*
 * Function:    scsi_get_request_dev()
 *
 * Purpose:     Find the upper-level driver that is responsible for this
 *              request.
 *
 * Arguments:   request   - I/O request we are preparing to queue.
 *
 * Lock status: No locks assumed to be held, but as it happens the
 *              io_request_lock is held when this is called.
 *
 * Returns:     Pointer to the Scsi_Device_Template that owns the major
 *              number of the request's device, or NULL if none matches.
 *
 * Notes:       The requests in the request queue may have originated
 *              from any block device driver.  We need to find out which
 *              one so that we can later form the appropriate command.
 */
struct Scsi_Device_Template *scsi_get_request_dev(struct request *req)
{
        struct Scsi_Device_Template *spnt;
        kdev_t dev = req->rq_dev;
        int major = MAJOR(dev);

        ASSERT_LOCK(&io_request_lock, 1);

        for (spnt = scsi_devicelist; spnt; spnt = spnt->next) {
                /*
                 * Search for a block device driver that supports this
                 * major.
                 */
                if (spnt->blk && spnt->major == major) {
                        return spnt;
                }
                /*
                 * I am still not entirely satisfied with this solution,
                 * but it is good enough for now.  Disks have a number of
                 * major numbers associated with them, the primary
                 * 8, which we test above, and a secondary range of 7
                 * different consecutive major numbers.  If this ever
                 * becomes insufficient, then we could add another function
                 * to the structure, and generalize this completely.
                 */
                if (spnt->min_major != 0
                    && spnt->max_major != 0
                    && major >= spnt->min_major
                    && major <= spnt->max_major) {
                        return spnt;
                }
        }
        return NULL;
}
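
/*
 * For illustration (field values are indicative, not taken from this file):
 * an upper-level driver registers a Scsi_Device_Template with blk set, its
 * primary block major in 'major', and min_major/max_major spanning its
 * secondary range of consecutive majors, so that both tests above can match:
 *
 *      static struct Scsi_Device_Template example_template = {
 *              name:      "disk",
 *              blk:       1,
 *              major:     SCSI_DISK0_MAJOR,
 *              min_major: SCSI_DISK1_MAJOR,
 *              max_major: SCSI_DISK7_MAJOR,
 *      };
 *
 * example_template is a hypothetical name; see sd.c for the real definition.
 */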

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Generic version of request function for SCSI hosts.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 *
 * Notes:       The theory is that this function is something which individual
 *              drivers could also supply if they wished to.  The problem
 *              is that we have 30 some odd low-level drivers in the kernel
 *              tree already, and it would be most difficult to retrofit
 *              this crap into all of them.  Thus this function has the job
 *              of acting as a generic queue manager for all of those existing
 *              drivers.
 */
void scsi_request_fn(request_queue_t * q)
{
        struct request *req;
        Scsi_Cmnd *SCpnt;
        Scsi_Request *SRpnt;
        Scsi_Device *SDpnt;
        struct Scsi_Host *SHpnt;
        struct Scsi_Device_Template *STpnt;

        ASSERT_LOCK(&io_request_lock, 1);

        SDpnt = (Scsi_Device *) q->queuedata;
        if (!SDpnt) {
                panic("Missing device");
        }
        SHpnt = SDpnt->host;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        while (1 == 1) {
                /*
                 * Check this again - each time we loop through we will have
                 * released the lock and grabbed it again, so each time
                 * we need to check to see if the queue is plugged or not.
                 */
                if (SHpnt->in_recovery || q->plugged)
                        return;

                /*
                 * If the device cannot accept another request, then quit.
                 */
                if (SDpnt->device_blocked) {
                        break;
                }
                if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
                    || (SHpnt->host_blocked)
                    || (SHpnt->host_self_blocked)) {
                        /*
                         * If we are unable to process any commands at all for
                         * this device, then we consider it to be starved.
                         * What this means is that there are no outstanding
                         * commands for this device and hence we need a
                         * little help getting it started again
                         * once the host isn't quite so busy.
                         */
                        if (SDpnt->device_busy == 0) {
                                SDpnt->starved = 1;
                                SHpnt->some_device_starved = 1;
                        }
                        break;
                } else {
                        SDpnt->starved = 0;
                }

                /*
                 * FIXME(eric)
                 * I am not sure where the best place to do this is.  We need
                 * to hook in a place where we are likely to come if in user
                 * space.  Technically the error handling thread should be
                 * doing this crap, but the error handler isn't used by
                 * most hosts.
                 */
                if (SDpnt->was_reset) {
                        /*
                         * We need to relock the door, but we might
                         * be in an interrupt handler.  Only do this
                         * from user space, since we do not want to
                         * sleep from an interrupt.
                         *
                         * FIXME(eric) - have the error handler thread do
                         * this work.
                         */
                        SDpnt->was_reset = 0;
                        if (SDpnt->removable && !in_interrupt()) {
                                spin_unlock_irq(&io_request_lock);
                                scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
                                spin_lock_irq(&io_request_lock);
                                continue;
                        }
                }

                /*
                 * If we couldn't find a request that could be queued, then we
                 * can also quit.
                 */
                if (list_empty(&q->queue_head))
                        break;

                /*
                 * Loop through all of the requests in this queue, and find
                 * one that is queueable.
                 */
                req = blkdev_entry_next_request(&q->queue_head);

                /*
                 * Find the actual device driver associated with this command.
                 * The SPECIAL requests are things like character device or
                 * ioctls, which did not originate from ll_rw_blk.  Note that
                 * the special field is also used to indicate the SCpnt for
                 * the remainder of a partially fulfilled request that can
                 * come up when there is a medium error.  We have to treat
                 * these two cases differently.  We differentiate by looking
                 * at request.cmd, as this tells us the real story.
                 */
                if (req->cmd == SPECIAL) {
                        STpnt = NULL;
                        SCpnt = (Scsi_Cmnd *) req->special;
                        SRpnt = (Scsi_Request *) req->special;

                        if (SRpnt->sr_magic == SCSI_REQ_MAGIC) {
                                SCpnt = scsi_allocate_device(SRpnt->sr_device,
                                                             FALSE, FALSE);
                                if (!SCpnt) {
                                        break;
                                }
                                scsi_init_cmd_from_req(SCpnt, SRpnt);
                        }

                } else {
                        SRpnt = NULL;
                        STpnt = scsi_get_request_dev(req);
                        if (!STpnt) {
                                panic("Unable to find device associated with request");
                        }
                        /*
                         * Now try and find a command block that we can use.
                         */
                        if (req->special != NULL) {
                                SCpnt = (Scsi_Cmnd *) req->special;
                        } else {
                                SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
                        }
                        /*
                         * If so, we are ready to do something.  Bump the count
                         * while the queue is locked and then break out of the
                         * loop.  Otherwise loop around and try another request.
                         */
                        if (!SCpnt) {
                                break;
                        }
                }

                /*
                 * Now bump the usage count for both the host and the
                 * device.
                 */
                SHpnt->host_busy++;
                SDpnt->device_busy++;

                /*
                 * Finally, before we release the lock, we copy the
                 * request to the command block, and remove the
                 * request from the request list.  Note that we always
                 * operate on the queue head - there is absolutely no
                 * reason to search the list, because all of the commands
                 * in this queue are for the same device.
                 */
                blkdev_dequeue_request(req);

                if (req != &SCpnt->request && req != &SRpnt->sr_request) {
                        memcpy(&SCpnt->request, req, sizeof(struct request));

                        /*
                         * We have copied the data out of the request block -
                         * it is now in a field in SCpnt.  Release the request
                         * block.
                         */
                        blkdev_release_request(req);
                }
                /*
                 * Now it is finally safe to release the lock.  We are
                 * not going to noodle the request list until this
                 * request has been queued and we loop back to queue
                 * another.
                 */
                req = NULL;
                spin_unlock_irq(&io_request_lock);

                if (SCpnt->request.cmd != SPECIAL) {
                        /*
                         * This will do a couple of things:
                         *  1) Fill in the actual SCSI command.
                         *  2) Fill in any other upper-level specific fields
                         *     (timeout).
                         *
                         * If this returns 0, it means that the request failed
                         * (reading past end of disk, reading offline device,
                         * etc).  This won't actually talk to the device, but
                         * some kinds of consistency checking may cause the
                         * request to be rejected immediately.
                         */
                        if (STpnt == NULL) {
                                STpnt = scsi_get_request_dev(req);
                        }
                        /*
                         * This sets up the scatter-gather table (allocating if
                         * required).  Hosts that need bounce buffers will also
                         * get those allocated here.
                         */
                        if (!SDpnt->scsi_init_io_fn(SCpnt)) {
                                /*
                                 * probably we ran out of sgtable memory, or
                                 * __init_io() wanted to revert to a single
                                 * segment request.  this would require bouncing
                                 * on highmem i/o, so mark the device as
                                 * starved and continue later instead
                                 */
                                spin_lock_irq(&io_request_lock);
                                SHpnt->host_busy--;
                                SDpnt->device_busy--;
                                if (SDpnt->device_busy == 0) {
                                        SDpnt->starved = 1;
                                        SHpnt->some_device_starved = 1;
                                }
                                SCpnt->request.special = SCpnt;
                                list_add(&SCpnt->request.queue, &q->queue_head);
                                break;
                        }

                        /*
                         * Initialize the actual SCSI command for this request.
                         */
                        if (!STpnt->init_command(SCpnt)) {
                                scsi_release_buffers(SCpnt);
                                SCpnt = __scsi_end_request(SCpnt, 0,
                                                           SCpnt->request.nr_sectors, 0, 0);
                                if (SCpnt != NULL) {
                                        panic("Should not have leftover blocks\n");
                                }
                                spin_lock_irq(&io_request_lock);
                                SHpnt->host_busy--;
                                SDpnt->device_busy--;
                                continue;
                        }
                }
                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(SCpnt);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                scsi_dispatch_cmd(SCpnt);

                /*
                 * Now we need to grab the lock again.  We are about to mess
                 * with the request queue and try to find another command.
                 */
                spin_lock_irq(&io_request_lock);
        }
}
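
/*
 * How this function gets attached (sketch, assuming the usual mid-layer
 * setup elsewhere in this tree): each Scsi_Device owns a request queue that
 * is initialised with scsi_request_fn as its request function and with the
 * device itself as queuedata, roughly:
 *
 *      blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
 *      SDpnt->request_queue.queuedata = (void *) SDpnt;
 *
 * which is why the function can recover SDpnt from q->queuedata at the top.
 */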

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   SHpnt   - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host * SHpnt)
{
        SHpnt->host_self_blocked = TRUE;
}

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
 *
 * Arguments:   SHpnt   - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 *
 *              This is done as an API function so that changes to the
 *              internals of the scsi mid-layer won't require wholesale
 *              changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host * SHpnt)
{
        Scsi_Device *SDloop;

        SHpnt->host_self_blocked = FALSE;
        /* Now that we are unblocked, try to start the queues. */
        for (SDloop = SHpnt->host_queue; SDloop; SDloop = SDloop->next)
                scsi_queue_next_request(&SDloop->request_queue, NULL);
}
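
/*
 * Typical usage (illustrative sketch): a low-level driver brackets an
 * internal adapter reset with these calls so that no new commands are queued
 * while the hardware is being reinitialised:
 *
 *      scsi_block_requests(host);
 *      ... reset and reinitialise the adapter ...
 *      scsi_unblock_requests(host);
 *
 * The unblock side also kicks every device queue on the host, so commands
 * that piled up while host_self_blocked was set get restarted immediately.
 */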

/*
 * Function:    scsi_report_bus_reset()
 *
 * Purpose:     Utility function used by low-level drivers to report that
 *              they have observed a bus reset on the bus being handled.
 *
 * Arguments:   SHpnt   - Host in question
 *              channel - channel on which reset was observed.
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       This only needs to be called if the reset is one which
 *              originates from an unknown location.  Resets originated
 *              by the mid-level itself don't need to call this, but there
 *              should be no harm.
 *
 *              The main purpose of this is to make sure that a CHECK_CONDITION
 *              is properly treated.
 */
void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel)
{
        Scsi_Device *SDloop;
        for (SDloop = SHpnt->host_queue; SDloop; SDloop = SDloop->next) {
                if (channel == SDloop->channel) {
                        SDloop->was_reset = 1;
                        SDloop->expecting_cc_ua = 1;
                }
        }
}
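
/*
 * A low-level driver calls scsi_report_bus_reset() from its interrupt
 * handler (or reset routine) when it notices a reset it did not initiate.
 * Marking was_reset makes scsi_request_fn() relock the door on removable
 * devices before new commands are issued, and expecting_cc_ua tells the
 * mid-layer that the UNIT ATTENTION which normally follows a reset is
 * expected rather than a genuine error.
 */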

/*
 * FIXME(eric) - these are empty stubs for the moment.  I need to re-implement
 * host blocking from scratch.  The theory is that hosts that wish to block
 * will register/deregister using these functions instead of the old way
 * of setting the wish_block flag.
 *
 * The details of the implementation remain to be settled, however the
 * stubs are here now so that the actual drivers will properly compile.
 */
void scsi_register_blocked_host(struct Scsi_Host * SHpnt)
{
}

void scsi_deregister_blocked_host(struct Scsi_Host * SHpnt)
{
}