1 |
1275 |
phoenix |
/*
|
2 |
|
|
* History:
|
3 |
|
|
* Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
|
4 |
|
|
* to allow user process control of SCSI devices.
|
5 |
|
|
* Development Sponsored by Killy Corp. NY NY
|
6 |
|
|
*
|
7 |
|
|
* Original driver (sg.c):
|
8 |
|
|
* Copyright (C) 1992 Lawrence Foard
|
9 |
|
|
* Version 2 and 3 extensions to driver:
|
10 |
|
|
* Copyright (C) 1998 - 2003 Douglas Gilbert
|
11 |
|
|
*
|
12 |
|
|
* Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
|
13 |
|
|
*
|
14 |
|
|
* This program is free software; you can redistribute it and/or modify
|
15 |
|
|
* it under the terms of the GNU General Public License as published by
|
16 |
|
|
* the Free Software Foundation; either version 2, or (at your option)
|
17 |
|
|
* any later version.
|
18 |
|
|
*
|
19 |
|
|
*/
|
20 |
|
|
#include <linux/config.h>
|
21 |
|
|
#ifdef CONFIG_PROC_FS
|
22 |
|
|
static char * sg_version_str = "Version: 3.1.25 (20030529)";
|
23 |
|
|
#endif
|
24 |
|
|
static int sg_version_num = 30125; /* 2 digits for each component */
|
25 |
|
|
/*
|
26 |
|
|
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
|
27 |
|
|
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
|
28 |
|
|
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
|
29 |
|
|
* (otherwise the macros compile to empty statements).
|
30 |
|
|
* Then before running the program to be debugged enter:
|
31 |
|
|
* # echo "scsi log timeout 7" > /proc/scsi/scsi
|
32 |
|
|
* This will send copious output to the console and the log which
|
33 |
|
|
* is usually /var/log/messages. To turn off debugging enter:
|
34 |
|
|
* # echo "scsi log timeout 0" > /proc/scsi/scsi
|
35 |
|
|
* The 'timeout' token was chosen because it is relatively unused.
|
36 |
|
|
* The token 'hlcomplete' should be used but that triggers too
|
37 |
|
|
* much output from the sd device driver. To dump the current
|
38 |
|
|
* state of the SCSI mid level data structures enter:
|
39 |
|
|
* # echo "scsi dump 1" > /proc/scsi/scsi
|
40 |
|
|
* To dump the state of sg's data structures use:
|
41 |
|
|
* # cat /proc/scsi/sg/debug
|
42 |
|
|
*
|
43 |
|
|
*/
|
44 |
|
|
#include <linux/module.h>
|
45 |
|
|
|
46 |
|
|
#include <linux/fs.h>
|
47 |
|
|
#include <linux/kernel.h>
|
48 |
|
|
#include <linux/sched.h>
|
49 |
|
|
#include <linux/string.h>
|
50 |
|
|
#include <linux/mm.h>
|
51 |
|
|
#include <linux/errno.h>
|
52 |
|
|
#include <linux/mtio.h>
|
53 |
|
|
#include <linux/ioctl.h>
|
54 |
|
|
#include <linux/fcntl.h>
|
55 |
|
|
#include <linux/init.h>
|
56 |
|
|
#include <linux/poll.h>
|
57 |
|
|
#include <linux/smp_lock.h>
|
58 |
|
|
|
59 |
|
|
#include <asm/io.h>
|
60 |
|
|
#include <asm/uaccess.h>
|
61 |
|
|
#include <asm/system.h>
|
62 |
|
|
|
63 |
|
|
#include <linux/blk.h>
|
64 |
|
|
#include "scsi.h"
|
65 |
|
|
#include "hosts.h"
|
66 |
|
|
#include <scsi/scsi_ioctl.h>
|
67 |
|
|
#include <scsi/sg.h>
|
68 |
|
|
|
69 |
|
|
#ifdef CONFIG_PROC_FS
|
70 |
|
|
#include <linux/proc_fs.h>
|
71 |
|
|
static int sg_proc_init(void);
|
72 |
|
|
static void sg_proc_cleanup(void);
|
73 |
|
|
#endif
|
74 |
|
|
|
75 |
|
|
#ifndef LINUX_VERSION_CODE
|
76 |
|
|
#include <linux/version.h>
|
77 |
|
|
#endif /* LINUX_VERSION_CODE */
|
78 |
|
|
|
79 |
|
|
#define SG_ALLOW_DIO_DEF 0
|
80 |
|
|
#define SG_ALLOW_DIO_CODE /* compile out be commenting this define */
|
81 |
|
|
#ifdef SG_ALLOW_DIO_CODE
|
82 |
|
|
#include <linux/iobuf.h>
|
83 |
|
|
#endif
|
84 |
|
|
|
85 |
|
|
#define SG_NEW_KIOVEC 0 /* use alloc_kiovec(), not alloc_kiovec_sz() */
|
86 |
|
|
|
87 |
|
|
int sg_big_buff = SG_DEF_RESERVED_SIZE;
|
88 |
|
|
/* N.B. This variable is readable and writeable via
|
89 |
|
|
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
|
90 |
|
|
of this size (or less if there is not enough memory) will be reserved
|
91 |
|
|
for use by this file descriptor. [Deprecated usage: this variable is also
|
92 |
|
|
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
|
93 |
|
|
the kernel (i.e. it is not a module).] */
|
94 |
|
|
static int def_reserved_size = -1; /* picks up init parameter */
|
95 |
|
|
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
|
96 |
|
|
|
97 |
|
|
#define SG_SECTOR_SZ 512
|
98 |
|
|
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
|
99 |
|
|
|
100 |
|
|
#define SG_LOW_POOL_THRESHHOLD 30
|
101 |
|
|
#define SG_MAX_POOL_SECTORS 320 /* Max. number of pool sectors to take */
|
102 |
|
|
|
103 |
|
|
static int sg_pool_secs_avail = SG_MAX_POOL_SECTORS;
|
104 |
|
|
|
105 |
|
|
#define SG_HEAP_PAGE 1 /* heap from kernel via get_free_pages() */
|
106 |
|
|
#define SG_HEAP_KMAL 2 /* heap from kernel via kmalloc() */
|
107 |
|
|
#define SG_HEAP_POOL 3 /* heap from scsi dma pool (mid-level) */
|
108 |
|
|
#define SG_USER_MEM 4 /* memory belongs to user space */
|
109 |
|
|
|
110 |
|
|
#define SG_DEV_ARR_LUMP 6 /* amount to over allocate sg_dev_arr by */
|
111 |
|
|
|
112 |
|
|
|
113 |
|
|
static int sg_init(void);
|
114 |
|
|
static int sg_attach(Scsi_Device *);
|
115 |
|
|
static void sg_finish(void);
|
116 |
|
|
static int sg_detect(Scsi_Device *);
|
117 |
|
|
static void sg_detach(Scsi_Device *);
|
118 |
|
|
|
119 |
|
|
static Scsi_Request * dummy_cmdp; /* only used for sizeof */
|
120 |
|
|
|
121 |
|
|
static rwlock_t sg_dev_arr_lock = RW_LOCK_UNLOCKED; /* Also used to lock
|
122 |
|
|
file descriptor list for device */
|
123 |
|
|
|
124 |
|
|
/* Registration record handed to the SCSI mid-level: hooks this driver's
 * detect/init/attach/detach/finish callbacks to the "sg" upper-level
 * driver slot.  scsi_type 0xff means sg claims any device type; the
 * char-device major is SCSI_GENERIC_MAJOR.  (Uses GNU old-style
 * 'field:' initializers, as was common in 2.4 kernel code.) */
static struct Scsi_Device_Template sg_template =
{
      tag:"sg",
      scsi_type:0xff,
      major:SCSI_GENERIC_MAJOR,
      detect:sg_detect,
      init:sg_init,
      finish:sg_finish,
      attach:sg_attach,
      detach:sg_detach
};
|
135 |
|
|
|
136 |
|
|
|
137 |
|
|
/* Describes one data buffer: either a single contiguous allocation or a
 * scatter-gather list, plus the bookkeeping (heap origin, direct-IO kiobuf)
 * needed to free/unmap it later.  Embedded in both Sg_request (per-command
 * data) and Sg_fd (the per-fd reserve buffer). */
typedef struct sg_scatter_hold /* holding area for scsi scatter gather info */
{
    unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
    unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
    unsigned bufflen; /* Size of (aggregate) data buffer */
    unsigned b_malloc_len; /* actual len malloc'ed in buffer */
    void * buffer; /* Data buffer or scatter list + mem_src_arr */
    struct kiobuf * kiobp; /* for direct IO information */
    char mapped; /* indicates kiobp has locked pages */
    char buffer_mem_src; /* heap whereabouts of 'buffer' (SG_HEAP_* / SG_USER_MEM) */
    unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold; /* 24 bytes long on i386 */
|
149 |
|
|
|
150 |
|
|
struct sg_device; /* forward declarations */
|
151 |
|
|
struct sg_fd;
|
152 |
|
|
|
153 |
|
|
/* State of one in-flight (or completed, awaiting read()) SCSI command.
 * Requests live in the fixed req_arr[] of the owning Sg_fd and are chained
 * into a singly-linked active list via nextrp. */
typedef struct sg_request /* SG_MAX_QUEUE requests outstanding per file */
{
    Scsi_Request * my_cmdp; /* != 0 when request with lower levels */
    struct sg_request * nextrp; /* NULL -> tail request (slist) */
    struct sg_fd * parentfp; /* NULL -> not in use */
    Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
    sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
    unsigned char sense_b[sizeof(dummy_cmdp->sr_sense_buffer)];
    char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
    char orphan; /* 1 -> drop on sight, 0 -> normal */
    char sg_io_owned; /* 1 -> packet belongs to SG_IO (ioctl), not read()/write() */
    volatile char done; /* 0->before bh, 1->before read, 2->read */
} Sg_request; /* 168 bytes long on i386 */
|
166 |
|
|
|
167 |
|
|
/* Per-open-file state.  All fds open on the same device are chained via
 * nextfp off Sg_device::headfp (list guarded by sg_dev_arr_lock per the
 * comment at its declaration).  Contains the request table and the
 * per-fd reserve buffer. */
typedef struct sg_fd /* holds the state of a file descriptor */
{
    struct sg_fd * nextfp; /* NULL when last opened fd on this device */
    struct sg_device * parentdp; /* owning device */
    wait_queue_head_t read_wait; /* queue read until command done */
    rwlock_t rq_list_lock; /* protect access to list in req_arr */
    int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
    Sg_scatter_hold reserve; /* buffer held for this file descriptor */
    unsigned save_scat_len; /* original length of trunc. scat. element */
    Sg_request * headrp; /* head of request slist, NULL->empty */
    struct fasync_struct * async_qp; /* used by asynchronous notification */
    Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
    char low_dma; /* as in parent but possibly overridden to 1 */
    char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
    volatile char closed; /* 1 -> fd closed but request(s) outstanding */
    char fd_mem_src; /* heap whereabouts of this Sg_fd object */
    char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
    char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
    char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
    char mmap_called; /* 0 -> mmap() never called on this fd */
} Sg_fd; /* 2760 bytes long on i386 */
|
188 |
|
|
|
189 |
|
|
/* Per-device state, one per attached SCSI device; reached through the
 * sg_dev_arr[] table indexed by minor number (see sg_get_dev()). */
typedef struct sg_device /* holds the state of each scsi generic device */
{
    Scsi_Device * device;
    wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
    int sg_tablesize; /* adapter's max scatter-gather table size */
    Sg_fd * headfp; /* first open fd belonging to this device */
    devfs_handle_t de;
    kdev_t i_rdev; /* holds device major+minor number */
    volatile char detached; /* 0->attached, 1->detached pending removal */
    volatile char exclude; /* opened for exclusive access */
    char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
} Sg_device; /* 36 bytes long on i386 */
|
201 |
|
|
|
202 |
|
|
|
203 |
|
|
static int sg_fasync(int fd, struct file * filp, int mode);
|
204 |
|
|
static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt);
|
205 |
|
|
static int sg_start_req(Sg_request * srp);
|
206 |
|
|
static void sg_finish_rem_req(Sg_request * srp);
|
207 |
|
|
static int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
|
208 |
|
|
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
|
209 |
|
|
int tablesize);
|
210 |
|
|
static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count,
|
211 |
|
|
Sg_request * srp);
|
212 |
|
|
static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count,
|
213 |
|
|
int blocking, int read_only, Sg_request ** o_srp);
|
214 |
|
|
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
|
215 |
|
|
unsigned char * cmnd, int timeout, int blocking);
|
216 |
|
|
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
|
217 |
|
|
int wr_xf, int * countp, unsigned char ** up);
|
218 |
|
|
static int sg_write_xfer(Sg_request * srp);
|
219 |
|
|
static int sg_read_xfer(Sg_request * srp);
|
220 |
|
|
static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer);
|
221 |
|
|
static void sg_remove_scat(Sg_scatter_hold * schp);
|
222 |
|
|
static char * sg_get_sgat_msa(Sg_scatter_hold * schp);
|
223 |
|
|
static void sg_build_reserve(Sg_fd * sfp, int req_size);
|
224 |
|
|
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
|
225 |
|
|
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
|
226 |
|
|
static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
|
227 |
|
|
int * mem_srcp);
|
228 |
|
|
static void sg_free(char * buff, int size, int mem_src);
|
229 |
|
|
static char * sg_low_malloc(int rqSz, int lowDma, int mem_src,
|
230 |
|
|
int * retSzp);
|
231 |
|
|
static void sg_low_free(char * buff, int size, int mem_src);
|
232 |
|
|
static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev);
|
233 |
|
|
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
|
234 |
|
|
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
|
235 |
|
|
static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id);
|
236 |
|
|
static Sg_request * sg_add_request(Sg_fd * sfp);
|
237 |
|
|
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
|
238 |
|
|
static int sg_res_in_use(Sg_fd * sfp);
|
239 |
|
|
static int sg_ms_to_jif(unsigned int msecs);
|
240 |
|
|
static inline unsigned sg_jif_to_ms(int jifs);
|
241 |
|
|
static int sg_allow_access(unsigned char opcode, char dev_type);
|
242 |
|
|
static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
|
243 |
|
|
static void sg_unmap_and(Sg_scatter_hold * schp, int free_also);
|
244 |
|
|
static Sg_device * sg_get_dev(int dev);
|
245 |
|
|
static inline int sg_alloc_kiovec(int nr, struct kiobuf **bufp, int *szp);
|
246 |
|
|
static inline void sg_free_kiovec(int nr, struct kiobuf **bufp, int *szp);
|
247 |
|
|
#ifdef CONFIG_PROC_FS
|
248 |
|
|
static int sg_last_dev(void);
|
249 |
|
|
#endif
|
250 |
|
|
|
251 |
|
|
static Sg_device ** sg_dev_arr = NULL;
|
252 |
|
|
|
253 |
|
|
#define SZ_SG_HEADER sizeof(struct sg_header)
|
254 |
|
|
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
|
255 |
|
|
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
|
256 |
|
|
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
|
257 |
|
|
|
258 |
|
|
|
259 |
|
|
/* open() entry point.  Looks up the Sg_device for this minor, pins the
 * low-level host driver's module, honours O_EXCL / O_NONBLOCK semantics
 * (sleeping on o_excl_wait when a blocking open must wait for an
 * exclusive holder), and allocates the per-fd Sg_fd state.
 * Returns 0 on success or a negative errno (-ENXIO, -ENODEV, -EBUSY,
 * -EPERM, -ERESTARTSYS, -ENOMEM).  Note: retval starts at -EBUSY so the
 * bare 'goto error_out' paths report busy. */
static int sg_open(struct inode * inode, struct file * filp)
{
    int dev = MINOR(inode->i_rdev);
    int flags = filp->f_flags;
    Sg_device * sdp;
    Sg_fd * sfp;
    int res;
    int retval = -EBUSY;     /* default for the unannotated goto paths below */

    SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
    sdp = sg_get_dev(dev);
    if ((! sdp) || (! sdp->device))
        return -ENXIO;
    if (sdp->detached)
        return -ENODEV;

    /* This driver's module count bumped by fops_get in <linux/fs.h> */
    /* Prevent the device driver from vanishing while we sleep */
    if (sdp->device->host->hostt->module)
        __MOD_INC_USE_COUNT(sdp->device->host->hostt->module);
    sdp->device->access_count++;

    /* A blocking open is refused while the mid-level is doing error
       recovery on this device; O_NONBLOCK opens are allowed through. */
    if (! ((flags & O_NONBLOCK) ||
           scsi_block_when_processing_errors(sdp->device))) {
        retval = -ENXIO;
        /* we are in error recovery for this device */
        goto error_out;
    }

    if (flags & O_EXCL) {
        if (O_RDONLY == (flags & O_ACCMODE)) {
            retval = -EPERM;   /* Can't lock it with read only access */
            goto error_out;
        }
        if (sdp->headfp && (flags & O_NONBLOCK))
            goto error_out;    /* other fds open and can't wait -> -EBUSY */
        res = 0;
        /* Sleep until no other fd is open; the condition expression also
           SETS sdp->exclude atomically w.r.t. the wait-queue wakeup. */
        __wait_event_interruptible(sdp->o_excl_wait,
               ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)),
                                   res);
        if (res) {
            retval = res; /* -ERESTARTSYS because signal hit process */
            goto error_out;
        }
    }
    else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
        if (flags & O_NONBLOCK)
            goto error_out;    /* -EBUSY */
        res = 0;
        __wait_event_interruptible(sdp->o_excl_wait, (! sdp->exclude), res);
        if (res) {
            retval = res; /* -ERESTARTSYS because signal hit process */
            goto error_out;
        }
    }
    /* Device may have been hot-removed while we slept above. */
    if (sdp->detached) {
        retval = -ENODEV;
        goto error_out;
    }
    if (! sdp->headfp) { /* no existing opens on this device */
        sdp->sgdebug = 0;
        sdp->sg_tablesize = sdp->device->host->sg_tablesize;
    }
    if ((sfp = sg_add_sfp(sdp, dev)))
        filp->private_data = sfp;
    else {
        if (flags & O_EXCL) sdp->exclude = 0; /* undo if error */
        retval = -ENOMEM;
        goto error_out;
    }
    return 0;

error_out:
    /* Undo the access_count/module pin taken above before failing. */
    sdp->device->access_count--;
    if ((! sdp->detached) && sdp->device->host->hostt->module)
        __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
    return retval;
}
|
337 |
|
|
|
338 |
|
|
/* Following function was formerly called 'sg_close' */
/* release() entry point (last close of this fd).  Detaches the fd from
 * async notification, tears down the Sg_fd via sg_remove_sfp() (which
 * returns 1 when the whole Sg_device went away with it), drops the
 * access_count/module pin taken in sg_open(), clears any exclusive lock
 * and wakes O_EXCL waiters.  Runs under the big kernel lock. */
static int sg_release(struct inode * inode, struct file * filp)
{
    Sg_device * sdp;
    Sg_fd * sfp;

    lock_kernel();
    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) {
        unlock_kernel();
        return -ENXIO;
    }
    SCSI_LOG_TIMEOUT(3, printk("sg_release: dev=%d\n", MINOR(sdp->i_rdev)));
    sg_fasync(-1, filp, 0);   /* remove filp from async notification list */
    if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
        if (! sdp->detached) {
            sdp->device->access_count--;
            if (sdp->device->host->hostt->module)
                __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
        }
        sdp->exclude = 0;
        wake_up_interruptible(&sdp->o_excl_wait);
    }
    unlock_kernel();
    return 0;
}
|
363 |
|
|
|
364 |
|
|
/* read() entry point.  Returns a completed request to user space.  If the
 * completed request was submitted through the v3 (sg_io_hdr_t) interface
 * it is handed off to sg_new_read(); otherwise the reply is formatted as
 * the legacy 'struct sg_header' (v1/v2 interface).  With force_packid set
 * the leading bytes of the user buffer are peeked to learn which pack_id
 * to wait for.  Blocks (interruptibly) unless O_NONBLOCK.  Returns bytes
 * transferred or a negative errno. */
static ssize_t sg_read(struct file * filp, char * buf,
                       size_t count, loff_t *ppos)
{
    int k, res;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    int req_pack_id = -1;    /* -1 -> accept any completed request */
    struct sg_header old_hdr;
    sg_io_hdr_t new_hdr;
    sg_io_hdr_t * hp;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n",
                               MINOR(sdp->i_rdev), (int)count));
    if (ppos != &filp->f_pos)
        ; /* FIXME: Hmm.  Seek to the right place, or fail? */
    if ((k = verify_area(VERIFY_WRITE, buf, count)))
        return k;    /* protects the __copy_{to,from}_user()s below */
    if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
        /* Peek at the user's header to find the requested pack_id; a
           negative reply_len marks the new sg_io_hdr_t layout. */
        __copy_from_user(&old_hdr, buf, SZ_SG_HEADER);
        if (old_hdr.reply_len < 0) {
            if (count >= SZ_SG_IO_HDR) {
                __copy_from_user(&new_hdr, buf, SZ_SG_IO_HDR);
                req_pack_id = new_hdr.pack_id;
            }
        }
        else
            req_pack_id = old_hdr.pack_id;
    }
    srp = sg_get_rq_mark(sfp, req_pack_id);
    if (! srp) { /* now wait on packet to arrive */
        if (sdp->detached)
            return -ENODEV;
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        while (1) {
            res = 0; /* following is a macro that beats race condition */
            __wait_event_interruptible(sfp->read_wait, (sdp->detached ||
                     (srp = sg_get_rq_mark(sfp, req_pack_id))), res);
            if (sdp->detached)
                return -ENODEV;
            if (0 == res)
                break;
            return res; /* -ERESTARTSYS because signal hit process */
        }
    }
    if (srp->header.interface_id != '\0')
        return sg_new_read(sfp, buf, count, srp);

    /* Legacy interface: translate sg_io_hdr back to struct sg_header. */
    hp = &srp->header;
    memset(&old_hdr, 0, SZ_SG_HEADER);
    old_hdr.reply_len = (int)hp->timeout;   /* timeout held reply_len ("structure abuse") */
    old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */
    old_hdr.pack_id = hp->pack_id;
    old_hdr.twelve_byte =
        ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
    old_hdr.target_status = hp->masked_status;
    old_hdr.host_status = hp->host_status;
    old_hdr.driver_status = hp->driver_status;
    if ((CHECK_CONDITION & hp->masked_status) ||
        (DRIVER_SENSE & hp->driver_status))
        memcpy(old_hdr.sense_buffer, srp->sense_b,
               sizeof(old_hdr.sense_buffer));
    switch (hp->host_status)
    { /* This setup of 'result' is for backward compatibility and is best
         ignored by the user who should use target, host + driver status */
    case DID_OK:
    case DID_PASSTHROUGH:
    case DID_SOFT_ERROR:
        old_hdr.result = 0;
        break;
    case DID_NO_CONNECT:
    case DID_BUS_BUSY:
    case DID_TIME_OUT:
        old_hdr.result = EBUSY;
        break;
    case DID_BAD_TARGET:
    case DID_ABORT:
    case DID_PARITY:
    case DID_RESET:
    case DID_BAD_INTR:
        old_hdr.result = EIO;
        break;
    case DID_ERROR:
        old_hdr.result =
            (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO;
        break;
    default:
        old_hdr.result = EIO;
        break;
    }

    /* Now copy the result back to the user buffer.  */
    if (count >= SZ_SG_HEADER) {
        __copy_to_user(buf, &old_hdr, SZ_SG_HEADER);
        buf += SZ_SG_HEADER;
        if (count > old_hdr.reply_len)
            count = old_hdr.reply_len;
        if (count > SZ_SG_HEADER)
            sg_read_oxfer(srp, buf, count - SZ_SG_HEADER);
    }
    else
        count = (old_hdr.result == 0) ? 0 : -EIO;
    sg_finish_rem_req(srp);   /* request slot freed regardless of outcome */
    return count;
}
|
472 |
|
|
|
473 |
|
|
/* Completes a read for the v3 (sg_io_hdr_t) interface: copies sense data
 * to the user's sbp buffer (bounded by mx_sb_len, the driver's sense
 * buffer size, and the "additional sense length" field), sets
 * SG_INFO_CHECK when any status field is non-zero, writes the updated
 * header back to user space, then transfers the data via sg_read_xfer().
 * Always frees the request before returning.  Returns 'count' on success
 * or a negative errno. */
static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count,
                           Sg_request * srp)
{
    sg_io_hdr_t * hp = &srp->header;
    int err = 0;
    int len;

    if (count < SZ_SG_IO_HDR) {
        err = -EINVAL;
        goto err_out;
    }
    hp->sb_len_wr = 0;
    if ((hp->mx_sb_len > 0) && hp->sbp) {
        if ((CHECK_CONDITION & hp->masked_status) ||
            (DRIVER_SENSE & hp->driver_status)) {
            int sb_len = sizeof(dummy_cmdp->sr_sense_buffer);
            sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
            len = 8 + (int)srp->sense_b[7]; /* Additional sense length field */
            len = (len > sb_len) ? sb_len : len;
            if ((err = verify_area(VERIFY_WRITE, hp->sbp, len)))
                goto err_out;
            __copy_to_user(hp->sbp, srp->sense_b, len);
            hp->sb_len_wr = len;
        }
    }
    if (hp->masked_status || hp->host_status || hp->driver_status)
        hp->info |= SG_INFO_CHECK;
    /* 'buf' was already verified for writing by the caller. */
    copy_to_user(buf, hp, SZ_SG_IO_HDR);
    err = sg_read_xfer(srp);
err_out:
    sg_finish_rem_req(srp);
    return (0 == err) ? count : err;
}
|
506 |
|
|
|
507 |
|
|
|
508 |
|
|
/* write() entry point (legacy v1/v2 interface).  A negative reply_len in
 * the leading struct sg_header tunnels the request to the v3 handler
 * sg_new_write().  Otherwise the command length is taken from
 * next_cmd_len (SG_NEXT_CMD_LEN ioctl) or derived from the opcode's
 * command group, an sg_io_hdr_t is synthesized from the old header
 * (note the documented "structure abuse" of timeout/flags), and the
 * command is dispatched via sg_common_write().  Returns 'count' or a
 * negative errno. */
static ssize_t sg_write(struct file * filp, const char * buf,
                        size_t count, loff_t *ppos)
{
    int mxsize, cmd_size, k;
    int input_size, blocking;
    unsigned char opcode;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    struct sg_header old_hdr;
    sg_io_hdr_t * hp;
    unsigned char cmnd[sizeof(dummy_cmdp->sr_cmnd)];

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n",
                               MINOR(sdp->i_rdev), (int)count));
    if (sdp->detached)
        return -ENODEV;
    if (! ((filp->f_flags & O_NONBLOCK) ||
           scsi_block_when_processing_errors(sdp->device)))
        return -ENXIO;    /* device is in error recovery */
    if (ppos != &filp->f_pos)
        ; /* FIXME: Hmm.  Seek to the right place, or fail? */

    if ((k = verify_area(VERIFY_READ, buf, count)))
        return k;  /* protects following copy_from_user()s + get_user()s */
    if (count < SZ_SG_HEADER)
        return -EIO;
    __copy_from_user(&old_hdr, buf, SZ_SG_HEADER);
    blocking = !(filp->f_flags & O_NONBLOCK);
    if (old_hdr.reply_len < 0)
        return sg_new_write(sfp, buf, count, blocking, 0, NULL);
    if (count < (SZ_SG_HEADER + 6))
        return -EIO;    /* The minimum scsi command length is 6 bytes. */

    if (! (srp = sg_add_request(sfp))) {
        SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
        return -EDOM;    /* historic errno for "queue full" */
    }
    buf += SZ_SG_HEADER;
    __get_user(opcode, buf);
    if (sfp->next_cmd_len > 0) {
        if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
            SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
            sfp->next_cmd_len = 0;
            sg_remove_request(sfp, srp);
            return -EIO;
        }
        cmd_size = sfp->next_cmd_len;
        sfp->next_cmd_len = 0; /* reset so only this write() affected */
    }
    else {
        cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
        if ((opcode >= 0xc0) && old_hdr.twelve_byte)
            cmd_size = 12;   /* vendor-specific group honours twelve_byte flag */
    }
    SCSI_LOG_TIMEOUT(4, printk("sg_write:   scsi opcode=0x%02x, cmd_size=%d\n",
                               (int)opcode, cmd_size));
    /* Determine buffer size.  */
    input_size = count - cmd_size;
    mxsize = (input_size > old_hdr.reply_len) ? input_size :
             old_hdr.reply_len;
    mxsize -= SZ_SG_HEADER;
    input_size -= SZ_SG_HEADER;
    if (input_size < 0) {
        sg_remove_request(sfp, srp);
        return -EIO; /* User did not pass enough bytes for this command. */
    }
    /* Build the internal v3 header from the legacy one. */
    hp = &srp->header;
    hp->interface_id = '\0'; /* indicator of old interface tunnelled */
    hp->cmd_len = (unsigned char)cmd_size;
    hp->iovec_count = 0;
    hp->mx_sb_len = 0;
    if (input_size > 0)
        hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
                              SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
    else
        hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV :
                                             SG_DXFER_NONE;
    hp->dxfer_len = mxsize;
    hp->dxferp = (unsigned char *)buf + cmd_size;
    hp->sbp = NULL;
    hp->timeout = old_hdr.reply_len;    /* structure abuse ... */
    hp->flags = input_size;             /* structure abuse ... */
    hp->pack_id = old_hdr.pack_id;
    hp->usr_ptr = NULL;
    __copy_from_user(cmnd, buf, cmd_size);
    k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
    return (k < 0) ? k : count;
}
|
599 |
|
|
|
600 |
|
|
/* Submits a command through the v3 (sg_io_hdr_t) interface, used by both
 * write() (when reply_len < 0) and the SG_IO ioctl.  Validates the
 * header ('S' interface_id, cmd_len 6..16, MMAP_IO constraints), copies
 * the CDB from user space, optionally enforces the read-only command
 * whitelist (sg_allow_access) for O_RDONLY fds, then dispatches via
 * sg_common_write().  On success *o_srp (if given) receives the request
 * so SG_IO can wait on it.  Returns 'count' or a negative errno. */
static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count,
                            int blocking, int read_only, Sg_request ** o_srp)
{
    int k;
    Sg_request * srp;
    sg_io_hdr_t * hp;
    unsigned char cmnd[sizeof(dummy_cmdp->sr_cmnd)];
    int timeout;

    if (count < SZ_SG_IO_HDR)
        return -EINVAL;
    if ((k = verify_area(VERIFY_READ, buf, count)))
        return k;  /* protects following copy_from_user()s + get_user()s */

    sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
    if (! (srp = sg_add_request(sfp))) {
        SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
        return -EDOM;
    }
    hp = &srp->header;
    __copy_from_user(hp, buf, SZ_SG_IO_HDR);
    if (hp->interface_id != 'S') {
        sg_remove_request(sfp, srp);
        return -ENOSYS;
    }
    if (hp->flags & SG_FLAG_MMAP_IO) {
        if (hp->dxfer_len > sfp->reserve.bufflen) {
            sg_remove_request(sfp, srp);
            return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
        }
        if (hp->flags & SG_FLAG_DIRECT_IO) {
            sg_remove_request(sfp, srp);
            return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
        }
        if (sg_res_in_use(sfp)) {
            sg_remove_request(sfp, srp);
            return -EBUSY;  /* reserve buffer already being used */
        }
    }
    timeout = sg_ms_to_jif(srp->header.timeout);   /* user gives millisecs */
    if ((! hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof(cmnd))) {
        sg_remove_request(sfp, srp);
        return -EMSGSIZE;
    }
    if ((k = verify_area(VERIFY_READ, hp->cmdp, hp->cmd_len))) {
        sg_remove_request(sfp, srp);
        return k;  /* protects following copy_from_user()s + get_user()s */
    }
    __copy_from_user(cmnd, hp->cmdp, hp->cmd_len);
    if (read_only &&
        (! sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
        sg_remove_request(sfp, srp);
        return -EPERM;   /* opcode not permitted on a read-only fd */
    }
    k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
    if (k < 0) return k;
    if (o_srp) *o_srp = srp;
    return count;
}
|
659 |
|
|
|
660 |
|
|
/* Common back end for sg_write()/sg_new_write(): sets up the data buffer
 * (sg_start_req / sg_write_xfer), allocates a Scsi_Request, copies the
 * request parameters into it, and hands the command to the SCSI
 * mid-level with scsi_do_req(); completion is reported asynchronously
 * through the sg_cmd_done_bh() callback.  On any failure the request is
 * torn down with sg_finish_rem_req().  Returns 0 on success or a
 * negative errno. */
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
                           unsigned char * cmnd, int timeout, int blocking)
{
    int k;
    Scsi_Request * SRpnt;
    Sg_device * sdp = sfp->parentdp;
    sg_io_hdr_t * hp = &srp->header;
    request_queue_t * q;

    srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
    hp->status = 0;
    hp->masked_status = 0;
    hp->msg_status = 0;
    hp->info = 0;
    hp->host_status = 0;
    hp->driver_status = 0;
    hp->resid = 0;
    SCSI_LOG_TIMEOUT(4,
        printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
               (int)cmnd[0], (int)hp->cmd_len));

    if ((k = sg_start_req(srp))) {
        SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
        sg_finish_rem_req(srp);
        return k;    /* probably out of space --> ENOMEM */
    }
    if ((k = sg_write_xfer(srp))) {
        SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
        sg_finish_rem_req(srp);
        return k;
    }
    if (sdp->detached) {
        sg_finish_rem_req(srp);
        return -ENODEV;
    }
    SRpnt = scsi_allocate_request(sdp->device);
    if(SRpnt == NULL) {
        SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
        sg_finish_rem_req(srp);
        return -ENOMEM;
    }

    srp->my_cmdp = SRpnt;
    q = &SRpnt->sr_device->request_queue;
    SRpnt->sr_request.rq_dev = sdp->i_rdev;
    SRpnt->sr_request.rq_status = RQ_ACTIVE;
    SRpnt->sr_sense_buffer[0] = 0;
    SRpnt->sr_cmd_len = hp->cmd_len;
    if (! (hp->flags & SG_FLAG_LUN_INHIBIT)) {
        /* Pre-SCSI-3 devices carry the LUN in byte 1 of the CDB. */
        if (sdp->device->scsi_level <= SCSI_2)
            cmnd[1] = (cmnd[1] & 0x1f) | (sdp->device->lun << 5);
    }
    SRpnt->sr_use_sg = srp->data.k_use_sg;
    SRpnt->sr_sglist_len = srp->data.sglist_len;
    SRpnt->sr_bufflen = srp->data.bufflen;
    SRpnt->sr_underflow = 0;
    SRpnt->sr_buffer = srp->data.buffer;
    switch (hp->dxfer_direction) {
    case SG_DXFER_TO_FROM_DEV:
    case SG_DXFER_FROM_DEV:
        SRpnt->sr_data_direction = SCSI_DATA_READ; break;
    case SG_DXFER_TO_DEV:
        SRpnt->sr_data_direction = SCSI_DATA_WRITE; break;
    case SG_DXFER_UNKNOWN:
        SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN; break;
    default:
        SRpnt->sr_data_direction = SCSI_DATA_NONE; break;
    }
    /* Ownership of the buffer fields has moved into SRpnt above. */
    srp->data.k_use_sg = 0;
    srp->data.sglist_len = 0;
    srp->data.bufflen = 0;
    srp->data.buffer = NULL;
    hp->duration = jiffies; /* unit jiffies now, millisecs after done */
    /* Now send everything off to mid-level.  The next time we hear about this
       packet is when sg_cmd_done_bh() is called (i.e. a callback). */
    scsi_do_req(SRpnt, (void *)cmnd,
                (void *)SRpnt->sr_buffer, hp->dxfer_len,
                sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
    /* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,1)
    generic_unplug_device(q);
#endif
    return 0;
}
|
744 |
|
|
|
745 |
|
|
/*
 * ioctl(2) handler for /dev/sg* file descriptors.
 * 'cmd_in' selects the operation; 'arg' is either an integer or a
 * user-space pointer depending on the command.  Returns 0 (or a positive
 * value for SG_GET_TIMEOUT) on success, negative errno on failure.
 */
static int sg_ioctl(struct inode * inode, struct file * filp,
                    unsigned int cmd_in, unsigned long arg)
{
    int result, val, read_only;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    unsigned long iflags;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n",
                               MINOR(sdp->i_rdev), (int)cmd_in));
    /* fds not opened O_RDWR are restricted from issuing risky commands */
    read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

    switch(cmd_in)
    {
    case SG_IO:
        /* Synchronous command: submit the request, sleep until the
           completion handler marks it done, then copy the reply back. */
        {
        int blocking = 1;   /* ignore O_NONBLOCK flag */

        if (sdp->detached)
            return -ENODEV;
        if(! scsi_block_when_processing_errors(sdp->device) )
            return -ENXIO;
        result = verify_area(VERIFY_WRITE, (void *)arg, SZ_SG_IO_HDR);
        if (result) return result;
        result = sg_new_write(sfp, (const char *)arg, SZ_SG_IO_HDR,
                              blocking, read_only, &srp);
        if (result < 0) return result;
        srp->sg_io_owned = 1;   /* hide from sg_read()/poll() consumers */
        while (1) {
            result = 0; /* following macro to beat race condition */
            __wait_event_interruptible(sfp->read_wait,
                    (sdp->detached || sfp->closed || srp->done), result);
            if (sdp->detached)
                return -ENODEV;
            if (sfp->closed)
                return 0;       /* request packet dropped already */
            if (0 == result)
                break;
            /* signal interrupted the wait: leave the in-flight request
               as an orphan so the completion handler can reap it */
            srp->orphan = 1;
            return result; /* -ERESTARTSYS because signal hit process */
        }
        srp->done = 2;
        result = sg_new_read(sfp, (char *)arg, SZ_SG_IO_HDR, srp);
        return (result < 0) ? result : 0;
        }
    case SG_SET_TIMEOUT:
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val < 0)
            return -EIO;
        sfp->timeout = val;     /* units are jiffies */
        return 0;
    case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
        return sfp->timeout; /* strange ..., for backward compatibility */
    case SG_SET_FORCE_LOW_DMA:
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val) {
            sfp->low_dma = 1;
            /* NOTE(review): low_dma was just set to 1, so this condition
               can never be true and the reserve buffer is never rebuilt
               here — looks like dead code inherited from upstream; confirm
               intended behavior before changing. */
            if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
                val = (int)sfp->reserve.bufflen;
                sg_remove_scat(&sfp->reserve);
                sg_build_reserve(sfp, val);
            }
        }
        else {
            if (sdp->detached)
                return -ENODEV;
            sfp->low_dma = sdp->device->host->unchecked_isa_dma;
        }
        return 0;
    case SG_GET_LOW_DMA:
        return put_user((int)sfp->low_dma, (int *)arg);
    case SG_GET_SCSI_ID:
        /* Fill a sg_scsi_id_t in user space describing this device. */
        result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(sg_scsi_id_t));
        if (result) return result;
        else {
            sg_scsi_id_t * sg_idp = (sg_scsi_id_t *)arg;

            if (sdp->detached)
                return -ENODEV;
            /* __put_user is safe: the whole struct was verified above */
            __put_user((int)sdp->device->host->host_no, &sg_idp->host_no);
            __put_user((int)sdp->device->channel, &sg_idp->channel);
            __put_user((int)sdp->device->id, &sg_idp->scsi_id);
            __put_user((int)sdp->device->lun, &sg_idp->lun);
            __put_user((int)sdp->device->type, &sg_idp->scsi_type);
            __put_user((short)sdp->device->host->cmd_per_lun,
                       &sg_idp->h_cmd_per_lun);
            __put_user((short)sdp->device->queue_depth,
                       &sg_idp->d_queue_depth);
            __put_user(0, &sg_idp->unused[0]);
            __put_user(0, &sg_idp->unused[1]);
            return 0;
        }
    case SG_SET_FORCE_PACK_ID:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->force_packid = val ? 1 : 0;
        return 0;
    case SG_GET_PACK_ID:
        /* Return pack_id of first completed, non-SG_IO request, else -1. */
        result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
        if (result) return result;
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        for (srp = sfp->headrp; srp; srp = srp->nextrp) {
            if ((1 == srp->done) && (! srp->sg_io_owned)) {
                read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                __put_user(srp->header.pack_id, (int *)arg);
                return 0;
            }
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
        __put_user(-1, (int *)arg);
        return 0;
    case SG_GET_NUM_WAITING:
        /* Count completed requests waiting to be read (excl. SG_IO). */
        read_lock_irqsave(&sfp->rq_list_lock, iflags);
        for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
            if ((1 == srp->done) && (! srp->sg_io_owned))
                ++val;
        }
        read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
        return put_user(val, (int *)arg);
    case SG_GET_SG_TABLESIZE:
        return put_user(sdp->sg_tablesize, (int *)arg);
    case SG_SET_RESERVED_SIZE:
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (val < 0)
            return -EINVAL;
        if (val != sfp->reserve.bufflen) {
            /* cannot resize while the buffer is in use or mmap()ed */
            if (sg_res_in_use(sfp) || sfp->mmap_called)
                return -EBUSY;
            sg_remove_scat(&sfp->reserve);
            sg_build_reserve(sfp, val);
        }
        return 0;
    case SG_GET_RESERVED_SIZE:
        val = (int)sfp->reserve.bufflen;
        return put_user(val, (int *)arg);
    case SG_SET_COMMAND_Q:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->cmd_q = val ? 1 : 0;
        return 0;
    case SG_GET_COMMAND_Q:
        return put_user((int)sfp->cmd_q, (int *)arg);
    case SG_SET_KEEP_ORPHAN:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->keep_orphan = val;
        return 0;
    case SG_GET_KEEP_ORPHAN:
        return put_user((int)sfp->keep_orphan, (int *)arg);
    case SG_NEXT_CMD_LEN:
        /* Override the cdb length of the next write(); negatives → 0. */
        result = get_user(val, (int *)arg);
        if (result) return result;
        sfp->next_cmd_len = (val > 0) ? val : 0;
        return 0;
    case SG_GET_VERSION_NUM:
        return put_user(sg_version_num, (int *)arg);
    case SG_GET_ACCESS_COUNT:
        val = (sdp->device ? sdp->device->access_count : 0);
        return put_user(val, (int *)arg);
    case SG_GET_REQUEST_TABLE:
        /* Snapshot state of all queued requests into a user array. */
        result = verify_area(VERIFY_WRITE, (void *) arg,
                             SZ_SG_REQ_INFO * SG_MAX_QUEUE);
        if (result) return result;
        else {
            sg_req_info_t rinfo[SG_MAX_QUEUE];
            Sg_request * srp;
            read_lock_irqsave(&sfp->rq_list_lock, iflags);
            for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
                 ++val, srp = srp ? srp->nextrp : srp) {
                memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
                if (srp) {
                    rinfo[val].req_state = srp->done + 1;
                    rinfo[val].problem = srp->header.masked_status &
                        srp->header.host_status & srp->header.driver_status;
                    /* completed: duration in ms; in flight: elapsed ms */
                    rinfo[val].duration = srp->done ?
                            srp->header.duration :
                            sg_jif_to_ms(jiffies - srp->header.duration);
                    rinfo[val].orphan = srp->orphan;
                    rinfo[val].sg_io_owned = srp->sg_io_owned;
                    rinfo[val].pack_id = srp->header.pack_id;
                    rinfo[val].usr_ptr = srp->header.usr_ptr;
                }
            }
            read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
            __copy_to_user((void *)arg, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE);
            return 0;
        }
    case SG_EMULATED_HOST:
        if (sdp->detached)
            return -ENODEV;
        return put_user(sdp->device->host->hostt->emulated, (int *)arg);
    case SG_SCSI_RESET:
        if (sdp->detached)
            return -ENODEV;
        if (filp->f_flags & O_NONBLOCK) {
            if (sdp->device->host->in_recovery)
                return -EBUSY;
        }
        else if (! scsi_block_when_processing_errors(sdp->device))
            return -EBUSY;
        result = get_user(val, (int *)arg);
        if (result) return result;
        if (SG_SCSI_RESET_NOTHING == val)
            return 0;
#ifdef SCSI_TRY_RESET_DEVICE
        switch (val)
        {
        case SG_SCSI_RESET_DEVICE:
            val = SCSI_TRY_RESET_DEVICE;
            break;
        case SG_SCSI_RESET_BUS:
            val = SCSI_TRY_RESET_BUS;
            break;
        case SG_SCSI_RESET_HOST:
            val = SCSI_TRY_RESET_HOST;
            break;
        default:
            return -EINVAL;
        }
        /* resets are privileged: require both admin and raw-io capability */
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
            return -EACCES;
        return (scsi_reset_provider(sdp->device, val) == SUCCESS) ? 0 : -EIO;
#else
        SCSI_LOG_TIMEOUT(1, printk("sg_ioctl: SG_RESET_SCSI not supported\n"));
        result = -EINVAL;
        /* NOTE(review): no return/break here, so when SCSI_TRY_RESET_DEVICE
           is undefined control falls through into SCSI_IOCTL_SEND_COMMAND
           and 'result' is discarded — verify this is intentional. */
#endif
    case SCSI_IOCTL_SEND_COMMAND:
        if (sdp->detached)
            return -ENODEV;
        if (read_only) {
            /* peek at the opcode so write-capable commands can be refused
               on read-only fds; default to WRITE_6 if the copy fails */
            unsigned char opcode = WRITE_6;
            Scsi_Ioctl_Command * siocp = (void *)arg;

            copy_from_user(&opcode, siocp->data, 1);
            if (! sg_allow_access(opcode, sdp->device->type))
                return -EPERM;
        }
        return scsi_ioctl_send_command(sdp->device, (void *)arg);
    case SG_SET_DEBUG:
        result = get_user(val, (int *)arg);
        if (result) return result;
        sdp->sgdebug = (char)val;
        return 0;
    case SCSI_IOCTL_GET_IDLUN:
    case SCSI_IOCTL_GET_BUS_NUMBER:
    case SCSI_IOCTL_PROBE_HOST:
    case SG_GET_TRANSFORM:
        /* harmless informational ioctls: delegate to the SCSI mid level */
        if (sdp->detached)
            return -ENODEV;
        return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
    default:
        if (read_only)
            return -EPERM; /* don't know so take safe approach */
        return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
    }
}
|
1007 |
|
|
|
1008 |
|
|
/*
 * poll(2)/select(2) handler.  Reports POLLIN when a completed (non-SG_IO)
 * request is waiting to be read, POLLOUT when another write() would be
 * accepted, and POLLHUP when the underlying device has been detached.
 */
static unsigned int sg_poll(struct file * filp, poll_table * wait)
{
    unsigned int res = 0;
    Sg_device * sdp;
    Sg_fd * sfp;
    Sg_request * srp;
    int count = 0;      /* number of requests on this fd's queue */
    unsigned long iflags;

    if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))
        || sfp->closed)
        return POLLERR;
    poll_wait(filp, &sfp->read_wait, wait);
    read_lock_irqsave(&sfp->rq_list_lock, iflags);
    for (srp = sfp->headrp; srp; srp = srp->nextrp) {
        /* if any read waiting, flag it */
        if ((0 == res) && (1 == srp->done) && (! srp->sg_io_owned))
            res = POLLIN | POLLRDNORM;
        ++count;
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

    if (sdp->detached)
        res |= POLLHUP;
    else if (! sfp->cmd_q) {
        /* command queuing off: writable only when nothing is outstanding */
        if (0 == count)
            res |= POLLOUT | POLLWRNORM;
    }
    else if (count < SG_MAX_QUEUE)
        res |= POLLOUT | POLLWRNORM;
    SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n",
                     MINOR(sdp->i_rdev), (int)res));
    return res;
}
|
1042 |
|
|
|
1043 |
|
|
static int sg_fasync(int fd, struct file * filp, int mode)
|
1044 |
|
|
{
|
1045 |
|
|
int retval;
|
1046 |
|
|
Sg_device * sdp;
|
1047 |
|
|
Sg_fd * sfp;
|
1048 |
|
|
|
1049 |
|
|
if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
|
1050 |
|
|
return -ENXIO;
|
1051 |
|
|
SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n",
|
1052 |
|
|
MINOR(sdp->i_rdev), mode));
|
1053 |
|
|
|
1054 |
|
|
retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
|
1055 |
|
|
return (retval < 0) ? retval : 0;
|
1056 |
|
|
}
|
1057 |
|
|
|
1058 |
|
|
/* Adjust the reference count of the page containing 'page_ptr': bump it
 * when a mapping is being established (startFinish != 0), drop it (if the
 * count is still positive) when the mapping is being torn down. */
static void sg_page_mapcount_adjust(void * page_ptr, int startFinish)
{
    struct page * page = virt_to_page(page_ptr);

    if (startFinish)
        get_page(page);                 /* increment page count */
    else {
        if (page_count(page) > 0)
            put_page_testzero(page);    /* decrement page count */
    }
}

/*
 * Fix up page reference counts on the reserve buffer when it is mmap()ed
 * (startFinish=1) and when the mapping lifetime ends (startFinish=0), so
 * the pages are not reclaimed while user space has them mapped.
 * N.B. correction _not_ applied to base page of each allocation.
 */
static void sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
    int k, m;

    SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, "
                     "scatg=%d\n", startFinish, rsv_schp->k_use_sg));
    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        struct scatterlist * sclp = rsv_schp->buffer;

        for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
            for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE)
                sg_page_mapcount_adjust((unsigned char *)sclp->address + m,
                                        startFinish);
        }
    }
    else { /* reserve buffer is just a single allocation */
        for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE)
            sg_page_mapcount_adjust((unsigned char *)rsv_schp->buffer + m,
                                    startFinish);
    }
}
|
1096 |
|
|
|
1097 |
|
|
/*
 * nopage handler for mmap()ed sg reserve buffers: translate a faulting
 * user address into the struct page of the backing reserve buffer.
 * Returns NOPAGE_SIGBUS when the address falls outside the buffer.
 * The returned page's reference count is incremented (VM convention).
 */
static struct page * sg_vma_nopage(struct vm_area_struct *vma,
                                   unsigned long addr, int unused)
{
    Sg_fd * sfp;
    struct page * page = NOPAGE_SIGBUS;
    void * page_ptr = NULL;
    unsigned long offset;
    Sg_scatter_hold * rsv_schp;

    if ((NULL == vma) || (! (sfp = (Sg_fd *)vma->vm_private_data)))
        return page;
    rsv_schp = &sfp->reserve;
    offset = addr - vma->vm_start;
    if (offset >= rsv_schp->bufflen)
        return page;    /* fault beyond reserve buffer -> SIGBUS */
    SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
                               offset, rsv_schp->k_use_sg));
    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        int k;
        unsigned long sa = vma->vm_start;
        unsigned long len;
        struct scatterlist * sclp = rsv_schp->buffer;

        /* walk the scatter list until the segment containing 'offset',
           reducing 'offset' by each skipped segment's mapped length */
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
             ++k, ++sclp) {
            len = vma->vm_end - sa;
            len = (len < sclp->length) ? len : sclp->length;
            if (offset < len) {
                page_ptr = (unsigned char *)sclp->address + offset;
                page = virt_to_page(page_ptr);
                get_page(page); /* increment page count */
                break;
            }
            sa += len;
            offset -= len;
        }
    }
    else { /* reserve buffer is just a single allocation */
        page_ptr = (unsigned char *)rsv_schp->buffer + offset;
        page = virt_to_page(page_ptr);
        get_page(page); /* increment page count */
    }
    return page;
}
|
1141 |
|
|
|
1142 |
|
|
/* VM operations for sg mmap()ed areas: pages are supplied on demand from
 * the per-fd reserve buffer by sg_vma_nopage() above. */
static struct vm_operations_struct sg_mmap_vm_ops = {
    nopage : sg_vma_nopage,
};
|
1145 |
|
|
|
1146 |
|
|
/*
 * mmap(2) handler: map the fd's reserve buffer into user space.
 * Requires a zero file offset and a length no larger than the reserve
 * buffer; all backing memory must be page aligned.  Page reference
 * counts are corrected once per fd lifetime via sg_rb_correct4mmap().
 */
static int sg_mmap(struct file * filp, struct vm_area_struct *vma)
{
    Sg_fd * sfp;
    unsigned long req_sz = vma->vm_end - vma->vm_start;
    Sg_scatter_hold * rsv_schp;

    if ((! filp) || (! vma) || (! (sfp = (Sg_fd *)filp->private_data)))
        return -ENXIO;
    SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
                               (void *)vma->vm_start, (int)req_sz));
    if (vma->vm_pgoff)
        return -EINVAL;         /* want no offset */
    rsv_schp = &sfp->reserve;
    if (req_sz > rsv_schp->bufflen)
        return -ENOMEM; /* cannot map more than reserved buffer */

    if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
        int k;
        unsigned long sa = vma->vm_start;
        unsigned long len;
        struct scatterlist * sclp = rsv_schp->buffer;

        /* verify every segment the mapping will cover is page aligned */
        for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
             ++k, ++sclp) {
            if ((unsigned long)sclp->address & (PAGE_SIZE - 1))
                return -EFAULT; /* non page aligned memory ?? */
            len = vma->vm_end - sa;
            len = (len < sclp->length) ? len : sclp->length;
            sa += len;
        }
    }
    else { /* reserve buffer is just a single allocation */
        if ((unsigned long)rsv_schp->buffer & (PAGE_SIZE - 1))
            return -EFAULT; /* non page aligned memory ?? */
    }
    if (0 == sfp->mmap_called) {
        sg_rb_correct4mmap(rsv_schp, 1);  /* do only once per fd lifetime */
        sfp->mmap_called = 1;
    }
    /* VM_RESERVED keeps the pages out of swap; VM_IO marks them special */
    vma->vm_flags |= (VM_RESERVED | VM_IO);
    vma->vm_private_data = sfp;
    vma->vm_ops = &sg_mmap_vm_ops;
    return 0;
}
|
1190 |
|
|
|
1191 |
|
|
/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed).
 * It locates the Sg_request that owns SRpnt, transfers buffer ownership
 * back to the sg layer, records status/sense data, releases the mid-level
 * request, then either frees the request (fd already closed, or orphan
 * without keep_orphan) or wakes up readers waiting on the fd. */
static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt)
{
    Scsi_Request * SRpnt = SCpnt->sc_request;
    int dev = MINOR(SRpnt->sr_request.rq_dev);
    Sg_device * sdp = NULL;
    Sg_fd * sfp;
    Sg_request * srp = NULL;

    /* look up the sg device this completion belongs to */
    read_lock(&sg_dev_arr_lock);
    if (sg_dev_arr && (dev >= 0)) {
        if (dev < sg_template.dev_max)
            sdp = sg_dev_arr[dev];
    }
    if ((NULL == sdp) || sdp->detached) {
        read_unlock(&sg_dev_arr_lock);
        SCSI_LOG_TIMEOUT(1, printk("sg...bh: dev=%d gone\n", dev));
        scsi_release_request(SRpnt);
        SRpnt = NULL;
        return;
    }
    /* scan every fd's request list for the request owning this SRpnt */
    sfp = sdp->headfp;
    while (sfp) {
        read_lock(&sfp->rq_list_lock);
        for (srp = sfp->headrp; srp; srp = srp->nextrp) {
            if (SRpnt == srp->my_cmdp)
                break;
        }
        read_unlock(&sfp->rq_list_lock);
        if (srp)
            break;
        sfp = sfp->nextfp;
    }
    if (! srp) {
        read_unlock(&sg_dev_arr_lock);
        SCSI_LOG_TIMEOUT(1, printk("sg...bh: req missing, dev=%d\n", dev));
        scsi_release_request(SRpnt);
        SRpnt = NULL;
        return;
    }
    /* First transfer ownership of data buffers to sg_device object. */
    srp->data.k_use_sg = SRpnt->sr_use_sg;
    srp->data.sglist_len = SRpnt->sr_sglist_len;
    srp->data.bufflen = SRpnt->sr_bufflen;
    srp->data.buffer = SRpnt->sr_buffer;
    /* now clear out request structure */
    SRpnt->sr_use_sg = 0;
    SRpnt->sr_sglist_len = 0;
    SRpnt->sr_bufflen = 0;
    SRpnt->sr_buffer = NULL;
    SRpnt->sr_underflow = 0;
    SRpnt->sr_request.rq_dev = MKDEV(0, 0);  /* "sg" _disowns_ request blk */

    srp->my_cmdp = NULL;
    srp->done = 1;      /* mark completed; readers test this flag */
    read_unlock(&sg_dev_arr_lock);

    SCSI_LOG_TIMEOUT(4, printk("sg...bh: dev=%d, pack_id=%d, res=0x%x\n",
                     dev, srp->header.pack_id, (int)SRpnt->sr_result));
    srp->header.resid = SCpnt->resid;
    /* sg_unmap_and(&srp->data, 0); */    /* unmap locked pages a.s.a.p. */
    /* N.B. unit of duration changes here from jiffies to millisecs */
    srp->header.duration = sg_jif_to_ms(jiffies - (int)srp->header.duration);
    if (0 != SRpnt->sr_result) {
        /* command failed: capture sense data and decomposed status bytes */
        memcpy(srp->sense_b, SRpnt->sr_sense_buffer, sizeof(srp->sense_b));
        srp->header.status = 0xff & SRpnt->sr_result;
        srp->header.masked_status = status_byte(SRpnt->sr_result);
        srp->header.msg_status = msg_byte(SRpnt->sr_result);
        srp->header.host_status = host_byte(SRpnt->sr_result);
        srp->header.driver_status = driver_byte(SRpnt->sr_result);
        if ((sdp->sgdebug > 0) &&
            ((CHECK_CONDITION == srp->header.masked_status) ||
             (COMMAND_TERMINATED == srp->header.masked_status)))
            print_req_sense("sg_cmd_done_bh", SRpnt);

        /* Following if statement is a patch supplied by Eric Youngdale */
        if (driver_byte(SRpnt->sr_result) != 0
            && (SRpnt->sr_sense_buffer[0] & 0x7f) == 0x70
            && (SRpnt->sr_sense_buffer[2] & 0xf) == UNIT_ATTENTION
            && sdp->device->removable) {
            /* Detected disc change. Set the bit - this may be used if */
            /* there are filesystems using this device. */
            sdp->device->changed = 1;
        }
    }
    /* Rely on write phase to clean out srp status values, so no "else" */

    scsi_release_request(SRpnt);
    SRpnt = NULL;
    if (sfp->closed) { /* whoops this fd already released, cleanup */
        SCSI_LOG_TIMEOUT(1,
               printk("sg...bh: already closed, freeing ...\n"));
        sg_finish_rem_req(srp);
        srp = NULL;
        if (NULL == sfp->headrp) {
            /* last outstanding request on a closed fd: finish tearing
               down the fd and drop the module/device references that
               sg_release() deferred */
            SCSI_LOG_TIMEOUT(1,
                printk("sg...bh: already closed, final cleanup\n"));
            if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
                sdp->device->access_count--;
                if (sdp->device->host->hostt->module)
                    __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
            }
            if (sg_template.module)
                __MOD_DEC_USE_COUNT(sg_template.module);
            sfp = NULL;
        }
    }
    else if (srp && srp->orphan) {
        /* SG_IO caller was interrupted by a signal */
        if (sfp->keep_orphan)
            srp->sg_io_owned = 0;   /* make readable via sg_read() */
        else {
            sg_finish_rem_req(srp);
            srp = NULL;
        }
    }
    if (sfp && srp) {
        /* Now wake up any sg_read() that is waiting for this packet. */
        wake_up_interruptible(&sfp->read_wait);
        kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
    }
}
|
1313 |
|
|
|
1314 |
|
|
/* File operations vector for /dev/sg* character devices; registered with
 * devfs_register_chrdev() in sg_init(). */
static struct file_operations sg_fops = {
    owner: THIS_MODULE,
    read: sg_read,
    write: sg_write,
    poll: sg_poll,
    ioctl: sg_ioctl,
    open: sg_open,
    mmap: sg_mmap,
    release: sg_release,
    fasync: sg_fasync,
};
|
1325 |
|
|
|
1326 |
|
|
|
1327 |
|
|
/* Upper-level "detect" callback from the SCSI mid level: count each device
 * noticed (used later to size the device array) and claim it. */
static int sg_detect(Scsi_Device * scsidp)
{
    sg_template.dev_noticed += 1;
    return 1;
}
|
1332 |
|
|
|
1333 |
|
|
/* Driver initialization */
|
1334 |
|
|
static int sg_init()
|
1335 |
|
|
{
|
1336 |
|
|
static int sg_registered = 0;
|
1337 |
|
|
unsigned long iflags;
|
1338 |
|
|
|
1339 |
|
|
if ((sg_template.dev_noticed == 0) || sg_dev_arr)
|
1340 |
|
|
return 0;
|
1341 |
|
|
|
1342 |
|
|
write_lock_irqsave(&sg_dev_arr_lock, iflags);
|
1343 |
|
|
if(!sg_registered) {
|
1344 |
|
|
if (devfs_register_chrdev(SCSI_GENERIC_MAJOR,"sg",&sg_fops))
|
1345 |
|
|
{
|
1346 |
|
|
printk(KERN_ERR "Unable to get major %d for generic SCSI device\n",
|
1347 |
|
|
SCSI_GENERIC_MAJOR);
|
1348 |
|
|
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
|
1349 |
|
|
sg_template.dev_noticed = 0;
|
1350 |
|
|
return 1;
|
1351 |
|
|
}
|
1352 |
|
|
sg_registered++;
|
1353 |
|
|
}
|
1354 |
|
|
|
1355 |
|
|
SCSI_LOG_TIMEOUT(3, printk("sg_init\n"));
|
1356 |
|
|
sg_template.dev_max = sg_template.dev_noticed + SG_DEV_ARR_LUMP;
|
1357 |
|
|
sg_dev_arr = (Sg_device **)kmalloc(sg_template.dev_max *
|
1358 |
|
|
sizeof(Sg_device *), GFP_ATOMIC);
|
1359 |
|
|
if (NULL == sg_dev_arr) {
|
1360 |
|
|
printk(KERN_ERR "sg_init: no space for sg_dev_arr\n");
|
1361 |
|
|
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
|
1362 |
|
|
sg_template.dev_noticed = 0;
|
1363 |
|
|
return 1;
|
1364 |
|
|
}
|
1365 |
|
|
memset(sg_dev_arr, 0, sg_template.dev_max * sizeof(Sg_device *));
|
1366 |
|
|
write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
|
1367 |
|
|
#ifdef CONFIG_PROC_FS
|
1368 |
|
|
sg_proc_init();
|
1369 |
|
|
#endif /* CONFIG_PROC_FS */
|
1370 |
|
|
return 0;
|
1371 |
|
|
}
|
1372 |
|
|
|
1373 |
|
|
#ifndef MODULE
|
1374 |
|
|
/*
 * Kernel boot-option parser for "sg_def_reserved_size=n".  Records the
 * requested per-fd reserve buffer size; non-negative values also update
 * sg_big_buff.  Returns 1 when the option was consumed, 0 on bad syntax.
 */
static int __init sg_def_reserved_size_setup(char *str)
{
    int tmp;

    if (get_option(&str, &tmp) != 1) {
        printk(KERN_WARNING "sg_def_reserved_size : usage "
            "sg_def_reserved_size=n (n could be 65536, 131072 or 262144)\n");
        return 0;
    }
    def_reserved_size = tmp;
    if (tmp >= 0)
        sg_big_buff = tmp;
    return 1;
}
|
1389 |
|
|
|
1390 |
|
|
__setup("sg_def_reserved_size=", sg_def_reserved_size_setup);
|
1391 |
|
|
#endif
|
1392 |
|
|
|
1393 |
|
|
|
1394 |
|
|
/*
 * Upper-level "attach" callback: bind a new Sg_device to 'scsidp',
 * growing the global device array if needed, pick the first free minor,
 * and register the devfs entry.  Returns 0 on success, 1 on failure
 * (allocation failure or minor numbers exhausted).
 */
static int sg_attach(Scsi_Device * scsidp)
{
    Sg_device * sdp;
    unsigned long iflags;
    int k;

    write_lock_irqsave(&sg_dev_arr_lock, iflags);
    if (sg_template.nr_dev >= sg_template.dev_max) { /* try to resize */
        Sg_device ** tmp_da;
        int tmp_dev_max = sg_template.nr_dev + SG_DEV_ARR_LUMP;

        /* GFP_ATOMIC: we hold the array spinlock */
        tmp_da = (Sg_device **)kmalloc(tmp_dev_max *
                                       sizeof(Sg_device *), GFP_ATOMIC);
        if (NULL == tmp_da) {
            scsidp->attached--;
            write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
            printk(KERN_ERR "sg_attach: device array cannot be resized\n");
            return 1;
        }
        memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
        memcpy(tmp_da, sg_dev_arr, sg_template.dev_max * sizeof(Sg_device *));
        kfree((char *)sg_dev_arr);
        sg_dev_arr = tmp_da;
        sg_template.dev_max = tmp_dev_max;
    }

    /* find the first free slot; its index becomes the minor number */
    for(k = 0; k < sg_template.dev_max; k++)
        if(! sg_dev_arr[k]) break;
    if (k > MINORMASK) {
        scsidp->attached--;
        write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
        printk(KERN_WARNING "Unable to attach sg device <%d, %d, %d, %d>"
               " type=%d, minor number exceed %d\n", scsidp->host->host_no,
               scsidp->channel, scsidp->id, scsidp->lun, scsidp->type,
               MINORMASK);
        return 1;
    }
    if(k < sg_template.dev_max)
        sdp = (Sg_device *)kmalloc(sizeof(Sg_device), GFP_ATOMIC);
    else
        sdp = NULL;
    if (NULL == sdp) {
        scsidp->attached--;
        write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
        printk(KERN_ERR "sg_attach: Sg_device cannot be allocated\n");
        return 1;
    }

    SCSI_LOG_TIMEOUT(3, printk("sg_attach: dev=%d \n", k));
    sdp->device = scsidp;
    init_waitqueue_head(&sdp->o_excl_wait);
    sdp->headfp= NULL;
    sdp->exclude = 0;
    sdp->sgdebug = 0;
    sdp->detached = 0;
    sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;
    sdp->i_rdev = MKDEV(SCSI_GENERIC_MAJOR, k);
    sdp->de = devfs_register (scsidp->de, "generic", DEVFS_FL_DEFAULT,
                             SCSI_GENERIC_MAJOR, k,
                             S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
                             &sg_fops, sdp);
    sg_template.nr_dev++;
    sg_dev_arr[k] = sdp;
    write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
    /* disk/tape/etc. types announce themselves via their own drivers,
       so only print a notice for the remaining device types */
    switch (scsidp->type) {
        case TYPE_DISK:
        case TYPE_MOD:
        case TYPE_ROM:
        case TYPE_WORM:
        case TYPE_TAPE: break;
        default:
            printk(KERN_NOTICE "Attached scsi generic sg%d at scsi%d, channel"
                   " %d, id %d, lun %d, type %d\n", k, scsidp->host->host_no,
                   scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
    }
    return 0;
}
|
1471 |
|
|
|
1472 |
|
|
/* Called at 'finish' of init process, after all attaches */
/* Intentionally empty: all per-device setup is completed in sg_attach(). */
static void sg_finish(void)
{ }
|
1475 |
|
|
|
1476 |
|
|
/*
 * Upper-level "detach" callback: remove the Sg_device bound to 'scsidp'.
 * If file descriptors are still open ("dirty" detach), mark the device
 * detached, reap requests belonging to already-closed fds, wake/notify
 * the remaining users, and delay destruction briefly; otherwise free the
 * Sg_device immediately.
 */
static void sg_detach(Scsi_Device * scsidp)
{
    Sg_device * sdp;
    unsigned long iflags;
    Sg_fd * sfp;
    Sg_fd * tsfp;
    Sg_request * srp;
    Sg_request * tsrp;
    int k, delay;

    if (NULL == sg_dev_arr)
        return;
    delay = 0;
    write_lock_irqsave(&sg_dev_arr_lock, iflags);
    for (k = 0; k < sg_template.dev_max; k++) {
        sdp = sg_dev_arr[k];
        if ((NULL == sdp) || (sdp->device != scsidp))
            continue;   /* dirty but lowers nesting */
        if (sdp->headfp) {
            /* open fds remain: dirty detach */
            sdp->detached = 1;
            for (sfp = sdp->headfp; sfp; sfp = tsfp) {
                tsfp = sfp->nextfp;     /* saved: sfp may be freed below */
                for (srp = sfp->headrp; srp; srp = tsrp) {
                    tsrp = srp->nextrp;
                    /* reap requests no-one can read any more */
                    if (sfp->closed || (0 == srp->done))
                        sg_finish_rem_req(srp);
                }
                if (sfp->closed) {
                    /* drop the references sg_release() deferred */
                    sdp->device->access_count--;
                    if (sg_template.module)
                        __MOD_DEC_USE_COUNT(sg_template.module);
                    if (sdp->device->host->hostt->module)
                        __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
                    __sg_remove_sfp(sdp, sfp);
                }
                else {
                    /* fd still open: notify its owner of the hangup */
                    delay = 1;
                    wake_up_interruptible(&sfp->read_wait);
                    kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
                }
            }
            SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
            devfs_unregister (sdp->de);
            sdp->de = NULL;
            if (NULL == sdp->headfp) {
                kfree((char *)sdp);
                sg_dev_arr[k] = NULL;
            }
        }
        else { /* nothing active, simple case */
            SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
            devfs_unregister (sdp->de);
            kfree((char *)sdp);
            sg_dev_arr[k] = NULL;
        }
        scsidp->attached--;
        sg_template.nr_dev--;
        sg_template.dev_noticed--;      /* from <dan@lectra.fr> */
        break;
    }
    write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
    if (delay)
        scsi_sleep(2);  /* dirty detach so delay device destruction */
}
|
1540 |
|
|
|
1541 |
|
|
/* Module metadata and load-time parameters. */
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");

#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
#endif

/* def_reserved_size: when >= 0 it overrides sg_big_buff, the default
 * size of the per-fd reserve buffer (applied in init_sg()). */
MODULE_PARM(def_reserved_size, "i");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
|
1550 |
|
|
|
1551 |
|
|
/* Module initialization: apply the def_reserved_size module parameter
 * (if given) and register this upper level driver with the SCSI mid
 * level.  Returns the scsi_register_module() result. */
static int __init init_sg(void) {
    if (def_reserved_size >= 0)
        sg_big_buff = def_reserved_size;
    sg_template.module = THIS_MODULE;
    return scsi_register_module(MODULE_SCSI_DEV, &sg_template);
}
|
1557 |
|
|
|
1558 |
|
|
/* Module teardown: remove /proc entries (when built with procfs),
 * deregister from the SCSI mid level and the sg char major, then free
 * the device pointer array. */
static void __exit exit_sg( void)
{
#ifdef CONFIG_PROC_FS
    sg_proc_cleanup();
#endif /* CONFIG_PROC_FS */
    scsi_unregister_module(MODULE_SCSI_DEV, &sg_template);
    devfs_unregister_chrdev(SCSI_GENERIC_MAJOR, "sg");
    if(sg_dev_arr != NULL) {
        kfree((char *)sg_dev_arr);
        sg_dev_arr = NULL;
    }
    sg_template.dev_max = 0;
}
|
1571 |
|
|
|
1572 |
|
|
|
1573 |
|
|
/* Set up the data buffer for a new request.  Nothing to do for a
 * zero-length or SG_DXFER_NONE transfer.  Otherwise tries, in order:
 * direct (zero copy) IO when enabled and permitted, the fd's reserve
 * buffer when free and large enough, and finally an indirect kernel
 * allocated scatter gather buffer.  Returns 0 on success or a
 * negative errno. */
static int sg_start_req(Sg_request * srp)
{
    int res;
    Sg_fd * sfp = srp->parentfp;
    sg_io_hdr_t * hp = &srp->header;
    int dxfer_len = (int)hp->dxfer_len;
    int dxfer_dir = hp->dxfer_direction;
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
    if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
        return 0;
    /* direct IO requires: globally enabled, requested by the caller,
     * a known direction, no iovec list, and a non-ISA-DMA host */
    if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
        (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
        (! sfp->parentdp->device->host->unchecked_isa_dma)) {
        res = sg_build_dir(srp, sfp, dxfer_len);
        if (res <= 0)   /* -ve -> error, 0 -> done, 1 -> try indirect */
            return res;
    }
    if ((! sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
        sg_link_reserve(sfp, srp, dxfer_len);
    else {
        res = sg_build_indi(req_schp, sfp, dxfer_len);
        if (res) {
            sg_remove_scat(req_schp);
            return res;
        }
    }
    return 0;
}
|
1604 |
|
|
|
1605 |
|
|
/* Tear down a finished (or abandoned) request: undo any direct IO
 * mapping, give back the reserve buffer or free the indirect scatter
 * gather buffer, then unlink the request from its parent fd. */
static void sg_finish_rem_req(Sg_request * srp)
{
    Sg_fd * sfp = srp->parentfp;
    Sg_scatter_hold * req_schp = &srp->data;

    SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n",
                               (int)srp->res_used));
    sg_unmap_and(&srp->data, 1);
    if (srp->res_used)
        sg_unlink_reserve(sfp, srp);
    else
        sg_remove_scat(req_schp);
    sg_remove_request(sfp, srp);
}
|
1619 |
|
|
|
1620 |
|
|
/* Allocate the scatter gather bookkeeping buffer for schp: an array
 * of up to 'tablesize' struct scatterlist entries immediately followed
 * by one char per entry (the "mem_src" array, read back via
 * sg_get_sgat_msa()) recording which heap each data chunk came from.
 * A short allocation scales the element count down.  Returns the
 * number of usable elements, or -ENOMEM. */
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
                         int tablesize)
{
    int mem_src, ret_sz;
    int elem_sz = sizeof(struct scatterlist) + sizeof(char);
    /* scatter gather array, followed by mem_src_arr (array of chars) */
    int sg_bufflen = tablesize * elem_sz;
    int mx_sc_elems = tablesize;

    mem_src = SG_HEAP_KMAL;
    schp->buffer = sg_malloc(sfp, sg_bufflen, &ret_sz, &mem_src);
    if (! schp->buffer)
        return -ENOMEM;
    else if (ret_sz != sg_bufflen) {    /* short allocation: fewer elements */
        sg_bufflen = ret_sz;
        mx_sc_elems = sg_bufflen / elem_sz;
    }
    schp->buffer_mem_src = (char)mem_src;
    schp->sglist_len = sg_bufflen;
    memset(schp->buffer, 0, sg_bufflen);
    return mx_sc_elems; /* number of scat_gath elements allocated */
}
|
1642 |
|
|
|
1643 |
|
|
/* Undo direct IO state on schp: unmap the kiobuf if it is mapped and,
 * when free_also is set, free the kiovec itself.  Compiles to an empty
 * function unless direct IO support (SG_ALLOW_DIO_CODE) is built in. */
static void sg_unmap_and(Sg_scatter_hold * schp, int free_also)
{
#ifdef SG_ALLOW_DIO_CODE
    int nbhs = 0;

    if (schp && schp->kiobp) {
        if (schp->mapped) {
            unmap_kiobuf(schp->kiobp);
            schp->mapped = 0;
        }
        if (free_also) {
            sg_free_kiovec(1, &schp->kiobp, &nbhs);
            schp->kiobp = NULL;
        }
    }
#endif
}
|
1660 |
|
|
|
1661 |
|
|
/* Attempt direct (zero copy) IO by mapping the user buffer into the
 * kernel with a kiobuf.  If the mapped pages are physically contiguous
 * they are used as one flat buffer; otherwise a scatter gather list is
 * built with one element per page.  Returns 0 when direct IO is set up
 * (SG_INFO_DIRECT_IO reported back to the user), 1 when the caller
 * should fall back to indirect IO, and always 1 when direct IO support
 * is not compiled in. */
static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
{
#ifdef SG_ALLOW_DIO_CODE
    int res, k, split, offset, num, mx_sc_elems, rem_sz;
    struct kiobuf * kp;
    char * mem_src_arr;
    struct scatterlist * sclp;
    unsigned long addr, prev_addr;
    sg_io_hdr_t * hp = &srp->header;
    Sg_scatter_hold * schp = &srp->data;
    int sg_tablesize = sfp->parentdp->sg_tablesize;
    int nbhs = 0;

    res = sg_alloc_kiovec(1, &schp->kiobp, &nbhs);
    if (0 != res) {
        SCSI_LOG_TIMEOUT(5, printk("sg_build_dir: sg_alloc_kiovec res=%d\n",
                                   res));
        return 1;
    }
    res = map_user_kiobuf((SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0,
                          schp->kiobp, (unsigned long)hp->dxferp, dxfer_len);
    if (0 != res) {
        SCSI_LOG_TIMEOUT(5,
            printk("sg_build_dir: map_user_kiobuf res=%d\n", res));
        sg_unmap_and(schp, 1);
        return 1;
    }
    schp->mapped = 1;
    kp = schp->kiobp;
    prev_addr = (unsigned long) page_address(kp->maplist[0]);
    /* scan mapped pages; 'split' records the first discontiguity */
    for (k = 1, split = 0; k < kp->nr_pages; ++k, prev_addr = addr) {
        addr = (unsigned long) page_address(kp->maplist[k]);
        if ((prev_addr + PAGE_SIZE) != addr) {
            split = k;
            break;
        }
    }
    if (! split) {      /* physically contiguous: use as one flat buffer */
        schp->k_use_sg = 0;
        schp->buffer = page_address(kp->maplist[0]) + kp->offset;
        schp->bufflen = dxfer_len;
        schp->buffer_mem_src = SG_USER_MEM;
        schp->b_malloc_len = dxfer_len;
        hp->info |= SG_INFO_DIRECT_IO;
        return 0;
    }
    mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
    if (mx_sc_elems <= 1) {
        sg_unmap_and(schp, 1);
        sg_remove_scat(schp);
        return 1;
    }
    mem_src_arr = schp->buffer + (mx_sc_elems * sizeof(struct scatterlist));
    /* one sg element per mapped page; the first may start at an offset */
    for (k = 0, sclp = schp->buffer, rem_sz = dxfer_len;
         (rem_sz > 0) && (k < mx_sc_elems);
         ++k, ++sclp) {
        offset = (0 == k) ? kp->offset : 0;
        num = (rem_sz > (PAGE_SIZE - offset)) ? (PAGE_SIZE - offset) :
              rem_sz;
        sclp->address = page_address(kp->maplist[k]) + offset;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
        sclp->page = NULL;
#endif
        sclp->length = num;
        mem_src_arr[k] = SG_USER_MEM;   /* marks element as user memory */
        rem_sz -= num;
        SCSI_LOG_TIMEOUT(5,
            printk("sg_build_dir: k=%d, a=0x%p, len=%d, ms=%d\n",
                   k, sclp->address, num, mem_src_arr[k]));
    }
    schp->k_use_sg = k;
    SCSI_LOG_TIMEOUT(5,
        printk("sg_build_dir: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
    schp->bufflen = dxfer_len;
    if (rem_sz > 0) {   /* must have failed */
        sg_unmap_and(schp, 1);
        sg_remove_scat(schp);
        return 1;       /* out of scatter gather elements, try indirect */
    }
    hp->info |= SG_INFO_DIRECT_IO;
    return 0;
#else
    return 1;
#endif /* SG_ALLOW_DIO_CODE */
}
|
1746 |
|
|
|
1747 |
|
|
/* Build an indirect (kernel allocated) data buffer of at least
 * buff_size bytes in schp.  Small requests (<= SG_SCATTER_SZ after
 * rounding) first try a single flat allocation; otherwise, or when the
 * flat attempt comes back short, a scatter gather list is assembled
 * from SG_SCATTER_SZ-or-smaller chunks.  Returns 0 on success,
 * -ENOMEM on allocation failure, -EFAULT on bad arguments. */
static int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
    int ret_sz, mem_src;
    int blk_size = buff_size;
    char * p = NULL;

    if ((blk_size < 0) || (! sfp))
        return -EFAULT;
    if (0 == blk_size)
        ++blk_size;     /* don't know why */
    /* round request up to next highest SG_SECTOR_SZ byte boundary */
    blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
    SCSI_LOG_TIMEOUT(4, printk("sg_build_indi: buff_size=%d, blk_size=%d\n",
                               buff_size, blk_size));
    if (blk_size <= SG_SCATTER_SZ) {
        mem_src = SG_HEAP_PAGE;
        p = sg_malloc(sfp, blk_size, &ret_sz, &mem_src);
        if (! p)
            return -ENOMEM;
        if (blk_size == ret_sz) { /* got it on the first attempt */
            schp->k_use_sg = 0;
            schp->buffer = p;
            schp->bufflen = blk_size;
            schp->buffer_mem_src = (char)mem_src;
            schp->b_malloc_len = blk_size;
            return 0;
        }
    }
    else {
        mem_src = SG_HEAP_PAGE;
        p = sg_malloc(sfp, SG_SCATTER_SZ, &ret_sz, &mem_src);
        if (! p)
            return -ENOMEM;
    }
    /* Want some local declarations, so start new block ... */
    {   /* lets try and build a scatter gather list */
        struct scatterlist * sclp;
        int k, rem_sz, num;
        int mx_sc_elems;
        int sg_tablesize = sfp->parentdp->sg_tablesize;
        int first = 1;
        char * mem_src_arr;

        /* N.B. ret_sz and mem_src carried into this block ... */
        mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
        if (mx_sc_elems < 0)
            return mx_sc_elems; /* most likely -ENOMEM */
        mem_src_arr = schp->buffer +
                      (mx_sc_elems * sizeof(struct scatterlist));

        for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
             (rem_sz > 0) && (k < mx_sc_elems);
             ++k, rem_sz -= ret_sz, ++sclp) {
            if (first)  /* first chunk was already allocated above */
                first = 0;
            else {
                num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
                mem_src = SG_HEAP_PAGE;
                p = sg_malloc(sfp, num, &ret_sz, &mem_src);
                if (! p)
                    break;
            }
            sclp->address = p;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
            sclp->page = NULL;
#endif
            sclp->length = ret_sz;
            mem_src_arr[k] = mem_src;   /* record heap for later sg_free() */

            SCSI_LOG_TIMEOUT(5,
                printk("sg_build_build: k=%d, a=0x%p, len=%d, ms=%d\n",
                       k, sclp->address, ret_sz, mem_src));
        } /* end of for loop */
        schp->k_use_sg = k;
        SCSI_LOG_TIMEOUT(5,
            printk("sg_build_indi: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
        schp->bufflen = blk_size;
        if (rem_sz > 0) /* must have failed */
            return -ENOMEM;
    }
    return 0;
}
|
1829 |
|
|
|
1830 |
|
|
/* Copy user data into the request's kernel buffer(s) before a transfer
 * toward the device.  Handles the flat-buffer and scatter gather
 * cases, both interface versions (the old interface, interface_id ==
 * '\0', keeps its outgoing length in hp->flags), and user iovec lists.
 * Copies are skipped for SG_USER_MEM (direct IO) buffers.  User access
 * is verified via sg_u_iovec()/verify_area() before the unchecked
 * __copy_from_user calls.  Returns 0 or a negative errno. */
static int sg_write_xfer(Sg_request * srp)
{
    sg_io_hdr_t * hp = &srp->header;
    Sg_scatter_hold * schp = &srp->data;
    int num_xfer = 0;
    int j, k, onum, usglen, ksglen, res, ok;
    int iovec_count = (int)hp->iovec_count;
    int dxfer_dir = hp->dxfer_direction;
    unsigned char * p;
    unsigned char * up;
    int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

    if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
        (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
        /* old interface carries the outgoing length in hp->flags */
        num_xfer = (int)(new_interface ? hp->dxfer_len : hp->flags);
        if (schp->bufflen < num_xfer)
            num_xfer = schp->bufflen;
    }
    if ((num_xfer <= 0) ||
        (new_interface && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
        return 0;

    SCSI_LOG_TIMEOUT(4,
        printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
               num_xfer, iovec_count, schp->k_use_sg));
    if (iovec_count) {
        onum = iovec_count;
        if ((k = verify_area(VERIFY_READ, hp->dxferp,
                             SZ_SG_IOVEC * onum)))
            return k;
    }
    else
        onum = 1;

    if (0 == schp->k_use_sg) {  /* kernel has single buffer */
        if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */

            for (j = 0, p = schp->buffer; j < onum; ++j) {
                res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
                if (res) return res;
                usglen = (num_xfer > usglen) ? usglen : num_xfer;
                __copy_from_user(p, up, usglen);
                p += usglen;
                num_xfer -= usglen;
                if (num_xfer <= 0)
                    return 0;
            }
        }
    }
    else {      /* kernel using scatter gather list */
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
        char * mem_src_arr = sg_get_sgat_msa(schp);
        ksglen = (int)sclp->length;
        p = sclp->address;

        /* walk user iovec elements and kernel sg elements in step;
         * 'usglen'/'ksglen' track the bytes left in each cursor */
        for (j = 0, k = 0; j < onum; ++j) {
            res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
            if (res) return res;

            for ( ; p; ++sclp, ksglen = (int)sclp->length, p = sclp->address) {
                ok = (SG_USER_MEM != mem_src_arr[k]);
                if (usglen <= 0)
                    break;
                if (ksglen > usglen) {
                    if (usglen >= num_xfer) {
                        if (ok) __copy_from_user(p, up, num_xfer);
                        return 0;
                    }
                    if (ok) __copy_from_user(p, up, usglen);
                    p += usglen;
                    ksglen -= usglen;
                    break;
                }
                else {
                    if (ksglen >= num_xfer) {
                        if (ok) __copy_from_user(p, up, num_xfer);
                        return 0;
                    }
                    if (ok) __copy_from_user(p, up, ksglen);
                    up += ksglen;
                    usglen -= ksglen;
                }
                ++k;
                if (k >= schp->k_use_sg)
                    return 0;
            }
        }
    }
    return 0;
}
|
1920 |
|
|
|
1921 |
|
|
/* Resolve the user space pointer and byte count for iovec element
 * 'ind' (or for the single hp->dxferp buffer when sg_num is 0) and
 * verify user access in the required direction; wr_xf non-zero means
 * data will be read from user space (VERIFY_READ).  On success the
 * pointer/count are returned through *up/*countp (each optional) and
 * 0 is returned; otherwise the verify_area() error. */
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
                      int wr_xf, int * countp, unsigned char ** up)
{
    int num_xfer = (int)hp->dxfer_len;
    unsigned char * p;
    int count, k;
    sg_iovec_t u_iovec;

    if (0 == sg_num) {  /* no iovec list: dxferp is the buffer itself */
        p = (unsigned char *)hp->dxferp;
        if (wr_xf && ('\0' == hp->interface_id))
            count = (int)hp->flags; /* holds "old" input_size */
        else
            count = num_xfer;
    }
    else {      /* fetch the ind'th iovec element from user space */
        __copy_from_user(&u_iovec,
                         (unsigned char *)hp->dxferp + (ind * SZ_SG_IOVEC),
                         SZ_SG_IOVEC);
        p = (unsigned char *)u_iovec.iov_base;
        count = (int)u_iovec.iov_len;
    }
    if ((k = verify_area(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count)))
        return k;
    if (up) *up = p;
    if (countp) *countp = count;
    return 0;
}
|
1949 |
|
|
|
1950 |
|
|
/* Locate the mem_src array that trails the scatter gather list inside
 * schp->buffer (layout established by sg_build_sgat()): one char per
 * scatterlist element, starting right after the last element. */
static char * sg_get_sgat_msa(Sg_scatter_hold * schp)
{
    int per_elem = sizeof(struct scatterlist) + sizeof(char);
    int num_elems = schp->sglist_len / per_elem;

    return schp->buffer + (num_elems * sizeof(struct scatterlist));
}
|
1956 |
|
|
|
1957 |
|
|
/* Release the buffer(s) tracked by schp.  For a scatter gather list,
 * each element is handed to sg_free() with the heap tag recovered from
 * the trailing mem_src array, then the list buffer itself is freed;
 * a single flat buffer is freed directly.  Finally *schp is zeroed.
 * NOTE(review): direct IO elements carry the SG_USER_MEM tag here --
 * presumably sg_free() treats that tag as a no-op; confirm in its
 * definition (outside this view). */
static void sg_remove_scat(Sg_scatter_hold * schp)
{
    SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n",
                               schp->k_use_sg));
    if (schp->buffer && schp->sglist_len) {
        int k, mem_src;
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
        char * mem_src_arr = sg_get_sgat_msa(schp);

        for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) {
            mem_src = mem_src_arr[k];
            SCSI_LOG_TIMEOUT(5,
                printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
                       k, sclp->address, sclp->length, mem_src));
            sg_free(sclp->address, sclp->length, mem_src);
            sclp->address = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
            sclp->page = NULL;
#endif
            sclp->length = 0;
        }
        sg_free(schp->buffer, schp->sglist_len, schp->buffer_mem_src);
    }
    else if (schp->buffer)
        sg_free(schp->buffer, schp->b_malloc_len, schp->buffer_mem_src);
    memset(schp, 0, sizeof(*schp));
}
|
1984 |
|
|
|
1985 |
|
|
/* Copy reply data from the request's kernel buffer(s) back to user
 * space after a transfer from the device.  Mirror image of
 * sg_write_xfer(): handles flat-buffer and scatter gather cases and
 * user iovec lists; copies are skipped for SG_USER_MEM (direct IO)
 * buffers.  User access is verified via sg_u_iovec()/verify_area()
 * before the unchecked __copy_to_user calls.  Returns 0 or a negative
 * errno. */
static int sg_read_xfer(Sg_request * srp)
{
    sg_io_hdr_t * hp = &srp->header;
    Sg_scatter_hold * schp = &srp->data;
    int num_xfer = 0;
    int j, k, onum, usglen, ksglen, res, ok;
    int iovec_count = (int)hp->iovec_count;
    int dxfer_dir = hp->dxfer_direction;
    unsigned char * p;
    unsigned char * up;
    int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

    if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir) ||
        (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
        num_xfer = hp->dxfer_len;
        if (schp->bufflen < num_xfer)
            num_xfer = schp->bufflen;
    }
    if ((num_xfer <= 0) ||
        (new_interface && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
        return 0;

    SCSI_LOG_TIMEOUT(4,
        printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
               num_xfer, iovec_count, schp->k_use_sg));
    if (iovec_count) {
        onum = iovec_count;
        if ((k = verify_area(VERIFY_READ, hp->dxferp,
                             SZ_SG_IOVEC * onum)))
            return k;
    }
    else
        onum = 1;

    if (0 == schp->k_use_sg) {  /* kernel has single buffer */
        if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */

            for (j = 0, p = schp->buffer; j < onum; ++j) {
                res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
                if (res) return res;
                usglen = (num_xfer > usglen) ? usglen : num_xfer;
                __copy_to_user(up, p, usglen);
                p += usglen;
                num_xfer -= usglen;
                if (num_xfer <= 0)
                    return 0;
            }
        }
    }
    else {      /* kernel using scatter gather list */
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
        char * mem_src_arr = sg_get_sgat_msa(schp);
        ksglen = (int)sclp->length;
        p = sclp->address;

        /* walk user iovec elements and kernel sg elements in step;
         * 'usglen'/'ksglen' track the bytes left in each cursor */
        for (j = 0, k = 0; j < onum; ++j) {
            res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
            if (res) return res;

            for ( ; p; ++sclp, ksglen = (int)sclp->length, p = sclp->address) {
                ok = (SG_USER_MEM != mem_src_arr[k]);
                if (usglen <= 0)
                    break;
                if (ksglen > usglen) {
                    if (usglen >= num_xfer) {
                        if (ok) __copy_to_user(up, p, num_xfer);
                        return 0;
                    }
                    if (ok) __copy_to_user(up, p, usglen);
                    p += usglen;
                    ksglen -= usglen;
                    break;
                }
                else {
                    if (ksglen >= num_xfer) {
                        if (ok) __copy_to_user(up, p, num_xfer);
                        return 0;
                    }
                    if (ok) __copy_to_user(up, p, ksglen);
                    up += ksglen;
                    usglen -= ksglen;
                }
                ++k;
                if (k >= schp->k_use_sg)
                    return 0;
            }
        }
    }
    return 0;
}
|
2075 |
|
|
|
2076 |
|
|
/* Copy up to num_read_xfer bytes of reply data to the user buffer
 * outp, from either the flat kernel buffer or the scatter gather list.
 * NOTE(review): uses unchecked __copy_to_user -- presumably the caller
 * has already verified the user buffer; confirm at the call sites
 * (outside this view). */
static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer)
{
    Sg_scatter_hold * schp = &srp->data;

    SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
                               num_read_xfer));
    if ((! outp) || (num_read_xfer <= 0))
        return;
    if(schp->k_use_sg > 0) {
        int k, num;
        struct scatterlist * sclp = (struct scatterlist *)schp->buffer;

        for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) {
            num = (int)sclp->length;
            if (num > num_read_xfer) {
                __copy_to_user(outp, sclp->address, num_read_xfer);
                break;
            }
            else {
                __copy_to_user(outp, sclp->address, num);
                num_read_xfer -= num;
                if (num_read_xfer <= 0)
                    break;
                outp += num;
            }
        }
    }
    else
        __copy_to_user(outp, schp->buffer, num_read_xfer);
}
|
2106 |
|
|
|
2107 |
|
|
/* Build the per-fd reserve buffer of (about) req_size bytes, halving
 * the request on each allocation failure until it would drop below
 * PAGE_SIZE / 2; requests are never smaller than PAGE_SIZE. */
static void sg_build_reserve(Sg_fd * sfp, int req_size)
{
    Sg_scatter_hold * schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
    do {
        if (req_size < PAGE_SIZE)
            req_size = PAGE_SIZE;
        if (0 == sg_build_indi(schp, sfp, req_size))
            return;
        else
            sg_remove_scat(schp);
        req_size >>= 1; /* divide by 2 */
    } while (req_size > (PAGE_SIZE / 2));
}
|
2122 |
|
|
|
2123 |
|
|
/* Satisfy a request of 'size' bytes by aliasing the fd's reserve
 * buffer into srp->data (no new allocation).  When the size ends
 * inside scatter gather element k (k > 0), that element's length is
 * temporarily truncated and the original length saved in
 * sfp->save_scat_len, to be restored by sg_unlink_reserve(). */
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    srp->res_used = 1;
    SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
    size = (size + 1) & (~1);   /* round to even for aha1542 */
    if (rsv_schp->k_use_sg > 0) {
        int k, num;
        int rem = size;
        struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;

        for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
            num = (int)sclp->length;
            if (rem <= num) {
                if (0 == k) {   /* fits in first element: flat buffer case */
                    req_schp->k_use_sg = 0;
                    req_schp->buffer = sclp->address;
                }
                else {
                    /* truncate element k; sg_unlink_reserve() restores
                     * the original length from save_scat_len */
                    sfp->save_scat_len = num;
                    sclp->length = (unsigned)rem;
                    req_schp->k_use_sg = k + 1;
                    req_schp->sglist_len = rsv_schp->sglist_len;
                    req_schp->buffer = rsv_schp->buffer;
                }
                req_schp->bufflen = size;
                req_schp->buffer_mem_src = rsv_schp->buffer_mem_src;
                req_schp->b_malloc_len = rsv_schp->b_malloc_len;
                break;
            }
            else
                rem -= num;
        }
        if (k >= rsv_schp->k_use_sg)
            SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
    }
    else {
        req_schp->k_use_sg = 0;
        req_schp->bufflen = size;
        req_schp->buffer = rsv_schp->buffer;
        req_schp->buffer_mem_src = rsv_schp->buffer_mem_src;
        req_schp->b_malloc_len = rsv_schp->b_malloc_len;
    }
}
|
2169 |
|
|
|
2170 |
|
|
/* Undo sg_link_reserve(): restore the truncated scatter gather element
 * length (saved in sfp->save_scat_len) and detach srp->data from the
 * reserve buffer, marking the reserve free again. */
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
    Sg_scatter_hold * req_schp = &srp->data;
    Sg_scatter_hold * rsv_schp = &sfp->reserve;

    SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
                               (int)req_schp->k_use_sg));
    if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
        struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;

        if (sfp->save_scat_len > 0)
            (sclp + (req_schp->k_use_sg - 1))->length =
                                        (unsigned)sfp->save_scat_len;
        else
            SCSI_LOG_TIMEOUT(1, printk(
                        "sg_unlink_reserve: BAD save_scat_len\n"));
    }
    req_schp->k_use_sg = 0;
    req_schp->bufflen = 0;
    req_schp->buffer = NULL;
    req_schp->sglist_len = 0;
    sfp->save_scat_len = 0;
    srp->res_used = 0;
}
|
2194 |
|
|
|
2195 |
|
|
/* Find the first completed request on sfp's list that is not owned by
 * a blocking SG_IO call and matches pack_id (pack_id == -1 matches
 * any).  The match is marked done == 2 under rq_list_lock so no other
 * reader can claim it.  Returns the request or NULL. */
static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
    unsigned long flags;
    Sg_request * srp;

    write_lock_irqsave(&sfp->rq_list_lock, flags);
    srp = sfp->headrp;
    while (srp) {
        if ((1 == srp->done) && (! srp->sg_io_owned) &&
            ((-1 == pack_id) || (srp->header.pack_id == pack_id))) {
            srp->done = 2;      /* guard against other readers */
            break;
        }
        srp = srp->nextrp;
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, flags);
    return srp;
}
|
2212 |
|
|
|
2213 |
|
|
#ifdef CONFIG_PROC_FS
/* Return the nth request on sfp's list (0-based), or NULL if the list
 * is shorter; walked under rq_list_lock.  Used by the /proc code. */
static Sg_request * sg_get_nth_request(Sg_fd * sfp, int nth)
{
    unsigned long flags;
    Sg_request * srp;
    int pos = 0;

    read_lock_irqsave(&sfp->rq_list_lock, flags);
    srp = sfp->headrp;
    while (srp && (pos < nth)) {
        ++pos;
        srp = srp->nextrp;
    }
    read_unlock_irqrestore(&sfp->rq_list_lock, flags);
    return srp;
}
#endif
|
2228 |
|
|
|
2229 |
|
|
/* always adds to end of list */
/* Claim a free slot in sfp->req_arr and append it to the fd's request
 * list (all under rq_list_lock).  The first array slot is reused when
 * the list is empty; further slots require command queuing (cmd_q) to
 * be enabled.  Returns the zeroed, initialised request or NULL when no
 * slot is available. */
static Sg_request * sg_add_request(Sg_fd * sfp)
{
    int k;
    unsigned long iflags;
    Sg_request * resp;
    Sg_request * rp = sfp->req_arr;

    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    resp = sfp->headrp;
    if (! resp) {       /* empty list: use the first array slot */
        memset(rp, 0, sizeof(Sg_request));
        rp->parentfp = sfp;
        resp = rp;
        sfp->headrp = resp;
    }
    else {
        if (0 == sfp->cmd_q)
            resp = NULL;    /* command queuing disallowed */
        else {
            /* find a free slot (parentfp == NULL means unused) */
            for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
                if (! rp->parentfp)
                    break;
            }
            if (k < SG_MAX_QUEUE) {
                memset(rp, 0, sizeof(Sg_request));
                rp->parentfp = sfp;
                while (resp->nextrp)    /* append at tail */
                    resp = resp->nextrp;
                resp->nextrp = rp;
                resp = rp;
            }
            else
                resp = NULL;
        }
    }
    if (resp) {
        resp->nextrp = NULL;
        resp->header.duration = jiffies;    /* start timestamp */
        resp->my_cmdp = NULL;
        resp->data.kiobp = NULL;
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return resp;
}
|
2274 |
|
|
|
2275 |
|
|
/* Return of 1 for found; 0 for not found */
|
2276 |
|
|
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp)
|
2277 |
|
|
{
|
2278 |
|
|
Sg_request * prev_rp;
|
2279 |
|
|
Sg_request * rp;
|
2280 |
|
|
unsigned long iflags;
|
2281 |
|
|
int res = 0;
|
2282 |
|
|
|
2283 |
|
|
if ((! sfp) || (! srp) || (! sfp->headrp))
|
2284 |
|
|
return res;
|
2285 |
|
|
write_lock_irqsave(&sfp->rq_list_lock, iflags);
|
2286 |
|
|
prev_rp = sfp->headrp;
|
2287 |
|
|
if (srp == prev_rp) {
|
2288 |
|
|
sfp->headrp = prev_rp->nextrp;
|
2289 |
|
|
prev_rp->parentfp = NULL;
|
2290 |
|
|
res = 1;
|
2291 |
|
|
}
|
2292 |
|
|
else {
|
2293 |
|
|
while ((rp = prev_rp->nextrp)) {
|
2294 |
|
|
if (srp == rp) {
|
2295 |
|
|
prev_rp->nextrp = rp->nextrp;
|
2296 |
|
|
rp->parentfp = NULL;
|
2297 |
|
|
res = 1;
|
2298 |
|
|
break;
|
2299 |
|
|
}
|
2300 |
|
|
prev_rp = rp;
|
2301 |
|
|
}
|
2302 |
|
|
}
|
2303 |
|
|
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
|
2304 |
|
|
return res;
|
2305 |
|
|
}
|
2306 |
|
|
|
2307 |
|
|
#ifdef CONFIG_PROC_FS
/* Return the nth fd object on sdp's list (0-based), or NULL if the
 * list is shorter; walked under sg_dev_arr_lock.  Used by /proc code. */
static Sg_fd * sg_get_nth_sfp(Sg_device * sdp, int nth)
{
    unsigned long flags;
    Sg_fd * sfp;
    int pos = 0;

    read_lock_irqsave(&sg_dev_arr_lock, flags);
    sfp = sdp->headfp;
    while (sfp && (pos < nth)) {
        ++pos;
        sfp = sfp->nextfp;
    }
    read_unlock_irqrestore(&sg_dev_arr_lock, flags);
    return sfp;
}
#endif
|
2322 |
|
|
|
2323 |
|
|
/* Allocate and initialise a new per-open-file object (Sg_fd) for sdp,
 * link it onto the tail of sdp's fd list (under sg_dev_arr_lock) and
 * build its reserve buffer of sg_big_buff bytes.  Returns NULL on
 * allocation failure. */
static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev)
{
    Sg_fd * sfp;
    unsigned long iflags;

    sfp = (Sg_fd *)sg_low_malloc(sizeof(Sg_fd), 0, SG_HEAP_KMAL, 0);
    if (! sfp)
        return NULL;
    memset(sfp, 0, sizeof(Sg_fd));
    sfp->fd_mem_src = SG_HEAP_KMAL;
    init_waitqueue_head(&sfp->read_wait);
    sfp->rq_list_lock = RW_LOCK_UNLOCKED;

    sfp->timeout = SG_DEFAULT_TIMEOUT;
    sfp->force_packid = SG_DEF_FORCE_PACK_ID;
    /* default DMA restriction follows the host unless forced low */
    sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
                   sdp->device->host->unchecked_isa_dma : 1;
    sfp->cmd_q = SG_DEF_COMMAND_Q;
    sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
    sfp->parentdp = sdp;
    write_lock_irqsave(&sg_dev_arr_lock, iflags);
    if (! sdp->headfp)
        sdp->headfp = sfp;
    else {    /* add to tail of existing list */
        Sg_fd * pfp = sdp->headfp;
        while (pfp->nextfp)
            pfp = pfp->nextfp;
        pfp->nextfp = sfp;
    }
    write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
    SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p, m_s=%d\n",
                               sfp, (int)sfp->fd_mem_src));
    sg_build_reserve(sfp, sg_big_buff);
    SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
                               sfp->reserve.bufflen, sfp->reserve.k_use_sg));
    return sfp;
}
|
2360 |
|
|
|
2361 |
|
|
/* Unlink sfp from sdp's fd list and free it, including its reserve
 * buffer (undoing the mmap length correction first if needed).
 * NOTE(review): callers visible in this file (sg_remove_sfp, and the
 * sg_detach path) invoke this with sg_dev_arr_lock write-held; the
 * list manipulation here relies on that. */
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
    Sg_fd * fp;
    Sg_fd * prev_fp;

    prev_fp = sdp->headfp;
    if (sfp == prev_fp)
        sdp->headfp = prev_fp->nextfp;
    else {
        while ((fp = prev_fp->nextfp)) {
            if (sfp == fp) {
                prev_fp->nextfp = fp->nextfp;
                break;
            }
            prev_fp = fp;
        }
    }
    if (sfp->reserve.bufflen > 0) {
        SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
                         (int)sfp->reserve.bufflen, (int)sfp->reserve.k_use_sg));
        if (sfp->mmap_called)
            sg_rb_correct4mmap(&sfp->reserve, 0);   /* undo correction */
        sg_remove_scat(&sfp->reserve);
    }
    sfp->parentdp = NULL;
    SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
    sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->fd_mem_src);
}
|
2389 |
|
|
|
2390 |
|
|
/* Returns 0 in normal case, 1 when detached and sdp object removed */
/* Close-time removal of an fd object.  Completed requests are torn
 * down first.  If nothing is still in flight ("clean" close) the sfp
 * is freed immediately, and -- when the device was already detached
 * and this was its last fd -- the Sg_device itself is freed and its
 * sg_dev_arr slot cleared (return 1).  Otherwise ("dirty" close) the
 * fd is flagged closed and device/module use counts are raised so the
 * pending requests can complete before final cleanup elsewhere. */
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
    Sg_request * srp;
    Sg_request * tsrp;
    int dirty = 0;
    int res = 0;

    for (srp = sfp->headrp; srp; srp = tsrp) {
        tsrp = srp->nextrp;     /* saved: sg_finish_rem_req unlinks srp */
        if (srp->done)
            sg_finish_rem_req(srp);
        else
            ++dirty;
    }
    if (0 == dirty) {
        unsigned long iflags;

        write_lock_irqsave(&sg_dev_arr_lock, iflags);
        __sg_remove_sfp(sdp, sfp);
        if (sdp->detached && (NULL == sdp->headfp)) {
            int k, maxd;

            /* find and clear this device's slot in sg_dev_arr */
            maxd = sg_template.dev_max;
            for (k = 0; k < maxd; ++k) {
                if (sdp == sg_dev_arr[k])
                    break;
            }
            if (k < maxd)
                sg_dev_arr[k] = NULL;
            kfree((char *)sdp);
            res = 1;
        }
        write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
    }
    else {
        sfp->closed = 1; /* flag dirty state on this fd */
        sdp->device->access_count++;
        /* MOD_INC's to inhibit unloading sg and associated adapter driver */
        if (sg_template.module)
            __MOD_INC_USE_COUNT(sg_template.module);
        if (sdp->device->host->hostt->module)
            __MOD_INC_USE_COUNT(sdp->device->host->hostt->module);
        SCSI_LOG_TIMEOUT(1, printk(
                  "sg_remove_sfp: worrisome, %d writes pending\n", dirty));
    }
    return res;
}
|
2438 |
|
|
|
2439 |
|
|
/*
 * Report whether any request on this fd currently occupies the
 * per-fd reserve buffer. The request list is scanned under the
 * rq_list read lock; returns 1 if a user was found, else 0.
 */
static int sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request * rp;
	unsigned long flags;

	read_lock_irqsave(&sfp->rq_list_lock, flags);
	rp = sfp->headrp;
	while (rp && ! rp->res_used)
		rp = rp->nextrp;
	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
	return rp ? 1 : 0;
}
|
2450 |
|
|
|
2451 |
|
|
/* If retSzp==NULL want exact size or fail */
|
2452 |
|
|
static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp)
|
2453 |
|
|
{
|
2454 |
|
|
char * resp = NULL;
|
2455 |
|
|
int page_mask = lowDma ? (GFP_ATOMIC | GFP_DMA) : GFP_ATOMIC;
|
2456 |
|
|
|
2457 |
|
|
if (rqSz <= 0)
|
2458 |
|
|
return resp;
|
2459 |
|
|
if (SG_HEAP_KMAL == mem_src) {
|
2460 |
|
|
resp = kmalloc(rqSz, page_mask);
|
2461 |
|
|
if (resp) {
|
2462 |
|
|
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
2463 |
|
|
memset(resp, 0, rqSz);
|
2464 |
|
|
if (retSzp) *retSzp = rqSz;
|
2465 |
|
|
}
|
2466 |
|
|
return resp;
|
2467 |
|
|
}
|
2468 |
|
|
if (SG_HEAP_POOL == mem_src) {
|
2469 |
|
|
int num_sect = rqSz / SG_SECTOR_SZ;
|
2470 |
|
|
|
2471 |
|
|
if (0 != (rqSz & SG_SECTOR_MSK)) {
|
2472 |
|
|
if (! retSzp)
|
2473 |
|
|
return resp;
|
2474 |
|
|
++num_sect;
|
2475 |
|
|
rqSz = num_sect * SG_SECTOR_SZ;
|
2476 |
|
|
}
|
2477 |
|
|
while (num_sect > 0) {
|
2478 |
|
|
if ((num_sect <= sg_pool_secs_avail) &&
|
2479 |
|
|
(scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
|
2480 |
|
|
resp = scsi_malloc(rqSz);
|
2481 |
|
|
if (resp) {
|
2482 |
|
|
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
2483 |
|
|
memset(resp, 0, rqSz);
|
2484 |
|
|
if (retSzp) *retSzp = rqSz;
|
2485 |
|
|
sg_pool_secs_avail -= num_sect;
|
2486 |
|
|
return resp;
|
2487 |
|
|
}
|
2488 |
|
|
}
|
2489 |
|
|
if (! retSzp)
|
2490 |
|
|
return resp;
|
2491 |
|
|
num_sect /= 2; /* try half as many */
|
2492 |
|
|
rqSz = num_sect * SG_SECTOR_SZ;
|
2493 |
|
|
}
|
2494 |
|
|
}
|
2495 |
|
|
else if (SG_HEAP_PAGE == mem_src) {
|
2496 |
|
|
int order, a_size;
|
2497 |
|
|
int resSz = rqSz;
|
2498 |
|
|
|
2499 |
|
|
for (order = 0, a_size = PAGE_SIZE;
|
2500 |
|
|
a_size < rqSz; order++, a_size <<= 1)
|
2501 |
|
|
;
|
2502 |
|
|
resp = (char *)__get_free_pages(page_mask, order);
|
2503 |
|
|
while ((! resp) && order && retSzp) {
|
2504 |
|
|
--order;
|
2505 |
|
|
a_size >>= 1; /* divide by 2, until PAGE_SIZE */
|
2506 |
|
|
resp = (char *)__get_free_pages(page_mask, order); /* try half */
|
2507 |
|
|
resSz = a_size;
|
2508 |
|
|
}
|
2509 |
|
|
if (resp) {
|
2510 |
|
|
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
2511 |
|
|
memset(resp, 0, resSz);
|
2512 |
|
|
if (retSzp) *retSzp = resSz;
|
2513 |
|
|
}
|
2514 |
|
|
}
|
2515 |
|
|
else
|
2516 |
|
|
printk(KERN_ERR "sg_low_malloc: bad mem_src=%d, rqSz=%df\n",
|
2517 |
|
|
mem_src, rqSz);
|
2518 |
|
|
return resp;
|
2519 |
|
|
}
|
2520 |
|
|
|
2521 |
|
|
/*
 * Front-end allocator: tries the heap requested via *mem_srcp and
 * falls back across the other heaps (POOL <-> PAGE, then KMAL) when
 * the first choice fails. On success *mem_srcp is updated to the heap
 * actually used (so the matching sg_free can release it correctly)
 * and *retSzp, if non-NULL, receives the size actually obtained,
 * which may be smaller than requested. Returns NULL on failure.
 */
static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
			int * mem_srcp)
{
	char * resp = NULL;

	if (retSzp) *retSzp = size;
	if (size <= 0)
		;
	else {
		int low_dma = sfp->low_dma;
		int l_ms = -1; /* invalid value */

		switch (*mem_srcp)
		{
		case SG_HEAP_PAGE:
			/* small requests go to the sector pool first */
			l_ms = (size < PAGE_SIZE) ? SG_HEAP_POOL : SG_HEAP_PAGE;
			/* first pass: exact size only (retSzp arg is NULL) */
			resp = sg_low_malloc(size, low_dma, l_ms, 0);
			if (resp)
				break;
			/* then accept a reduced size, trying each heap in turn */
			resp = sg_low_malloc(size, low_dma, l_ms, &size);
			if (! resp) {
				l_ms = (SG_HEAP_POOL == l_ms) ? SG_HEAP_PAGE : SG_HEAP_POOL;
				resp = sg_low_malloc(size, low_dma, l_ms, &size);
				if (! resp) {
					l_ms = SG_HEAP_KMAL;
					resp = sg_low_malloc(size, low_dma, l_ms, &size);
				}
			}
			if (resp && retSzp) *retSzp = size;
			break;
		case SG_HEAP_KMAL:
			l_ms = SG_HEAP_KMAL; /* was SG_HEAP_PAGE */
			resp = sg_low_malloc(size, low_dma, l_ms, 0);
			if (resp)
				break;
			l_ms = SG_HEAP_POOL;
			resp = sg_low_malloc(size, low_dma, l_ms, &size);
			if (resp && retSzp) *retSzp = size;
			break;
		default:
			SCSI_LOG_TIMEOUT(1, printk("sg_malloc: bad ms=%d\n", *mem_srcp));
			break;
		}
		/* report the heap that actually satisfied the request */
		if (resp) *mem_srcp = l_ms;
	}
	SCSI_LOG_TIMEOUT(6, printk("sg_malloc: size=%d, ms=%d, ret=0x%p\n",
				   size, *mem_srcp, resp));
	return resp;
}
|
2570 |
|
|
|
2571 |
|
|
/*
 * Allocate 'nr' kiobufs into bufp; thin compatibility shim over the
 * two kernel kiovec APIs (sized variant when SG_NEW_KIOVEC is set,
 * in which case szp supplies the sizes; otherwise szp is unused).
 */
static inline int sg_alloc_kiovec(int nr, struct kiobuf **bufp, int *szp)
{
#if SG_NEW_KIOVEC
	return alloc_kiovec_sz(nr, bufp, szp);
#else
	return alloc_kiovec(nr, bufp);
#endif
}
|
2579 |
|
|
|
2580 |
|
|
/*
 * Return 'buff' (of 'size' bytes) to the heap identified by mem_src;
 * the inverse of sg_low_malloc. SG_USER_MEM buffers belong to user
 * space and are deliberately not freed here. NULL buff is a no-op.
 */
static void sg_low_free(char * buff, int size, int mem_src)
{
	if (! buff) return;
	switch (mem_src) {
	case SG_HEAP_POOL:
		{
			int num_sect = size / SG_SECTOR_SZ;

			scsi_free(buff, size);
			/* give the sectors back to our pool accounting */
			sg_pool_secs_avail += num_sect;
		}
		break;
	case SG_HEAP_KMAL:
		kfree(buff); /* size not used */
		break;
	case SG_HEAP_PAGE:
		{
			int order, a_size;
			/* recompute the page order used at allocation time */
			for (order = 0, a_size = PAGE_SIZE;
			     a_size < size; order++, a_size <<= 1)
				;
			free_pages((unsigned long)buff, order);
		}
		break;
	case SG_USER_MEM:
		break; /* nothing to do */
	default:
		printk(KERN_ERR "sg_low_free: bad mem_src=%d, buff=0x%p, rqSz=%d\n",
		       mem_src, buff, size);
		break;
	}
}
|
2612 |
|
|
|
2613 |
|
|
/*
 * Logged front end to sg_low_free: frees only when both a buffer
 * and a positive size were supplied, otherwise silently does nothing.
 */
static void sg_free(char * buff, int size, int mem_src)
{
	SCSI_LOG_TIMEOUT(6,
		printk("sg_free: buff=0x%p, size=%d\n", buff, size));
	if (buff && (size > 0))
		sg_low_free(buff, size, mem_src);
}
|
2622 |
|
|
|
2623 |
|
|
/*
 * Release 'nr' kiobufs from bufp; counterpart of sg_alloc_kiovec.
 * Uses the sized kiovec API when SG_NEW_KIOVEC is set (szp gives the
 * sizes), the legacy API otherwise.
 */
static inline void sg_free_kiovec(int nr, struct kiobuf **bufp, int *szp)
{
#if SG_NEW_KIOVEC
	free_kiovec_sz(nr, bufp, szp);
#else
	free_kiovec(nr, bufp);
#endif
}
|
2631 |
|
|
|
2632 |
|
|
static int sg_ms_to_jif(unsigned int msecs)
|
2633 |
|
|
{
|
2634 |
|
|
if ((UINT_MAX / 2U) < msecs)
|
2635 |
|
|
return INT_MAX; /* special case, set largest possible */
|
2636 |
|
|
else
|
2637 |
|
|
return ((int)msecs < (INT_MAX / 1000)) ? (((int)msecs * HZ) / 1000)
|
2638 |
|
|
: (((int)msecs / 1000) * HZ);
|
2639 |
|
|
}
|
2640 |
|
|
|
2641 |
|
|
static inline unsigned sg_jif_to_ms(int jifs)
|
2642 |
|
|
{
|
2643 |
|
|
if (jifs <= 0)
|
2644 |
|
|
return 0U;
|
2645 |
|
|
else {
|
2646 |
|
|
unsigned int j = (unsigned int)jifs;
|
2647 |
|
|
return (j < (UINT_MAX / 1000)) ? ((j * 1000) / HZ) : ((j / HZ) * 1000);
|
2648 |
|
|
}
|
2649 |
|
|
}
|
2650 |
|
|
|
2651 |
|
|
/* SCSI opcodes that any opener may issue regardless of capability */
static unsigned char allow_ops[] = {TEST_UNIT_READY, REQUEST_SENSE,
    INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
    MODE_SENSE, MODE_SENSE_10, LOG_SENSE};

/*
 * Returns 1 if 'opcode' is permitted on a device of 'dev_type' for an
 * unprivileged caller, else 0. Scanners are allowed every opcode.
 */
static int sg_allow_access(unsigned char opcode, char dev_type)
{
    int k;

    if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
        return 1;
    for (k = 0; k < sizeof(allow_ops); ++k) {
        if (opcode == allow_ops[k])
            return 1;
    }
    return 0;
}
|
2667 |
|
|
|
2668 |
|
|
|
2669 |
|
|
#ifdef CONFIG_PROC_FS
|
2670 |
|
|
/*
 * Highest index of a device-array slot still bound to a SCSI device,
 * plus one (i.e. an origin-1 count); 0 when none are active. Scans
 * backwards under the device-array read lock.
 */
static int sg_last_dev()
{
    int k;
    unsigned long iflags;

    read_lock_irqsave(&sg_dev_arr_lock, iflags);
    for (k = sg_template.dev_max - 1; k >= 0; --k)
        if (sg_dev_arr[k] && sg_dev_arr[k]->device) break;
    read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
    return k + 1; /* origin 1 */
}
|
2681 |
|
|
#endif
|
2682 |
|
|
|
2683 |
|
|
static Sg_device * sg_get_dev(int dev)
|
2684 |
|
|
{
|
2685 |
|
|
Sg_device * sdp = NULL;
|
2686 |
|
|
unsigned long iflags;
|
2687 |
|
|
|
2688 |
|
|
if (sg_dev_arr && (dev >= 0))
|
2689 |
|
|
{
|
2690 |
|
|
read_lock_irqsave(&sg_dev_arr_lock, iflags);
|
2691 |
|
|
if (dev < sg_template.dev_max)
|
2692 |
|
|
sdp = sg_dev_arr[dev];
|
2693 |
|
|
read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
|
2694 |
|
|
}
|
2695 |
|
|
return sdp;
|
2696 |
|
|
}
|
2697 |
|
|
|
2698 |
|
|
#ifdef CONFIG_PROC_FS
|
2699 |
|
|
|
2700 |
|
|
static struct proc_dir_entry * sg_proc_sgp = NULL;
|
2701 |
|
|
|
2702 |
|
|
static char sg_proc_sg_dirname[] = "sg";
|
2703 |
|
|
static const char * sg_proc_leaf_names[] = {"allow_dio", "def_reserved_size",
|
2704 |
|
|
"debug", "devices", "device_hdr", "device_strs",
|
2705 |
|
|
"hosts", "host_hdr", "host_strs", "version"};
|
2706 |
|
|
|
2707 |
|
|
static int sg_proc_adio_read(char * buffer, char ** start, off_t offset,
|
2708 |
|
|
int size, int * eof, void * data);
|
2709 |
|
|
static int sg_proc_adio_info(char * buffer, int * len, off_t * begin,
|
2710 |
|
|
off_t offset, int size);
|
2711 |
|
|
static int sg_proc_adio_write(struct file * filp, const char * buffer,
|
2712 |
|
|
unsigned long count, void * data);
|
2713 |
|
|
static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset,
|
2714 |
|
|
int size, int * eof, void * data);
|
2715 |
|
|
static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin,
|
2716 |
|
|
off_t offset, int size);
|
2717 |
|
|
static int sg_proc_dressz_write(struct file * filp, const char * buffer,
|
2718 |
|
|
unsigned long count, void * data);
|
2719 |
|
|
static int sg_proc_debug_read(char * buffer, char ** start, off_t offset,
|
2720 |
|
|
int size, int * eof, void * data);
|
2721 |
|
|
static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
|
2722 |
|
|
off_t offset, int size);
|
2723 |
|
|
static int sg_proc_dev_read(char * buffer, char ** start, off_t offset,
|
2724 |
|
|
int size, int * eof, void * data);
|
2725 |
|
|
static int sg_proc_dev_info(char * buffer, int * len, off_t * begin,
|
2726 |
|
|
off_t offset, int size);
|
2727 |
|
|
static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset,
|
2728 |
|
|
int size, int * eof, void * data);
|
2729 |
|
|
static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin,
|
2730 |
|
|
off_t offset, int size);
|
2731 |
|
|
static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset,
|
2732 |
|
|
int size, int * eof, void * data);
|
2733 |
|
|
static int sg_proc_devstrs_info(char * buffer, int * len, off_t * begin,
|
2734 |
|
|
off_t offset, int size);
|
2735 |
|
|
static int sg_proc_host_read(char * buffer, char ** start, off_t offset,
|
2736 |
|
|
int size, int * eof, void * data);
|
2737 |
|
|
static int sg_proc_host_info(char * buffer, int * len, off_t * begin,
|
2738 |
|
|
off_t offset, int size);
|
2739 |
|
|
static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset,
|
2740 |
|
|
int size, int * eof, void * data);
|
2741 |
|
|
static int sg_proc_hosthdr_info(char * buffer, int * len, off_t * begin,
|
2742 |
|
|
off_t offset, int size);
|
2743 |
|
|
static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset,
|
2744 |
|
|
int size, int * eof, void * data);
|
2745 |
|
|
static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin,
|
2746 |
|
|
off_t offset, int size);
|
2747 |
|
|
static int sg_proc_version_read(char * buffer, char ** start, off_t offset,
|
2748 |
|
|
int size, int * eof, void * data);
|
2749 |
|
|
static int sg_proc_version_info(char * buffer, int * len, off_t * begin,
|
2750 |
|
|
off_t offset, int size);
|
2751 |
|
|
static read_proc_t * sg_proc_leaf_reads[] = {
|
2752 |
|
|
sg_proc_adio_read, sg_proc_dressz_read, sg_proc_debug_read,
|
2753 |
|
|
sg_proc_dev_read, sg_proc_devhdr_read, sg_proc_devstrs_read,
|
2754 |
|
|
sg_proc_host_read, sg_proc_hosthdr_read, sg_proc_hoststrs_read,
|
2755 |
|
|
sg_proc_version_read};
|
2756 |
|
|
static write_proc_t * sg_proc_leaf_writes[] = {
|
2757 |
|
|
sg_proc_adio_write, sg_proc_dressz_write, 0, 0, 0, 0, 0, 0, 0, 0};
|
2758 |
|
|
|
2759 |
|
|
/*
 * Append formatted output to 'buffer' while maintaining the *len /
 * *begin window bookkeeping the procfs read protocol needs. Expands
 * only inside the sg_proc_*_info functions, where buffer, len, begin,
 * offset and size are in scope; note it may 'return 0' from the
 * ENCLOSING function once the requested window has been filled.
 */
#define PRINT_PROC(fmt,args...) \
    do { \
	*len += sprintf(buffer + *len, fmt, ##args); \
	if (*begin + *len > offset + size) \
	    return 0; \
	if (*begin + *len < offset) { \
	    *begin += *len; \
	    *len = 0; \
	} \
    } while(0)
|
2769 |
|
|
|
2770 |
|
|
/*
 * Body shared by every sg_proc_*_read function: runs the matching
 * *_info generator and converts its len/begin accounting into the
 * (*start, *eof, return count) form the procfs read_proc interface
 * expects. Expands to the entire function body, including returns.
 */
#define SG_PROC_READ_FN(infofp) \
    do { \
	int len = 0; \
	off_t begin = 0; \
	*eof = infofp(buffer, &len, &begin, offset, size); \
	if (offset >= (begin + len)) \
	    return 0; \
	*start = buffer + offset - begin; \
	return (size < (begin + len - offset)) ? \
	       size : begin + len - offset; \
    } while(0)
|
2781 |
|
|
|
2782 |
|
|
|
2783 |
|
|
/*
 * Create the /proc/scsi/sg directory and one entry per leaf in
 * sg_proc_leaf_names, wiring read (and optional write) handlers from
 * the parallel sg_proc_leaf_reads/writes tables. Writable leaves get
 * mode rw-r--r--, read-only leaves r--r--r--. Returns 0 on success,
 * 1 when /proc/scsi is absent or the directory cannot be created;
 * failure to create an individual leaf is tolerated.
 */
static int sg_proc_init()
{
    int k, mask;
    int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]);
    struct proc_dir_entry * pdep;

    if (! proc_scsi)
	return 1;
    sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
				    S_IFDIR | S_IRUGO | S_IXUGO, proc_scsi);
    if (! sg_proc_sgp)
	return 1;
    for (k = 0; k < leaves; ++k) {
	mask = sg_proc_leaf_writes[k] ? S_IRUGO | S_IWUSR : S_IRUGO;
	pdep = create_proc_entry(sg_proc_leaf_names[k], mask, sg_proc_sgp);
	if (pdep) {
	    pdep->read_proc = sg_proc_leaf_reads[k];
	    if (sg_proc_leaf_writes[k])
		pdep->write_proc = sg_proc_leaf_writes[k];
	}
    }
    return 0;
}
|
2806 |
|
|
|
2807 |
|
|
/*
 * Remove every /proc/scsi/sg leaf and then the sg directory itself;
 * inverse of sg_proc_init. No-op if procfs was never set up.
 */
static void sg_proc_cleanup()
{
    int k;
    int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]);

    if ((! proc_scsi) || (! sg_proc_sgp))
	return;
    for (k = 0; k < leaves; ++k)
	remove_proc_entry(sg_proc_leaf_names[k], sg_proc_sgp);
    remove_proc_entry(sg_proc_sg_dirname, proc_scsi);
}
|
2818 |
|
|
|
2819 |
|
|
/* procfs read handler for /proc/scsi/sg/allow_dio */
static int sg_proc_adio_read(char * buffer, char ** start, off_t offset,
			     int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_adio_info); }

/* Emit the current sg_allow_dio flag (0 or 1) followed by a newline. */
static int sg_proc_adio_info(char * buffer, int * len, off_t * begin,
			     off_t offset, int size)
{
    PRINT_PROC("%d\n", sg_allow_dio);
    return 1;
}
|
2829 |
|
|
|
2830 |
|
|
static int sg_proc_adio_write(struct file * filp, const char * buffer,
|
2831 |
|
|
unsigned long count, void * data)
|
2832 |
|
|
{
|
2833 |
|
|
int num;
|
2834 |
|
|
char buff[11];
|
2835 |
|
|
|
2836 |
|
|
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
2837 |
|
|
return -EACCES;
|
2838 |
|
|
num = (count < 10) ? count : 10;
|
2839 |
|
|
copy_from_user(buff, buffer, num);
|
2840 |
|
|
buff[num] = '\0';
|
2841 |
|
|
sg_allow_dio = simple_strtoul(buff, 0, 10) ? 1 : 0;
|
2842 |
|
|
return count;
|
2843 |
|
|
}
|
2844 |
|
|
|
2845 |
|
|
/* procfs read handler for /proc/scsi/sg/def_reserved_size */
static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset,
			       int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_dressz_info); }

/* Emit the current default reserve-buffer size (sg_big_buff). */
static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin,
			       off_t offset, int size)
{
    PRINT_PROC("%d\n", sg_big_buff);
    return 1;
}
|
2855 |
|
|
|
2856 |
|
|
static int sg_proc_dressz_write(struct file * filp, const char * buffer,
|
2857 |
|
|
unsigned long count, void * data)
|
2858 |
|
|
{
|
2859 |
|
|
int num;
|
2860 |
|
|
unsigned long k = ULONG_MAX;
|
2861 |
|
|
char buff[11];
|
2862 |
|
|
|
2863 |
|
|
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
2864 |
|
|
return -EACCES;
|
2865 |
|
|
num = (count < 10) ? count : 10;
|
2866 |
|
|
copy_from_user(buff, buffer, num);
|
2867 |
|
|
buff[num] = '\0';
|
2868 |
|
|
k = simple_strtoul(buff, 0, 10);
|
2869 |
|
|
if (k <= 1048576) {
|
2870 |
|
|
sg_big_buff = k;
|
2871 |
|
|
return count;
|
2872 |
|
|
}
|
2873 |
|
|
return -ERANGE;
|
2874 |
|
|
}
|
2875 |
|
|
|
2876 |
|
|
/* procfs read handler for /proc/scsi/sg/debug */
static int sg_proc_debug_read(char * buffer, char ** start, off_t offset,
			      int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_debug_info); }

/*
 * Dump driver state: global counters, then for every active device
 * each open fd and every outstanding or completed request on it.
 * Output layout mirrors the driver's internal structures and is
 * intended for debugging, not machine parsing.
 */
static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
			      off_t offset, int size)
{
    Sg_device * sdp;
    const sg_io_hdr_t * hp;
    int j, max_dev, new_interface;

    if (NULL == sg_dev_arr) {
	PRINT_PROC("sg_dev_arr NULL, driver not initialized\n");
	return 1;
    }
    max_dev = sg_last_dev();
    PRINT_PROC("dev_max(currently)=%d max_active_device=%d (origin 1)\n",
	       sg_template.dev_max, max_dev);
    PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d "
	       "def_reserved_size=%d\n",
	       scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff);
    for (j = 0; j < max_dev; ++j) {
	if ((sdp = sg_get_dev(j))) {
	    Sg_fd * fp;
	    Sg_request * srp;
	    struct scsi_device * scsidp;
	    int dev, k, m, blen, usg;

	    scsidp = sdp->device;
	    if (NULL == scsidp) {
		PRINT_PROC("device %d detached ??\n", j);
		continue;
	    }
	    dev = MINOR(sdp->i_rdev);

	    /* per-device header, only when at least one fd is open */
	    if (sg_get_nth_sfp(sdp, 0)) {
		PRINT_PROC(" >>> device=sg%d ", dev);
		if (sdp->detached)
		    PRINT_PROC("detached pending close ");
		else
		    PRINT_PROC("scsi%d chan=%d id=%d lun=%d em=%d",
			scsidp->host->host_no, scsidp->channel,
			scsidp->id, scsidp->lun, scsidp->host->hostt->emulated);
		PRINT_PROC(" sg_tablesize=%d excl=%d\n", sdp->sg_tablesize,
			   sdp->exclude);
	    }
	    /* per-fd state */
	    for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
		PRINT_PROC(" FD(%d): timeout=%dms bufflen=%d "
		    "(res)sgat=%d low_dma=%d\n", k + 1,
		    sg_jif_to_ms(fp->timeout), fp->reserve.bufflen,
		    (int)fp->reserve.k_use_sg, (int)fp->low_dma);
		PRINT_PROC(" cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
		    (int)fp->cmd_q, (int)fp->force_packid,
		    (int)fp->keep_orphan, (int)fp->closed);
		/* per-request state */
		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
		    hp = &srp->header;
		    /* v3 (sg_io_hdr) vs old interface detection */
		    new_interface = (hp->interface_id == '\0') ? 0 : 1;
/* stop indenting so far ... */
	PRINT_PROC(srp->res_used ? ((new_interface &&
	    (SG_FLAG_MMAP_IO & hp->flags)) ? " mmap>> " : " rb>> ") :
	    ((SG_INFO_DIRECT_IO_MASK & hp->info) ? " dio>> " : " "));
	blen = srp->my_cmdp ? srp->my_cmdp->sr_bufflen : srp->data.bufflen;
	usg = srp->my_cmdp ? srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
	PRINT_PROC(srp->done ? ((1 == srp->done) ? "rcv:" : "fin:")
			     : (srp->my_cmdp ? "act:" : "prior:"));
	PRINT_PROC(" id=%d blen=%d", srp->header.pack_id, blen);
	if (srp->done)
	    PRINT_PROC(" dur=%d", hp->duration);
	else
	    PRINT_PROC(" t_o/elap=%d/%d", new_interface ? hp->timeout :
		sg_jif_to_ms(fp->timeout),
		sg_jif_to_ms(hp->duration ? (jiffies - hp->duration) : 0));
	PRINT_PROC("ms sgat=%d op=0x%02x\n", usg, (int)srp->data.cmd_opcode);
/* reset indenting */
		}
		if (0 == m)
		    PRINT_PROC(" No requests active\n");
	    }
	}
    }
    return 1;
}
|
2958 |
|
|
|
2959 |
|
|
/* procfs read handler for /proc/scsi/sg/devices */
static int sg_proc_dev_read(char * buffer, char ** start, off_t offset,
			    int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_dev_info); }

/*
 * One tab-separated line per device slot (columns described by
 * sg_proc_devhdr_info); detached or empty slots print a row of -1s
 * so row position still corresponds to the sg minor number.
 */
static int sg_proc_dev_info(char * buffer, int * len, off_t * begin,
			    off_t offset, int size)
{
    Sg_device * sdp;
    int j, max_dev;
    struct scsi_device * scsidp;

    max_dev = sg_last_dev();
    for (j = 0; j < max_dev; ++j) {
	sdp = sg_get_dev(j);
	if (sdp && (scsidp = sdp->device) && (! sdp->detached))
	    PRINT_PROC("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
		scsidp->host->host_no, scsidp->channel, scsidp->id,
		scsidp->lun, (int)scsidp->type, (int)scsidp->access_count,
		(int)scsidp->queue_depth, (int)scsidp->device_busy,
		(int)scsidp->online);
	else
	    PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
    }
    return 1;
}
|
2984 |
|
|
|
2985 |
|
|
/* procfs read handler for /proc/scsi/sg/device_hdr */
static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset,
			       int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_devhdr_info); }

/* Column headings matching sg_proc_dev_info's output. */
static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin,
			       off_t offset, int size)
{
    PRINT_PROC("host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
    return 1;
}
|
2995 |
|
|
|
2996 |
|
|
/* procfs read handler for /proc/scsi/sg/device_strs */
static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset,
				int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_devstrs_info); }

/*
 * One vendor/model/revision line per device slot, fixed-width fields;
 * empty or detached slots print a placeholder line so row position
 * still corresponds to the sg minor number.
 */
static int sg_proc_devstrs_info(char * buffer, int * len, off_t * begin,
				off_t offset, int size)
{
    Sg_device * sdp;
    int j, max_dev;
    struct scsi_device * scsidp;

    max_dev = sg_last_dev();
    for (j = 0; j < max_dev; ++j) {
	sdp = sg_get_dev(j);
	if (sdp && (scsidp = sdp->device) && (! sdp->detached))
	    PRINT_PROC("%8.8s\t%16.16s\t%4.4s\n",
		scsidp->vendor, scsidp->model, scsidp->rev);
	else
	    PRINT_PROC("<no active device>\n");
    }
    return 1;
}
|
3018 |
|
|
|
3019 |
|
|
/* procfs read handler for /proc/scsi/sg/hosts */
static int sg_proc_host_read(char * buffer, char ** start, off_t offset,
			     int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_host_info); }

/*
 * One tab-separated line per SCSI host (columns described by
 * sg_proc_hosthdr_info). Gaps in the host numbering are padded with
 * rows of -1 so row position corresponds to the host number.
 */
static int sg_proc_host_info(char * buffer, int * len, off_t * begin,
			     off_t offset, int size)
{
    struct Scsi_Host * shp;
    int k;

    for (k = 0, shp = scsi_hostlist; shp; shp = shp->next, ++k) {
	for ( ; k < shp->host_no; ++k)
	    PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\n");
	PRINT_PROC("%u\t%hu\t%hd\t%hu\t%d\t%d\n",
		   shp->unique_id, shp->host_busy, shp->cmd_per_lun,
		   shp->sg_tablesize, (int)shp->unchecked_isa_dma,
		   (int)shp->hostt->emulated);
    }
    return 1;
}
|
3039 |
|
|
|
3040 |
|
|
/* procfs read handler for /proc/scsi/sg/host_hdr */
static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset,
				int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_hosthdr_info); }

/* Column headings matching sg_proc_host_info's output. */
static int sg_proc_hosthdr_info(char * buffer, int * len, off_t * begin,
				off_t offset, int size)
{
    PRINT_PROC("uid\tbusy\tcpl\tscatg\tisa\temul\n");
    return 1;
}
|
3050 |
|
|
|
3051 |
|
|
/* procfs read handler for /proc/scsi/sg/host_strs */
static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset,
				 int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_hoststrs_info); }

#define SG_MAX_HOST_STR_LEN 256

/*
 * One description line per SCSI host, taken from the host template's
 * info() hook, else its name, else a placeholder. The string is
 * copied into a bounded buffer, forcibly NUL-terminated (strncpy
 * alone does not guarantee that) and embedded newlines are flattened
 * to spaces so each host stays on one line. Numbering gaps are padded
 * so row position corresponds to the host number.
 */
static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin,
				 off_t offset, int size)
{
    struct Scsi_Host * shp;
    int k;
    char buff[SG_MAX_HOST_STR_LEN];
    char * cp;

    for (k = 0, shp = scsi_hostlist; shp; shp = shp->next, ++k) {
	for ( ; k < shp->host_no; ++k)
	    PRINT_PROC("<no active host>\n");
	strncpy(buff, shp->hostt->info ? shp->hostt->info(shp) :
		(shp->hostt->name ? shp->hostt->name : "<no name>"),
		SG_MAX_HOST_STR_LEN);
	buff[SG_MAX_HOST_STR_LEN - 1] = '\0';
	for (cp = buff; *cp; ++cp) {
	    if ('\n' == *cp)
		*cp = ' '; /* suppress imbedded newlines */
	}
	PRINT_PROC("%s\n", buff);
    }
    return 1;
}
|
3080 |
|
|
|
3081 |
|
|
/* procfs read handler for /proc/scsi/sg/version */
static int sg_proc_version_read(char * buffer, char ** start, off_t offset,
				int size, int * eof, void * data)
{ SG_PROC_READ_FN(sg_proc_version_info); }

/* Numeric version followed by the human-readable version string. */
static int sg_proc_version_info(char * buffer, int * len, off_t * begin,
				off_t offset, int size)
{
    PRINT_PROC("%d\t%s\n", sg_version_num, sg_version_str);
    return 1;
}
|
3091 |
|
|
#endif /* CONFIG_PROC_FS */
|
3092 |
|
|
|
3093 |
|
|
|
3094 |
|
|
module_init(init_sg);
|
3095 |
|
|
module_exit(exit_sg);
|