/*======================================================================

    PCMCIA Bulk Memory Services

    bulkmem.c 1.38 2000/09/25 19:29:51

    The contents of this file are subject to the Mozilla Public
    License Version 1.1 (the "License"); you may not use this file
    except in compliance with the License. You may obtain a copy of
    the License at http://www.mozilla.org/MPL/

    Software distributed under the License is distributed on an "AS
    IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
    implied. See the License for the specific language governing
    rights and limitations under the License.

    The initial developer of the original code is David A. Hinds
    <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
    are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.

    Alternatively, the contents of this file may be used under the
    terms of the GNU General Public License version 2 (the "GPL"), in which
    case the provisions of the GPL are applicable instead of the
    above.  If you wish to allow the use of your version of this file
    only under the terms of the GPL and not to allow others to use
    your version of this file under the MPL, indicate your decision
    by deleting the provisions above and replace them with the notice
    and other provisions required by the GPL.  If you do not delete
    the provisions above, a recipient may use your version of this
    file under either the MPL or the GPL.

======================================================================*/

#define __NO_VERSION__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>

#define IN_CARD_SERVICES
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/bulkmem.h>
#include <pcmcia/cistpl.h>
#include "cs_internal.h"

/*======================================================================

    This function handles submitting an MTD request, and retrying
    requests when an MTD is busy.

    An MTD request should never block.

======================================================================*/

static int do_mtd_request(memory_handle_t handle, mtd_request_t *req,
                          caddr_t buf)
{
    int ret, tries;
    client_t *mtd;
    socket_info_t *s;

    mtd = handle->mtd;
    if (mtd == NULL)
        return CS_GENERAL_FAILURE;
    s = SOCKET(mtd);
    for (ret = tries = 0; tries < 100; tries++) {
        mtd->event_callback_args.mtdrequest = req;
        mtd->event_callback_args.buffer = buf;
        ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
        if (ret != CS_BUSY)
            break;
        switch (req->Status) {
        case MTD_WAITREQ:
            /* Not that we should ever need this... */
            interruptible_sleep_on_timeout(&mtd->mtd_req, HZ);
            break;
        case MTD_WAITTIMER:
        case MTD_WAITRDY:
            interruptible_sleep_on_timeout(&mtd->mtd_req, req->Timeout*HZ/1000);
            req->Function |= MTD_REQ_TIMEOUT;
            break;
        case MTD_WAITPOWER:
            interruptible_sleep_on(&mtd->mtd_req);
            break;
        }
        if (signal_pending(current))
            printk(KERN_NOTICE "cs: do_mtd_request interrupted!\n");
    }
    if (tries == 100) {
        /* all 100 retries exhausted without the MTD accepting the request */
        printk(KERN_NOTICE "cs: MTD request timed out!\n");
        ret = CS_GENERAL_FAILURE;
    }
    wake_up_interruptible(&mtd->mtd_req);
    retry_erase_list(&mtd->erase_busy, 0);
    return ret;
} /* do_mtd_request */

/*======================================================================

    This stuff is all for handling asynchronous erase requests.  It
    is complicated because all the retry stuff has to be dealt with
    in timer interrupts or in the card status event handler.

======================================================================*/

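/* The erase_busy_t queues used below are circular doubly-linked lists
   with a sentinel head: insert_queue() links an entry in just ahead of
   the head (i.e., at the tail of the queue), and remove_queue() unlinks
   an entry from whatever queue it is currently on. */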
static void insert_queue(erase_busy_t *head, erase_busy_t *entry)
{
    DEBUG(2, "cs: adding 0x%p to queue 0x%p\n", entry, head);
    entry->next = head;
    entry->prev = head->prev;
    head->prev->next = entry;
    head->prev = entry;
}

static void remove_queue(erase_busy_t *entry)
{
    DEBUG(2, "cs: unqueueing 0x%p\n", entry);
    entry->next->prev = entry->prev;
    entry->prev->next = entry->next;
}

static void retry_erase(erase_busy_t *busy, u_int cause)
{
    eraseq_entry_t *erase = busy->erase;
    mtd_request_t req;
    client_t *mtd;
    socket_info_t *s;
    int ret;

    DEBUG(2, "cs: trying erase request 0x%p...\n", busy);
    if (busy->next)
        remove_queue(busy);
    req.Function = MTD_REQ_ERASE | cause;
    req.TransferLength = erase->Size;
    req.DestCardOffset = erase->Offset + erase->Handle->info.CardOffset;
    req.MediaID = erase->Handle->MediaID;
    mtd = erase->Handle->mtd;
    s = SOCKET(mtd);
    mtd->event_callback_args.mtdrequest = &req;
    ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
    if (ret == CS_BUSY) {
        DEBUG(2, "  Status = %d, requeueing.\n", req.Status);
        switch (req.Status) {
        case MTD_WAITREQ:
        case MTD_WAITPOWER:
            insert_queue(&mtd->erase_busy, busy);
            break;
        case MTD_WAITTIMER:
        case MTD_WAITRDY:
            if (req.Status == MTD_WAITRDY)
                insert_queue(&s->erase_busy, busy);
            mod_timer(&busy->timeout, jiffies + req.Timeout*HZ/1000);
            break;
        }
    } else {
        /* update erase queue status */
        DEBUG(2, "  Ret = %d\n", ret);
        switch (ret) {
        case CS_SUCCESS:
            erase->State = ERASE_PASSED; break;
        case CS_WRITE_PROTECTED:
            erase->State = ERASE_MEDIA_WRPROT; break;
        case CS_BAD_OFFSET:
            erase->State = ERASE_BAD_OFFSET; break;
        case CS_BAD_SIZE:
            erase->State = ERASE_BAD_SIZE; break;
        case CS_NO_CARD:
            erase->State = ERASE_BAD_SOCKET; break;
        default:
            erase->State = ERASE_FAILED; break;
        }
        busy->client->event_callback_args.info = erase;
        EVENT(busy->client, CS_EVENT_ERASE_COMPLETE, CS_EVENT_PRI_LOW);
        kfree(busy);
        /* Resubmit anything waiting for a request to finish */
        wake_up_interruptible(&mtd->mtd_req);
        retry_erase_list(&mtd->erase_busy, 0);
    }
} /* retry_erase */

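/* retry_erase_list() first splices every pending entry onto a temporary
   head on the stack and resets the real list to empty; retry_erase() can
   then requeue individual entries onto the real head (or free them)
   without the scan below ever revisiting the same entry. */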
void retry_erase_list(erase_busy_t *list, u_int cause)
{
    erase_busy_t tmp = *list;

    DEBUG(2, "cs: rescanning erase queue list 0x%p\n", list);
    if (list->next == list)
        return;
    /* First, truncate the original list */
    list->prev->next = &tmp;
    list->next->prev = &tmp;
    list->prev = list->next = list;
    tmp.prev->next = &tmp;
    tmp.next->prev = &tmp;

    /* Now, retry each request, in order. */
    while (tmp.next != &tmp)
        retry_erase(tmp.next, cause);
} /* retry_erase_list */

static void handle_erase_timeout(u_long arg)
{
    DEBUG(0, "cs: erase timeout for entry 0x%lx\n", arg);
    retry_erase((erase_busy_t *)arg, MTD_REQ_TIMEOUT);
}

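/* Validate one queued erase request against its region (offset and size
   must lie inside the region and be erase-block aligned), then allocate
   an erase_busy_t tracking structure and submit the first attempt via
   retry_erase().  On any failure the entry's State is set to the
   corresponding ERASE_* code instead. */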
static void setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
{
    erase_busy_t *busy;
    region_info_t *info;

    if (CHECK_REGION(erase->Handle))
        erase->State = ERASE_BAD_SOCKET;
    else {
        info = &erase->Handle->info;
        if ((erase->Offset >= info->RegionSize) ||
            (erase->Offset & (info->BlockSize-1)))
            erase->State = ERASE_BAD_OFFSET;
        else if ((erase->Offset+erase->Size > info->RegionSize) ||
                 (erase->Size & (info->BlockSize-1)))
            erase->State = ERASE_BAD_SIZE;
        else {
            erase->State = 1;
            busy = kmalloc(sizeof(erase_busy_t), GFP_KERNEL);
            if (!busy) {
                erase->State = ERASE_FAILED;
                return;
            }
            busy->erase = erase;
            busy->client = handle;
            init_timer(&busy->timeout);
            busy->timeout.data = (u_long)busy;
            busy->timeout.function = &handle_erase_timeout;
            busy->prev = busy->next = NULL;
            retry_erase(busy, 0);
        }
    }
} /* setup_erase_request */

/*======================================================================

    MTD helper functions

======================================================================*/

static int mtd_modify_window(window_handle_t win, mtd_mod_win_t *req)
{
    if ((win == NULL) || (win->magic != WINDOW_MAGIC))
        return CS_BAD_HANDLE;
    win->ctl.flags = MAP_16BIT | MAP_ACTIVE;
    if (req->Attributes & WIN_USE_WAIT)
        win->ctl.flags |= MAP_USE_WAIT;
    if (req->Attributes & WIN_MEMORY_TYPE)
        win->ctl.flags |= MAP_ATTRIB;
    win->ctl.speed = req->AccessSpeed;
    win->ctl.card_start = req->CardOffset;
    win->sock->ss_entry->set_mem_map(win->sock->sock, &win->ctl);
    return CS_SUCCESS;
}

static int mtd_set_vpp(client_handle_t handle, mtd_vpp_req_t *req)
{
    socket_info_t *s;
    if (CHECK_HANDLE(handle))
        return CS_BAD_HANDLE;
    if (req->Vpp1 != req->Vpp2)
        return CS_BAD_VPP;
    s = SOCKET(handle);
    s->socket.Vpp = req->Vpp1;
    if (s->ss_entry->set_socket(s->sock, &s->socket))
        return CS_BAD_VPP;
    return CS_SUCCESS;
}

static int mtd_rdy_mask(client_handle_t handle, mtd_rdy_req_t *req)
{
    socket_info_t *s;
    if (CHECK_HANDLE(handle))
        return CS_BAD_HANDLE;
    s = SOCKET(handle);
    if (req->Mask & CS_EVENT_READY_CHANGE)
        s->socket.csc_mask |= SS_READY;
    else
        s->socket.csc_mask &= ~SS_READY;
    if (s->ss_entry->set_socket(s->sock, &s->socket))
        return CS_GENERAL_FAILURE;
    return CS_SUCCESS;
}

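/* MTDHelperEntry() is the dispatcher through which MTD drivers reach the
   helpers above.  A minimal illustrative call (not lifted from a real
   driver; 'handle' is the MTD's client handle, 'new_vpp' is a placeholder
   value, and mtd_set_vpp() requires Vpp1 == Vpp2):

       mtd_vpp_req_t req;
       req.Vpp1 = req.Vpp2 = new_vpp;
       MTDHelperEntry(MTDSetVpp, handle, &req);

   For MTDRequestWindow, a1 points at the client handle on entry and is
   overwritten with the new window handle on return. */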
int MTDHelperEntry(int func, void *a1, void *a2)
{
    switch (func) {
    case MTDRequestWindow:
    {
        window_handle_t w;
        int ret = pcmcia_request_window(a1, a2, &w);
        /* hand the new window handle back through a1 */
        *(window_handle_t *)a1 = w;
        return ret;
    }
        break;
    case MTDReleaseWindow:
        return pcmcia_release_window(a1);
    case MTDModifyWindow:
        return mtd_modify_window(a1, a2); break;
    case MTDSetVpp:
        return mtd_set_vpp(a1, a2); break;
    case MTDRDYMask:
        return mtd_rdy_mask(a1, a2); break;
    default:
        return CS_UNSUPPORTED_FUNCTION; break;
    }
} /* MTDHelperEntry */

/*======================================================================

    This stuff is used by Card Services to initialize the table of
    region info used for subsequent calls to GetFirstRegion and
    GetNextRegion.

======================================================================*/

static void setup_regions(client_handle_t handle, int attr,
                          memory_handle_t *list)
{
    int i, code, has_jedec, has_geo;
    u_int offset;
    cistpl_device_t device;
    cistpl_jedec_t jedec;
    cistpl_device_geo_t geo;
    memory_handle_t r;

    DEBUG(1, "cs: setup_regions(0x%p, %d, 0x%p)\n",
          handle, attr, list);

    code = (attr) ? CISTPL_DEVICE_A : CISTPL_DEVICE;
    if (read_tuple(handle, code, &device) != CS_SUCCESS)
        return;
    code = (attr) ? CISTPL_JEDEC_A : CISTPL_JEDEC_C;
    has_jedec = (read_tuple(handle, code, &jedec) == CS_SUCCESS);
    if (has_jedec && (device.ndev != jedec.nid)) {
#ifdef PCMCIA_DEBUG
        printk(KERN_DEBUG "cs: Device info does not match JEDEC info.\n");
#endif
        has_jedec = 0;
    }
    code = (attr) ? CISTPL_DEVICE_GEO_A : CISTPL_DEVICE_GEO;
    has_geo = (read_tuple(handle, code, &geo) == CS_SUCCESS);
    if (has_geo && (device.ndev != geo.ngeo)) {
#ifdef PCMCIA_DEBUG
        printk(KERN_DEBUG "cs: Device info does not match geometry tuple.\n");
#endif
        has_geo = 0;
    }

    offset = 0;
    for (i = 0; i < device.ndev; i++) {
        if ((device.dev[i].type != CISTPL_DTYPE_NULL) &&
            (device.dev[i].size != 0)) {
            r = kmalloc(sizeof(*r), GFP_KERNEL);
            if (!r) {
                printk(KERN_NOTICE "cs: setup_regions: kmalloc failed!\n");
                return;
            }
            r->region_magic = REGION_MAGIC;
            r->state = 0;
            r->dev_info[0] = '\0';
            r->mtd = NULL;
            r->info.Attributes = (attr) ? REGION_TYPE_AM : 0;
            r->info.CardOffset = offset;
            r->info.RegionSize = device.dev[i].size;
            r->info.AccessSpeed = device.dev[i].speed;
            if (has_jedec) {
                r->info.JedecMfr = jedec.id[i].mfr;
                r->info.JedecInfo = jedec.id[i].info;
            } else
                r->info.JedecMfr = r->info.JedecInfo = 0;
            if (has_geo) {
                r->info.BlockSize = geo.geo[i].buswidth *
                    geo.geo[i].erase_block * geo.geo[i].interleave;
                r->info.PartMultiple =
                    r->info.BlockSize * geo.geo[i].partition;
            } else
                r->info.BlockSize = r->info.PartMultiple = 1;
            r->info.next = *list; *list = r;
        }
        offset += device.dev[i].size;
    }
} /* setup_regions */

/*======================================================================

    This is tricky.  When get_first_region() is called by Driver
    Services, we initialize the region info table in the socket
    structure.  When it is called by an MTD, we can just scan the
    table for matching entries.

======================================================================*/

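/* Return the first region in 'list' visible to this client: ordinary
   clients match every region, while MTD clients (INFO_MTD_CLIENT) only
   match regions whose dev_info string equals their own. */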
static int match_region(client_handle_t handle, memory_handle_t list,
                        region_info_t *match)
{
    while (list != NULL) {
        if (!(handle->Attributes & INFO_MTD_CLIENT) ||
            (strcmp(handle->dev_info, list->dev_info) == 0)) {
            *match = list->info;
            return CS_SUCCESS;
        }
        list = list->info.next;
    }
    return CS_NO_MORE_ITEMS;
} /* match_region */

int pcmcia_get_first_region(client_handle_t handle, region_info_t *rgn)
{
    socket_info_t *s = SOCKET(handle);
    if (CHECK_HANDLE(handle))
        return CS_BAD_HANDLE;

    if ((handle->Attributes & INFO_MASTER_CLIENT) &&
        (!(s->state & SOCKET_REGION_INFO))) {
        setup_regions(handle, 0, &s->c_region);
        setup_regions(handle, 1, &s->a_region);
        s->state |= SOCKET_REGION_INFO;
    }

    if (rgn->Attributes & REGION_TYPE_AM)
        return match_region(handle, s->a_region, rgn);
    else
        return match_region(handle, s->c_region, rgn);
} /* get_first_region */

int pcmcia_get_next_region(client_handle_t handle, region_info_t *rgn)
{
    if (CHECK_HANDLE(handle))
        return CS_BAD_HANDLE;
    return match_region(handle, rgn->next, rgn);
} /* get_next_region */

/*======================================================================

    Connect an MTD with a memory region.

======================================================================*/

int pcmcia_register_mtd(client_handle_t handle, mtd_reg_t *reg)
{
    memory_handle_t list;
    socket_info_t *s;

    if (CHECK_HANDLE(handle))
        return CS_BAD_HANDLE;
    s = SOCKET(handle);
    if (reg->Attributes & REGION_TYPE_AM)
        list = s->a_region;
    else
        list = s->c_region;
    DEBUG(1, "cs: register_mtd(0x%p, '%s', 0x%x)\n",
          handle, handle->dev_info, reg->Offset);
    while (list) {
        if (list->info.CardOffset == reg->Offset) break;
        list = list->info.next;
    }
    if (list && (list->mtd == NULL) &&
        (strcmp(handle->dev_info, list->dev_info) == 0)) {
        list->info.Attributes = reg->Attributes;
        list->MediaID = reg->MediaID;
        list->mtd = handle;
        handle->mtd_count++;
        return CS_SUCCESS;
    } else
        return CS_BAD_OFFSET;
} /* register_mtd */

/*======================================================================

    Erase queue management functions

======================================================================*/

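/* Typical client usage of the erase queue calls below (an illustrative
   sketch only; 'handle', 'entries' and 'n' belong to the caller).  Each
   entry to be erased is marked ERASE_QUEUED before calling
   pcmcia_check_erase_queue(), which submits every queued entry;
   completion is then reported per entry via CS_EVENT_ERASE_COMPLETE:

       eraseq_hdr_t hdr;
       eraseq_handle_t eq;

       hdr.QueueEntryCnt = n;
       hdr.QueueEntryArray = entries;
       pcmcia_register_erase_queue(&handle, &hdr, &eq);
       pcmcia_check_erase_queue(eq);
*/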
int pcmcia_register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header,
                                eraseq_handle_t *e)
{
    eraseq_t *queue;

    if ((handle == NULL) || CHECK_HANDLE(*handle))
        return CS_BAD_HANDLE;
    queue = kmalloc(sizeof(*queue), GFP_KERNEL);
    if (!queue) return CS_OUT_OF_RESOURCE;
    queue->eraseq_magic = ERASEQ_MAGIC;
    queue->handle = *handle;
    queue->count = header->QueueEntryCnt;
    queue->entry = header->QueueEntryArray;
    *e = queue;
    return CS_SUCCESS;
} /* register_erase_queue */

int pcmcia_deregister_erase_queue(eraseq_handle_t eraseq)
{
    int i;
    if (CHECK_ERASEQ(eraseq))
        return CS_BAD_HANDLE;
    for (i = 0; i < eraseq->count; i++)
        if (ERASE_IN_PROGRESS(eraseq->entry[i].State)) break;
    if (i < eraseq->count)
        return CS_BUSY;
    eraseq->eraseq_magic = 0;
    kfree(eraseq);
    return CS_SUCCESS;
} /* deregister_erase_queue */

int pcmcia_check_erase_queue(eraseq_handle_t eraseq)
{
    int i;
    if (CHECK_ERASEQ(eraseq))
        return CS_BAD_HANDLE;
    for (i = 0; i < eraseq->count; i++)
        if (eraseq->entry[i].State == ERASE_QUEUED)
            setup_erase_request(eraseq->handle, &eraseq->entry[i]);
    return CS_SUCCESS;
} /* check_erase_queue */

/*======================================================================

    Look up the memory region matching the request, and return a
    memory handle.

======================================================================*/

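/* Typical client usage of the memory access calls below (an illustrative
   sketch only; 'handle', 'buf' and 'len' belong to the caller).  The
   Offset passed to OpenMemory selects a region by its CardOffset; the
   Offset in mem_op_t is then relative to that region, and
   MEM_OP_BUFFER_KERNEL marks buf as a kernel-space buffer:

       open_mem_t open;
       memory_handle_t mh;
       mem_op_t op;

       open.Attributes = 0;
       open.Offset = 0;
       pcmcia_open_memory(&handle, &open, &mh);

       op.Offset = 0;
       op.Count = len;
       op.Attributes = MEM_OP_BUFFER_KERNEL;
       pcmcia_read_memory(mh, &op, buf);
       pcmcia_close_memory(mh);
*/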
int pcmcia_open_memory(client_handle_t *handle, open_mem_t *open, memory_handle_t *mh)
{
    socket_info_t *s;
    memory_handle_t region;

    if ((handle == NULL) || CHECK_HANDLE(*handle))
        return CS_BAD_HANDLE;
    s = SOCKET(*handle);
    if (open->Attributes & MEMORY_TYPE_AM)
        region = s->a_region;
    else
        region = s->c_region;
    while (region) {
        if (region->info.CardOffset == open->Offset) break;
        region = region->info.next;
    }
    if (region && region->mtd) {
        *mh = region;
        DEBUG(1, "cs: open_memory(0x%p, 0x%x) = 0x%p\n",
              handle, open->Offset, region);
        return CS_SUCCESS;
    } else
        return CS_BAD_OFFSET;
} /* open_memory */

/*======================================================================

    Close a memory handle from an earlier call to OpenMemory.

    For the moment, I don't think this needs to do anything.

======================================================================*/

int pcmcia_close_memory(memory_handle_t handle)
{
    DEBUG(1, "cs: close_memory(0x%p)\n", handle);
    if (CHECK_REGION(handle))
        return CS_BAD_HANDLE;
    return CS_SUCCESS;
} /* close_memory */

/*======================================================================

    Read from a memory device, using a handle previously returned
    by a call to OpenMemory.

======================================================================*/

int pcmcia_read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
{
    mtd_request_t mtd;
    if (CHECK_REGION(handle))
        return CS_BAD_HANDLE;
    if (req->Offset >= handle->info.RegionSize)
        return CS_BAD_OFFSET;
    if (req->Offset+req->Count > handle->info.RegionSize)
        return CS_BAD_SIZE;

    mtd.SrcCardOffset = req->Offset + handle->info.CardOffset;
    mtd.TransferLength = req->Count;
    mtd.MediaID = handle->MediaID;
    mtd.Function = MTD_REQ_READ;
    if (req->Attributes & MEM_OP_BUFFER_KERNEL)
        mtd.Function |= MTD_REQ_KERNEL;
    return do_mtd_request(handle, &mtd, buf);
} /* read_memory */

/*======================================================================

    Write to a memory device, using a handle previously returned by
    a call to OpenMemory.

======================================================================*/

int pcmcia_write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
{
    mtd_request_t mtd;
    if (CHECK_REGION(handle))
        return CS_BAD_HANDLE;
    if (req->Offset >= handle->info.RegionSize)
        return CS_BAD_OFFSET;
    if (req->Offset+req->Count > handle->info.RegionSize)
        return CS_BAD_SIZE;

    mtd.DestCardOffset = req->Offset + handle->info.CardOffset;
    mtd.TransferLength = req->Count;
    mtd.MediaID = handle->MediaID;
    mtd.Function = MTD_REQ_WRITE;
    if (req->Attributes & MEM_OP_BUFFER_KERNEL)
        mtd.Function |= MTD_REQ_KERNEL;
    return do_mtd_request(handle, &mtd, buf);
} /* write_memory */

/*======================================================================

    This isn't needed for anything I could think of.

======================================================================*/

int pcmcia_copy_memory(memory_handle_t handle, copy_op_t *req)
{
    if (CHECK_REGION(handle))
        return CS_BAD_HANDLE;
    return CS_UNSUPPORTED_FUNCTION;
}