OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [mtd/] [chips/] [cfi_cmdset_0002.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * Common Flash Interface support:
3
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4
 *
5
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6
 *
7
 * 2_by_8 routines added by Simon Munton
8
 *
9
 * This code is GPL
10
 *
11
 * $Id: cfi_cmdset_0002.c,v 1.1.1.1 2004-04-15 01:52:14 phoenix Exp $
12
 *
13
 */
14
 
15
#include <linux/module.h>
16
#include <linux/types.h>
17
#include <linux/kernel.h>
18
#include <linux/sched.h>
19
#include <asm/io.h>
20
#include <asm/byteorder.h>
21
 
22
#include <linux/errno.h>
23
#include <linux/slab.h>
24
#include <linux/delay.h>
25
#include <linux/interrupt.h>
26
#include <linux/mtd/map.h>
27
#include <linux/mtd/cfi.h>
28
 
29
#define AMD_BOOTLOC_BUG
30
 
31
/* Forward declarations for the mtd_info method implementations below. */
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct map_info *);
45
 
46
 
47
/* Chip driver record registered with the MTD core.  Chips are never
 * probed through this entry directly (probe is NULL); the generic CFI
 * probe identifies command set 0x0002 and calls cfi_cmdset_0002(). */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        probe: NULL, /* Not usable directly */
        destroy: cfi_amdstd_destroy,
        name: "cfi_cmdset_0002",
        module: THIS_MODULE
};
 
54
/*
 * Entry point for AMD/Fujitsu standard command set (0x0002) chips.
 * Reads the vendor extended query table, works around the AMD boot-block
 * location bug, derives the unlock addresses for the device type, and
 * hands off to cfi_amdstd_setup() to build the mtd_info.
 * Returns the new mtd_info, or NULL on an unknown device type.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned char bootloc;
        int ofs_factor = cfi->interleave * cfi->device_type;
        int i;
        __u8 major, minor;
        __u32 base = cfi->chips[0].start;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                /* 'primary' selects the primary vs. alternate vendor table */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;

                /* Enter CFI query mode and read the extended table version */
                cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

                major = cfi_read_query(map, base + (adr+3)*ofs_factor);
                minor = cfi_read_query(map, base + (adr+4)*ofs_factor);

                printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
                       major, minor, adr);
                /* Reset, then issue autoselect to read manufacturer/device ID */
                cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);

                cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
                cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
                cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
                cfi->mfr = cfi_read_query(map, base);
                cfi->id = cfi_read_query(map, base + ofs_factor);

                /* Wheee. Bring me the head of someone at AMD. */
#ifdef AMD_BOOTLOC_BUG
                if (((major << 8) | minor) < 0x3131) {
                        /* CFI version 1.0 => don't trust bootloc */
                        if (cfi->id & 0x80) {
                                printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                                bootloc = 3;    /* top boot */
                        } else {
                                bootloc = 2;    /* bottom boot */
                        }
                } else
#endif
                        {
                                /* Table is trustworthy: read boot location from it */
                                cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
                                bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
                        }
                if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                        /* Top-boot chips report regions bottom-up; reverse the
                         * table so it matches the actual layout. */
                        printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

                        for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                __u32 swap;

                                swap = cfi->cfiq->EraseRegionInfo[i];
                                cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                cfi->cfiq->EraseRegionInfo[j] = swap;
                        }
                }
                /* Standard AMD unlock address pairs per device width */
                switch (cfi->device_type) {
                case CFI_DEVICETYPE_X8:
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                        break;
                case CFI_DEVICETYPE_X16:
                        cfi->addr_unlock1 = 0xaaa;
                        if (map->buswidth == cfi->interleave) {
                                /* X16 chip(s) in X8 mode */
                                cfi->addr_unlock2 = 0x555;
                        } else {
                                cfi->addr_unlock2 = 0x554;
                        }
                        break;
                case CFI_DEVICETYPE_X32:
                        cfi->addr_unlock1 = 0x1555;
                        cfi->addr_unlock2 = 0xaaa;
                        break;
                default:
                        printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
                        return NULL;
                }
        } /* CFI mode */

        /* Typical timeouts from the query structure are log2 milliseconds */
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        /* Leave the chip in read/reset mode before handing it over */
        cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
        return cfi_amdstd_setup(map);
}
144
 
145
/*
 * Build and populate the mtd_info for the identified chip set: total size,
 * erase-region geometry, and the read/write/erase method pointers.
 * On any failure, frees everything (including cfi->cfiq) and returns NULL.
 */
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        /* DevSize is log2 bytes per chip; scale by interleave for the bus */
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        printk(KERN_NOTICE "number of %s chips: %d\n",
                (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);

        if (!mtd) {
          printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
          goto setup_err;
        }

        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;
        /* Also select the correct geometry setup too */
        mtd->size = devsize * cfi->numchips;

        if (cfi->cfiq->NumEraseRegions == 1) {
                /* No need to muck about with multiple erase sizes */
                mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
        } else {
                unsigned long offset = 0;
                int i,j;

                /* One region entry per (chip, CFI region) pair */
                mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
                mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
                if (!mtd->eraseregions) {
                        printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                        goto setup_err;
                }

                for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                        unsigned long ernum, ersize;
                        /* EraseRegionInfo packs size (high 16 bits, in 256-byte
                         * units) and block count minus one (low 16 bits) */
                        ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                        ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                        /* mtd->erasesize reports the largest region's size */
                        if (mtd->erasesize < ersize) {
                                mtd->erasesize = ersize;
                        }
                        for (j=0; j<cfi->numchips; j++) {
                                mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                                mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                                mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        }
                        offset += (ersize * ernum);
                }
                /* Sanity check: regions must exactly tile one chip */
                if (offset != devsize) {
                        /* Argh */
                        printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                        goto setup_err;
                }
#if 0
                // debug
                for (i=0; i<mtd->numeraseregions;i++){
                        printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
                               i,mtd->eraseregions[i].offset,
                               mtd->eraseregions[i].erasesize,
                               mtd->eraseregions[i].numblocks);
                }
#endif
        }

        /* Pick the erase implementation matching the geometry */
        switch (CFIDEV_BUSWIDTH)
        {
        case 1:
        case 2:
        case 4:
#if 1
                if (mtd->numeraseregions > 1)
                        mtd->erase = cfi_amdstd_erase_varsize;
                else
#endif
                /* A single one-block region means only chip erase works */
                if (((cfi->cfiq->EraseRegionInfo[0] & 0xffff) + 1) == 1)
                        mtd->erase = cfi_amdstd_erase_chip;
                else
                        mtd->erase = cfi_amdstd_erase_onesize;
                mtd->read = cfi_amdstd_read;
                mtd->write = cfi_amdstd_write;
                break;

        default:
                printk(KERN_WARNING "Unsupported buswidth\n");
                goto setup_err;
                break;
        }
        if (cfi->fast_prog) {
                /* In cfi_amdstd_write() we frob the protection stuff
                   without paying any attention to the state machine.
                   This upsets in-progress erases. So we turn this flag
                   off for now till the code gets fixed. */
                printk(KERN_NOTICE "cfi_cmdset_0002: Disabling fast programming due to code brokenness.\n");
                cfi->fast_prog = 0;
        }


        /* does this chip have a secsi area? */
        if(cfi->mfr==1){
                /* AMD parts with a SecSi (security silicon) sector get the
                 * protection-register read hooks */
                switch(cfi->id){
                case 0x50:
                case 0x53:
                case 0x55:
                case 0x56:
                case 0x5C:
                case 0x5F:
                        /* Yes */
                        mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
                        mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
                        /* fallthrough */
                default:
                        ;
                }
        }


        mtd->sync = cfi_amdstd_sync;
        mtd->suspend = cfi_amdstd_suspend;
        mtd->resume = cfi_amdstd_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        map->fldrv = &cfi_amdstd_chipdrv;
        mtd->name = map->name;
        MOD_INC_USE_COUNT;
        return mtd;

 setup_err:
        /* mtd was zeroed right after allocation, so eraseregions is either
         * NULL or a live allocation here */
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}
282
 
283
/*
 * Read 'len' bytes at chip-relative offset 'adr' from a single chip into
 * 'buf'.  Sleeps (uninterruptibly) on the chip's wait queue until the chip
 * is FL_READY, then copies under the chip mutex.  Returns 0.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;

 retry:
        cfi_spin_lock(chip->mutex);

        if (chip->state != FL_READY){
#if 0
                printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
                /* Queue ourselves and sleep until whoever owns the chip
                 * wakes the queue, then re-check from the top. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                cfi_spin_unlock(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        /* adr was chip-relative; make it absolute within the map */
        adr += chip->start;

        chip->state = FL_READY;

        map->copy_from(map, buf, adr, len);

        wake_up(&chip->wq);
        cfi_spin_unlock(chip->mutex);

        return 0;
}
322
 
323
/*
 * mtd->read implementation: split a read that may span several interleaved
 * chips into per-chip reads handled by do_read_onechip().
 * *retlen reports how many bytes were actually copied.
 */
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int chipnum = (from >> cfi->chipshift);
        /* offset within the first chip that the first read should start */
        unsigned long ofs = from - ((loff_t)chipnum << cfi->chipshift);
        int ret = 0;

        *retlen = 0;

        while (len && chipnum < cfi->numchips) {
                unsigned long chunk = len;

                /* Clip the chunk to the end of the current chip */
                if ((len + ofs - 1) >> cfi->chipshift)
                        chunk = (1 << cfi->chipshift) - ofs;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, chunk, buf);
                if (ret)
                        break;

                *retlen += chunk;
                buf += chunk;
                len -= chunk;

                /* Subsequent chips are read from their start */
                ofs = 0;
                chipnum++;
        }
        return ret;
}
363
 
364
/*
 * Read from one chip's SecSi (security silicon) sector.  Waits for the chip
 * to be FL_READY, issues the SecSi Sector Entry command sequence (0xAA/0x55/
 * 0x88), copies the data, then exits SecSi mode (0xAA/0x55/0x90/0x00).
 * Returns 0.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        cfi_spin_lock(chip->mutex);

        if (chip->state != FL_READY){
#if 0
                printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
                /* Sleep until the current owner wakes the queue, then retry */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                cfi_spin_unlock(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        /* Enter SecSi sector mode */
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map->copy_from(map, buf, adr, len);

        /* Exit SecSi sector mode, back to normal array read */
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        cfi_spin_unlock(chip->mutex);

        return 0;
}
413
 
414
/*
 * Protection-register read hook: reads from the SecSi area, which exposes
 * 8 bytes per chip, spanning chips as needed.
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        /* 8 secsi bytes per chip */
        int chipnum = from >> 3;
        /* offset within the first chip that the first read should start */
        unsigned long ofs = from & 7;
        int ret = 0;

        *retlen = 0;

        while (len && chipnum < cfi->numchips) {
                unsigned long chunk = len;

                /* Clip to the 8-byte SecSi window of the current chip */
                if ((len + ofs - 1) >> 3)
                        chunk = (1 << 3) - ofs;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, chunk, buf);
                if (ret)
                        break;

                *retlen += chunk;
                buf += chunk;
                len -= chunk;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
456
 
457
/*
 * Program one bus-width word at chip-relative address 'adr'.
 * 'fast' selects unlock-bypass programming (single 0xA0 command) instead of
 * the full 0xAA/0x55/0xA0 unlock sequence.  Completion is detected with the
 * AMD DQ6 toggle-bit algorithm: DQ6 toggles on consecutive reads while the
 * operation is in progress; DQ5 set signals an internal timeout.
 * Returns 0 on success, -EIO on timeout.
 *
 * Fix vs. previous revision: the timeout error path used to fall through to
 * the common exit after already waking the queue and dropping the chip
 * mutex, causing a double unlock / double wake.  It now returns directly.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
{
        unsigned long timeo = jiffies + HZ;
        unsigned int oldstatus, status;
        unsigned int dq6, dq5;
        struct cfi_private *cfi = map->fldrv_priv;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

 retry:
        cfi_spin_lock(chip->mutex);

        if (chip->state != FL_READY) {
#if 0
                printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
#endif
                /* Sleep until the current owner releases the chip, then retry */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                cfi_spin_unlock(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                printk(KERN_DEBUG "Wake up to write:\n");
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        chip->state = FL_WRITING;

        adr += chip->start;
        ENABLE_VPP(map);
        if (fast) { /* Unlock bypass */
                cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
        }
        else {
                cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
                cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
                cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        }

        cfi_write(map, datum, adr);

        /* Give the chip its typical programming time before polling */
        cfi_spin_unlock(chip->mutex);
        cfi_udelay(chip->word_write_time);
        cfi_spin_lock(chip->mutex);

        /* Polling toggle bits instead of reading back many times
           This ensures that write operation is really completed,
           or tells us why it failed. */
        dq6 = CMD(1<<6);
        dq5 = CMD(1<<5);
        timeo = jiffies + (HZ/1000); /* setting timeout to 1ms for now */

        oldstatus = cfi_read(map, adr);
        status = cfi_read(map, adr);

        /* DQ6 still toggling => busy; DQ5 set => chip-internal timeout */
        while( (status & dq6) != (oldstatus & dq6) &&
               (status & dq5) != dq5 &&
               !time_after(jiffies, timeo) ) {

                if (need_resched()) {
                        cfi_spin_unlock(chip->mutex);
                        yield();
                        cfi_spin_lock(chip->mutex);
                } else
                        udelay(1);

                oldstatus = cfi_read( map, adr );
                status = cfi_read( map, adr );
        }

        if( (status & dq6) != (oldstatus & dq6) ) {
                /* The programming didn't stop?? */
                if( (status & dq5) == dq5 ) {
                        /* When DQ5 raises, we must check once again
                           if DQ6 is toggling.  If not, the operation has been
                           completed OK.  If it still toggles, reset the chip. */
                        oldstatus = cfi_read(map, adr);
                        status = cfi_read(map, adr);

                        if ( (oldstatus & 0x00FF) == (status & 0x00FF) ) {
                                printk(KERN_WARNING "Warning: DQ5 raised while program operation was in progress, however operation completed OK\n" );
                        } else {
                                /* DQ5 is active so we can do a reset and stop the erase */
                                cfi_write(map, CMD(0xF0), chip->start);
                                printk(KERN_WARNING "Internal flash device timeout occurred or write operation was performed while flash was programming.\n" );
                        }
                } else {
                        printk(KERN_WARNING "Waiting for write to complete timed out in do_write_oneword.\n");

                        chip->state = FL_READY;
                        wake_up(&chip->wq);
                        cfi_spin_unlock(chip->mutex);
                        DISABLE_VPP(map);
                        /* Return here: falling through would unlock the
                         * mutex and wake the queue a second time. */
                        return -EIO;
                }
        }

        DISABLE_VPP(map);
        chip->state = FL_READY;
        wake_up(&chip->wq);
        cfi_spin_unlock(chip->mutex);

        return ret;
}
568
 
569
/*
 * mtd->write implementation.  Handles an unaligned leading fragment with a
 * read-modify-write, programs aligned words (optionally in unlock-bypass
 * mode when cfi->fast_prog is set), and finishes with an unaligned trailing
 * fragment.  Correctly crosses chip boundaries, exiting and re-entering
 * unlock-bypass mode per chip.  *retlen reports the bytes written.
 */
static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (CFIDEV_BUSWIDTH-1)) {
                unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
                int i = ofs - bus_ofs;
                int n = 0;
                u_char tmp_buf[4];
                __u32 datum;

                /* Read the existing word, overlay the new bytes, write back */
                map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
                while (len && i < CFIDEV_BUSWIDTH)
                        tmp_buf[i++] = buf[n++], len--;

                /* buswidth 1 can't reach here: ofs & 0 is never nonzero */
                if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)tmp_buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                bus_ofs, datum, 0);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (cfi->fast_prog) {
                /* Go into unlock bypass mode */
                cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
                cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
                cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
        }

        /* We are now aligned, write as much as possible */
        while(len >= CFIDEV_BUSWIDTH) {
                __u32 datum;

                if (cfi_buswidth_is_1()) {
                        datum = *(__u8*)buf;
                } else if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)buf;
                } else {
                        return -EINVAL;
                }
                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum, cfi->fast_prog);
                if (ret) {
                        if (cfi->fast_prog){
                                /* Get out of unlock bypass mode */
                                cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
                                cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
                        }
                        return ret;
                }

                ofs += CFIDEV_BUSWIDTH;
                buf += CFIDEV_BUSWIDTH;
                (*retlen) += CFIDEV_BUSWIDTH;
                len -= CFIDEV_BUSWIDTH;

                /* Crossed into the next chip? */
                if (ofs >> cfi->chipshift) {
                        if (cfi->fast_prog){
                                /* Get out of unlock bypass mode */
                                cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
                                cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
                        }

                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                        if (cfi->fast_prog){
                                /* Go into unlock bypass mode for next set of chips */
                                cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
                                cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
                                cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
                        }
                }
        }

        if (cfi->fast_prog){
                /* Get out of unlock bypass mode */
                cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
                cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
        }

        /* Write the trailing bytes if any */
        if (len & (CFIDEV_BUSWIDTH-1)) {
                int i = 0, n = 0;
                u_char tmp_buf[4];
                __u32 datum;

                /* Read-modify-write the final partial word */
                map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
                while (len--)
                        tmp_buf[i++] = buf[n++];

                if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)tmp_buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, datum, 0);
                if (ret)
                        return ret;

                (*retlen) += n;
        }

        return 0;
}
713
 
714
/*
 * Whole-chip erase for devices that only implement the chip erase command.
 * Issues the six-cycle chip erase sequence and then polls completion with
 * the DQ6 toggle / DQ5 timeout algorithm, tolerating an erase-suspend by
 * sleeping until the chip returns to FL_ERASING.
 * Returns 0 on success, -EIO on timeout.
 */
static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
{
        unsigned int oldstatus, status;
        unsigned int dq6, dq5;
        unsigned long timeo = jiffies + HZ;
        unsigned int adr;
        struct cfi_private *cfi = map->fldrv_priv;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        cfi_spin_lock(chip->mutex);

        if (chip->state != FL_READY){
                /* Chip is busy: sleep on its wait queue, then retry */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                cfi_spin_unlock(chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        chip->state = FL_ERASING;

        /* Handle devices with one erase region, that only implement
         * the chip erase command.
         */
        ENABLE_VPP(map);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
        timeo = jiffies + (HZ*20);
        adr = cfi->addr_unlock1;

        /* Wait for the end of programing/erasure by using the toggle method.
         * As long as there is a programming procedure going on, bit 6 of the last
         * written byte is toggling it's state with each consectuve read.
         * The toggling stops as soon as the procedure is completed.
         *
         * If the process has gone on for too long on the chip, bit 5 gets set.
         * After bit5 is set you can kill the operation by sending a reset
         * command to the chip.
         */
        dq6 = CMD(1<<6);
        dq5 = CMD(1<<5);

        oldstatus = cfi_read(map, adr);
        status = cfi_read(map, adr);
        while( ((status & dq6) != (oldstatus & dq6)) &&
                ((status & dq5) != dq5) &&
                !time_after(jiffies, timeo)) {
                int wait_reps;

                /* an initial short sleep */
                cfi_spin_unlock(chip->mutex);
                schedule_timeout(HZ/100);
                cfi_spin_lock(chip->mutex);

                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);

                        cfi_spin_unlock(chip->mutex);
                        printk("erase suspended. Sleeping\n");

                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
#if 0
                        if (signal_pending(current))
                                return -EINTR;
#endif
                        timeo = jiffies + (HZ*2); /* FIXME */
                        cfi_spin_lock(chip->mutex);
                        continue;
                }

                /* Busy wait for 1/10 of a milisecond */
                for(wait_reps = 0;
                        (wait_reps < 100) &&
                        ((status & dq6) != (oldstatus & dq6)) &&
                        ((status & dq5) != dq5);
                        wait_reps++) {

                        /* Latency issues. Drop the lock, wait a while and retry */
                        cfi_spin_unlock(chip->mutex);

                        cfi_udelay(1);

                        cfi_spin_lock(chip->mutex);
                        oldstatus = cfi_read(map, adr);
                        status = cfi_read(map, adr);
                }
                oldstatus = cfi_read(map, adr);
                status = cfi_read(map, adr);
        }
        if ((status & dq6) != (oldstatus & dq6)) {
                /* The erasing didn't stop?? */
                if ((status & dq5) == dq5) {
                        /* dq5 is active so we can do a reset and stop the erase */
                        cfi_write(map, CMD(0xF0), chip->start);
                }
                chip->state = FL_READY;
                wake_up(&chip->wq);
                cfi_spin_unlock(chip->mutex);
                printk("waiting for erase to complete timed out.");
                DISABLE_VPP(map);
                return -EIO;
        }
        DISABLE_VPP(map);
        chip->state = FL_READY;
        wake_up(&chip->wq);
        cfi_spin_unlock(chip->mutex);

        return 0;
}
840
 
841
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
842
{
843
        unsigned int oldstatus, status;
844
        unsigned int dq6, dq5;
845
        unsigned long timeo = jiffies + HZ;
846
        struct cfi_private *cfi = map->fldrv_priv;
847
        DECLARE_WAITQUEUE(wait, current);
848
 
849
 retry:
850
        cfi_spin_lock(chip->mutex);
851
 
852
        if (chip->state != FL_READY){
853
                set_current_state(TASK_UNINTERRUPTIBLE);
854
                add_wait_queue(&chip->wq, &wait);
855
 
856
                cfi_spin_unlock(chip->mutex);
857
 
858
                schedule();
859
                remove_wait_queue(&chip->wq, &wait);
860
#if 0
861
                if(signal_pending(current))
862
                        return -EINTR;
863
#endif
864
                timeo = jiffies + HZ;
865
 
866
                goto retry;
867
        }
868
 
869
        chip->state = FL_ERASING;
870
 
871
        adr += chip->start;
872
        ENABLE_VPP(map);
873
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
874
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
875
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
876
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
877
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
878
        cfi_write(map, CMD(0x30), adr);
879
 
880
        timeo = jiffies + (HZ*20);
881
 
882
        /* Wait for the end of programing/erasure by using the toggle method.
883
         * As long as there is a programming procedure going on, bit 6 of the last
884
         * written byte is toggling it's state with each consectuve read.
885
         * The toggling stops as soon as the procedure is completed.
886
         *
887
         * If the process has gone on for too long on the chip bit 5 gets.
888
         * After bit5 is set you can kill the operation by sending a reset
889
         * command to the chip.
890
         */
891
        dq6 = CMD(1<<6);
892
        dq5 = CMD(1<<5);
893
 
894
        oldstatus = cfi_read(map, adr);
895
        status = cfi_read(map, adr);
896
        while( ((status & dq6) != (oldstatus & dq6)) &&
897
                ((status & dq5) != dq5) &&
898
                !time_after(jiffies, timeo)) {
899
                int wait_reps;
900
 
901
                /* an initial short sleep */
902
                cfi_spin_unlock(chip->mutex);
903
                schedule_timeout(HZ/100);
904
                cfi_spin_lock(chip->mutex);
905
 
906
                if (chip->state != FL_ERASING) {
907
                        /* Someone's suspended the erase. Sleep */
908
                        set_current_state(TASK_UNINTERRUPTIBLE);
909
                        add_wait_queue(&chip->wq, &wait);
910
 
911
                        cfi_spin_unlock(chip->mutex);
912
                        printk(KERN_DEBUG "erase suspended. Sleeping\n");
913
 
914
                        schedule();
915
                        remove_wait_queue(&chip->wq, &wait);
916
#if 0                   
917
                        if (signal_pending(current))
918
                                return -EINTR;
919
#endif                  
920
                        timeo = jiffies + (HZ*2); /* FIXME */
921
                        cfi_spin_lock(chip->mutex);
922
                        continue;
923
                }
924
 
925
                /* Busy wait for 1/10 of a milisecond */
926
                for(wait_reps = 0;
927
                        (wait_reps < 100) &&
928
                        ((status & dq6) != (oldstatus & dq6)) &&
929
                        ((status & dq5) != dq5);
930
                        wait_reps++) {
931
 
932
                        /* Latency issues. Drop the lock, wait a while and retry */
933
                        cfi_spin_unlock(chip->mutex);
934
 
935
                        cfi_udelay(1);
936
 
937
                        cfi_spin_lock(chip->mutex);
938
                        oldstatus = cfi_read(map, adr);
939
                        status = cfi_read(map, adr);
940
                }
941
                oldstatus = cfi_read(map, adr);
942
                status = cfi_read(map, adr);
943
        }
944
        if( (status & dq6) != (oldstatus & dq6) )
945
        {
946
                /* The erasing didn't stop?? */
947
                if( ( status & dq5 ) == dq5 )
948
                {
949
                        /* When DQ5 raises, we must check once again if DQ6 is toggling.
950
               If not, the erase has been completed OK.  If not, reset chip. */
951
                    oldstatus   = cfi_read( map, adr );
952
                    status      = cfi_read( map, adr );
953
 
954
                    if( ( oldstatus & 0x00FF ) == ( status & 0x00FF ) )
955
                    {
956
                printk( "Warning: DQ5 raised while erase operation was in progress, but erase completed OK\n" );
957
                    }
958
                        else
959
            {
960
                            /* DQ5 is active so we can do a reset and stop the erase */
961
                                cfi_write(map, CMD(0xF0), chip->start);
962
                printk( KERN_WARNING "Internal flash device timeout occured or write operation was performed while flash was erasing\n" );
963
                        }
964
                }
965
        else
966
        {
967
                    printk( "Waiting for erase to complete timed out in do_erase_oneblock.");
968
 
969
                chip->state = FL_READY;
970
                wake_up(&chip->wq);
971
                cfi_spin_unlock(chip->mutex);
972
                DISABLE_VPP(map);
973
                return -EIO;
974
        }
975
        }
976
 
977
        DISABLE_VPP(map);
978
        chip->state = FL_READY;
979
        wake_up(&chip->wq);
980
        cfi_spin_unlock(chip->mutex);
981
        return 0;
982
}
983
 
984
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
985
{
986
        struct map_info *map = mtd->priv;
987
        struct cfi_private *cfi = map->fldrv_priv;
988
        unsigned long adr, len;
989
        int chipnum, ret = 0;
990
        int i, first;
991
        struct mtd_erase_region_info *regions = mtd->eraseregions;
992
 
993
        if (instr->addr > mtd->size)
994
                return -EINVAL;
995
 
996
        if ((instr->len + instr->addr) > mtd->size)
997
                return -EINVAL;
998
 
999
        /* Check that both start and end of the requested erase are
1000
         * aligned with the erasesize at the appropriate addresses.
1001
         */
1002
 
1003
        i = 0;
1004
 
1005
        /* Skip all erase regions which are ended before the start of
1006
           the requested erase. Actually, to save on the calculations,
1007
           we skip to the first erase region which starts after the
1008
           start of the requested erase, and then go back one.
1009
        */
1010
 
1011
        while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
1012
               i++;
1013
        i--;
1014
 
1015
        /* OK, now i is pointing at the erase region in which this
1016
           erase request starts. Check the start of the requested
1017
           erase range is aligned with the erase size which is in
1018
           effect here.
1019
        */
1020
 
1021
        if (instr->addr & (regions[i].erasesize-1))
1022
                return -EINVAL;
1023
 
1024
        /* Remember the erase region we start on */
1025
        first = i;
1026
 
1027
        /* Next, check that the end of the requested erase is aligned
1028
         * with the erase region at that address.
1029
         */
1030
 
1031
        while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
1032
                i++;
1033
 
1034
        /* As before, drop back one to point at the region in which
1035
           the address actually falls
1036
        */
1037
        i--;
1038
 
1039
        if ((instr->addr + instr->len) & (regions[i].erasesize-1))
1040
                return -EINVAL;
1041
 
1042
        chipnum = instr->addr >> cfi->chipshift;
1043
        adr = instr->addr - (chipnum << cfi->chipshift);
1044
        len = instr->len;
1045
 
1046
        i=first;
1047
 
1048
        while(len) {
1049
                ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
1050
 
1051
                if (ret)
1052
                        return ret;
1053
 
1054
                adr += regions[i].erasesize;
1055
                len -= regions[i].erasesize;
1056
 
1057
                if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
1058
                        i++;
1059
 
1060
                if (adr >> cfi->chipshift) {
1061
                        adr = 0;
1062
                        chipnum++;
1063
 
1064
                        if (chipnum >= cfi->numchips)
1065
                        break;
1066
                }
1067
        }
1068
 
1069
        instr->state = MTD_ERASE_DONE;
1070
        if (instr->callback)
1071
                instr->callback(instr);
1072
 
1073
        return 0;
1074
}
1075
 
1076
static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
1077
{
1078
        struct map_info *map = mtd->priv;
1079
        struct cfi_private *cfi = map->fldrv_priv;
1080
        unsigned long adr, len;
1081
        int chipnum, ret = 0;
1082
 
1083
        if (instr->addr & (mtd->erasesize - 1))
1084
                return -EINVAL;
1085
 
1086
        if (instr->len & (mtd->erasesize -1))
1087
                return -EINVAL;
1088
 
1089
        if ((instr->len + instr->addr) > mtd->size)
1090
                return -EINVAL;
1091
 
1092
        chipnum = instr->addr >> cfi->chipshift;
1093
        adr = instr->addr - (chipnum << cfi->chipshift);
1094
        len = instr->len;
1095
 
1096
        while(len) {
1097
                ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
1098
 
1099
                if (ret)
1100
                        return ret;
1101
 
1102
                adr += mtd->erasesize;
1103
                len -= mtd->erasesize;
1104
 
1105
                if (adr >> cfi->chipshift) {
1106
                        adr = 0;
1107
                        chipnum++;
1108
 
1109
                        if (chipnum >= cfi->numchips)
1110
                        break;
1111
                }
1112
        }
1113
 
1114
        instr->state = MTD_ERASE_DONE;
1115
        if (instr->callback)
1116
                instr->callback(instr);
1117
 
1118
        return 0;
1119
}
1120
 
1121
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1122
{
1123
        struct map_info *map = mtd->priv;
1124
        struct cfi_private *cfi = map->fldrv_priv;
1125
        int ret = 0;
1126
 
1127
        if (instr->addr != 0)
1128
                return -EINVAL;
1129
 
1130
        if (instr->len != mtd->size)
1131
                return -EINVAL;
1132
 
1133
        ret = do_erase_chip(map, &cfi->chips[0]);
1134
        if (ret)
1135
                return ret;
1136
 
1137
        instr->state = MTD_ERASE_DONE;
1138
        if (instr->callback)
1139
                instr->callback(instr);
1140
 
1141
        return 0;
1142
}
1143
 
1144
static void cfi_amdstd_sync (struct mtd_info *mtd)
1145
{
1146
        struct map_info *map = mtd->priv;
1147
        struct cfi_private *cfi = map->fldrv_priv;
1148
        int i;
1149
        struct flchip *chip;
1150
        int ret = 0;
1151
        DECLARE_WAITQUEUE(wait, current);
1152
 
1153
        for (i=0; !ret && i<cfi->numchips; i++) {
1154
                chip = &cfi->chips[i];
1155
 
1156
        retry:
1157
                cfi_spin_lock(chip->mutex);
1158
 
1159
                switch(chip->state) {
1160
                case FL_READY:
1161
                case FL_STATUS:
1162
                case FL_CFI_QUERY:
1163
                case FL_JEDEC_QUERY:
1164
                        chip->oldstate = chip->state;
1165
                        chip->state = FL_SYNCING;
1166
                        /* No need to wake_up() on this state change -
1167
                         * as the whole point is that nobody can do anything
1168
                         * with the chip now anyway.
1169
                         */
1170
                case FL_SYNCING:
1171
                        cfi_spin_unlock(chip->mutex);
1172
                        break;
1173
 
1174
                default:
1175
                        /* Not an idle state */
1176
                        add_wait_queue(&chip->wq, &wait);
1177
 
1178
                        cfi_spin_unlock(chip->mutex);
1179
 
1180
                        schedule();
1181
 
1182
                        remove_wait_queue(&chip->wq, &wait);
1183
 
1184
                        goto retry;
1185
                }
1186
        }
1187
 
1188
        /* Unlock the chips again */
1189
 
1190
        for (i--; i >=0; i--) {
1191
                chip = &cfi->chips[i];
1192
 
1193
                cfi_spin_lock(chip->mutex);
1194
 
1195
                if (chip->state == FL_SYNCING) {
1196
                        chip->state = chip->oldstate;
1197
                        wake_up(&chip->wq);
1198
                }
1199
                cfi_spin_unlock(chip->mutex);
1200
        }
1201
}
1202
 
1203
 
1204
static int cfi_amdstd_suspend(struct mtd_info *mtd)
1205
{
1206
        struct map_info *map = mtd->priv;
1207
        struct cfi_private *cfi = map->fldrv_priv;
1208
        int i;
1209
        struct flchip *chip;
1210
        int ret = 0;
1211
 
1212
        for (i=0; !ret && i<cfi->numchips; i++) {
1213
                chip = &cfi->chips[i];
1214
 
1215
                cfi_spin_lock(chip->mutex);
1216
 
1217
                switch(chip->state) {
1218
                case FL_READY:
1219
                case FL_STATUS:
1220
                case FL_CFI_QUERY:
1221
                case FL_JEDEC_QUERY:
1222
                        chip->oldstate = chip->state;
1223
                        chip->state = FL_PM_SUSPENDED;
1224
                        /* No need to wake_up() on this state change -
1225
                         * as the whole point is that nobody can do anything
1226
                         * with the chip now anyway.
1227
                         */
1228
                case FL_PM_SUSPENDED:
1229
                        break;
1230
 
1231
                default:
1232
                        ret = -EAGAIN;
1233
                        break;
1234
                }
1235
                cfi_spin_unlock(chip->mutex);
1236
        }
1237
 
1238
        /* Unlock the chips again */
1239
 
1240
        if (ret) {
1241
                for (i--; i >=0; i--) {
1242
                        chip = &cfi->chips[i];
1243
 
1244
                        cfi_spin_lock(chip->mutex);
1245
 
1246
                        if (chip->state == FL_PM_SUSPENDED) {
1247
                                chip->state = chip->oldstate;
1248
                                wake_up(&chip->wq);
1249
                        }
1250
                        cfi_spin_unlock(chip->mutex);
1251
                }
1252
        }
1253
 
1254
        return ret;
1255
}
1256
 
1257
static void cfi_amdstd_resume(struct mtd_info *mtd)
1258
{
1259
        struct map_info *map = mtd->priv;
1260
        struct cfi_private *cfi = map->fldrv_priv;
1261
        int i;
1262
        struct flchip *chip;
1263
 
1264
        for (i=0; i<cfi->numchips; i++) {
1265
 
1266
                chip = &cfi->chips[i];
1267
 
1268
                cfi_spin_lock(chip->mutex);
1269
 
1270
                if (chip->state == FL_PM_SUSPENDED) {
1271
                        chip->state = FL_READY;
1272
                        cfi_write(map, CMD(0xF0), chip->start);
1273
                        wake_up(&chip->wq);
1274
                }
1275
                else
1276
                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1277
 
1278
                cfi_spin_unlock(chip->mutex);
1279
        }
1280
}
1281
 
1282
static void cfi_amdstd_destroy(struct mtd_info *mtd)
1283
{
1284
        struct map_info *map = mtd->priv;
1285
        struct cfi_private *cfi = map->fldrv_priv;
1286
        kfree(cfi->cmdset_priv);
1287
        kfree(cfi->cfiq);
1288
        kfree(cfi);
1289
        kfree(mtd->eraseregions);
1290
}
1291
 
1292
/* Name under which this command set registers in the inter-module table. */
static char im_name[]="cfi_cmdset_0002";
1293
 
1294
/*
 * Module init: publish cfi_cmdset_0002() via the inter-module table so
 * map drivers can locate this command set by name at probe time.
 * Always succeeds.
 */
int __init cfi_amdstd_init(void)
{
        inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
        return 0;
}
1299
 
1300
/* Module exit: withdraw the inter-module registration made at init. */
static void __exit cfi_amdstd_exit(void)
{
        inter_module_unregister(im_name);
}
1304
 
1305
/* Standard module entry/exit hookup and module metadata. */
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
1311
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.